author     Vitaly Buka <vitalybuka@google.com>  2024-04-02 14:23:42 -0700
committer  Vitaly Buka <vitalybuka@google.com>  2024-04-02 14:23:42 -0700
commit     2fe88fc8b7a3c27d473b6a172f0dc8aae7be3310 (patch)
tree       4a2ce5eb31e8242dcbb7d7a3de82d3309fdc23c5 /llvm/test/CodeGen
parent     eb6a41808ef4e058a24f9ebc6c85b10c966eb183 (diff)
parent     89271b46761749503dffe94c60b9cbe0bda80284 (diff)
[𝘀𝗽𝗿] changes introduced through rebase
Created using spr 1.3.4 [skip ci]
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll | 72
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/combine-extract-vec-elt.mir | 299
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/combine-overflow.mir | 178
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll | 92
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir | 245
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-nneg-disjoint.ll | 135
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-trunc.ll | 90
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-deinterleave2.ll | 34
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-interleave2.ll | 30
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir | 28
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir | 27
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir | 29
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir | 36
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir | 4
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir | 29
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir | 49
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir | 57
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll | 14
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir | 12
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir | 9
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir | 329
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/select.mir | 319
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir | 52
-rw-r--r-- llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll | 6
-rw-r--r-- llvm/test/CodeGen/AArch64/aarch64-smull.ll | 53
-rw-r--r-- llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll | 14
-rw-r--r-- llvm/test/CodeGen/AArch64/abs.ll | 26
-rw-r--r-- llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll | 36
-rw-r--r-- llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll | 12
-rw-r--r-- llvm/test/CodeGen/AArch64/allow-check.ll | 30
-rw-r--r-- llvm/test/CodeGen/AArch64/and-sink.ll | 60
-rw-r--r-- llvm/test/CodeGen/AArch64/arm64-anyregcc.ll | 225
-rw-r--r-- llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll | 21
-rw-r--r-- llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll | 34
-rw-r--r-- llvm/test/CodeGen/AArch64/arm64-patchpoint.ll | 139
-rw-r--r-- llvm/test/CodeGen/AArch64/arm64-xaluo.ll | 3
-rw-r--r-- llvm/test/CodeGen/AArch64/avoid-zero-copy.mir | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/bitcast.ll | 211
-rw-r--r-- llvm/test/CodeGen/AArch64/bswap.ll | 26
-rw-r--r-- llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add.ll | 81
-rw-r--r-- llvm/test/CodeGen/AArch64/dllexport.ll | 39
-rw-r--r-- llvm/test/CodeGen/AArch64/extbinopload.ll | 31
-rw-r--r-- llvm/test/CodeGen/AArch64/extract-vector-elt.ll | 1114
-rw-r--r-- llvm/test/CodeGen/AArch64/extractvector-oob-load.mir | 7
-rw-r--r-- llvm/test/CodeGen/AArch64/fcmp.ll | 81
-rw-r--r-- llvm/test/CodeGen/AArch64/fexplog.ll | 65
-rw-r--r-- llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll | 79
-rw-r--r-- llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll | 51
-rw-r--r-- llvm/test/CodeGen/AArch64/fold-global-offsets.ll | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/fp-conversion-to-tbl.ll | 5
-rw-r--r-- llvm/test/CodeGen/AArch64/fp-intrinsics.ll | 55
-rw-r--r-- llvm/test/CodeGen/AArch64/fpow.ll | 15
-rw-r--r-- llvm/test/CodeGen/AArch64/fptoi.ll | 301
-rw-r--r-- llvm/test/CodeGen/AArch64/fsincos.ll | 26
-rw-r--r-- llvm/test/CodeGen/AArch64/hadd-combine.ll | 48
-rw-r--r-- llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/insert-subvector.ll | 150
-rw-r--r-- llvm/test/CodeGen/AArch64/isinf.ll | 22
-rw-r--r-- llvm/test/CodeGen/AArch64/itofp.ll | 270
-rw-r--r-- llvm/test/CodeGen/AArch64/llvm.exp10.ll | 17
-rw-r--r-- llvm/test/CodeGen/AArch64/load.ll | 3
-rw-r--r-- llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir | 1
-rw-r--r-- llvm/test/CodeGen/AArch64/misched-bundle.mir | 195
-rw-r--r-- llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll | 46
-rw-r--r-- llvm/test/CodeGen/AArch64/neon-compare-instructions.ll | 101
-rw-r--r-- llvm/test/CodeGen/AArch64/neon-truncstore.ll | 5
-rw-r--r-- llvm/test/CodeGen/AArch64/overflow.ll | 117
-rw-r--r-- llvm/test/CodeGen/AArch64/peephole-movd.mir | 60
-rw-r--r-- llvm/test/CodeGen/AArch64/pr86717.ll | 22
-rw-r--r-- llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll | 6
-rw-r--r-- llvm/test/CodeGen/AArch64/sadd_sat_vec.ll | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/setcc_knownbits.ll | 93
-rw-r--r-- llvm/test/CodeGen/AArch64/sext.ll | 14
-rw-r--r-- llvm/test/CodeGen/AArch64/shift.ll | 225
-rw-r--r-- llvm/test/CodeGen/AArch64/shuffle-tbl34.ll | 14
-rw-r--r-- llvm/test/CodeGen/AArch64/shufflevector.ll | 4
-rw-r--r-- llvm/test/CodeGen/AArch64/sme-avoid-coalescing-locally-streaming.ll | 120
-rw-r--r-- llvm/test/CodeGen/AArch64/sme-call-streaming-compatible-to-normal-fn-wihout-sme-attr.ll | 39
-rw-r--r-- llvm/test/CodeGen/AArch64/sme-machine-licm-vg.mir | 64
-rw-r--r-- llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll | 26
-rw-r--r-- llvm/test/CodeGen/AArch64/sme-streaming-body.ll | 34
-rw-r--r-- llvm/test/CodeGen/AArch64/sme-write-vg.ll | 24
-rw-r--r-- llvm/test/CodeGen/AArch64/soft-float-abi.ll | 161
-rw-r--r-- llvm/test/CodeGen/AArch64/srem-vec-crash.ll | 15
-rw-r--r-- llvm/test/CodeGen/AArch64/ssub_sat_vec.ll | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/stack-probing-no-scratch-reg.mir | 1
-rw-r--r-- llvm/test/CodeGen/AArch64/stack-probing-shrink-wrap.mir | 1
-rw-r--r-- llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll | 4
-rw-r--r-- llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll | 12
-rw-r--r-- llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/stackmap.ll | 32
-rw-r--r-- llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll | 6
-rw-r--r-- llvm/test/CodeGen/AArch64/tbl-loops.ll | 4
-rw-r--r-- llvm/test/CodeGen/AArch64/trunc-to-tbl.ll | 28
-rw-r--r-- llvm/test/CodeGen/AArch64/uadd_sat_vec.ll | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/usub_sat_vec.ll | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/vcvt-oversize.ll | 5
-rw-r--r-- llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll | 22
-rw-r--r-- llvm/test/CodeGen/AArch64/vecreduce-add.ll | 905
-rw-r--r-- llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir | 1
-rw-r--r-- llvm/test/CodeGen/AArch64/xor.ll | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/zext.ll | 18
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll | 274
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir | 15
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir | 4
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir | 15
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir | 45
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir | 15
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir | 45
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir | 4
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll | 30
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll | 41
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll | 1
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll | 1
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll | 232
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll | 147
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll | 1063
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll | 408
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll | 229
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll | 8
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll | 8
-rw-r--r-- llvm/test/CodeGen/AMDGPU/add_sub_u64_pseudos.mir | 68
-rw-r--r-- llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll | 6
-rw-r--r-- llvm/test/CodeGen/AMDGPU/allow-check.ll | 30
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll | 255
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-break-large-phis.ll | 51
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll | 108
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll | 12
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll | 44
-rw-r--r-- llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll | 26
-rw-r--r-- llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll | 19
-rw-r--r-- llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/av_spill_cross_bb_usage.mir | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/bf16-conversions.ll | 357
-rw-r--r-- llvm/test/CodeGen/AMDGPU/bf16.ll | 903
-rw-r--r-- llvm/test/CodeGen/AMDGPU/clamp.ll | 64
-rw-r--r-- llvm/test/CodeGen/AMDGPU/convergence-tokens.ll | 55
-rw-r--r-- llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/div_i128.ll | 2298
-rw-r--r-- llvm/test/CodeGen/AMDGPU/div_v2i128.ll | 3233
-rw-r--r-- llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll | 103
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll | 728
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fcanonicalize.ll | 22
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll | 22
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fp-classify.ll | 60
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll | 270
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fptoi.i128.ll | 1502
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fract-match.ll | 167
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll | 867
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll | 564
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll | 564
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll | 4
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll | 669
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll | 5578
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll | 3960
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll | 3960
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll | 5576
-rw-r--r-- llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir | 1
-rw-r--r-- llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll | 30
-rw-r--r-- llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll | 18
-rw-r--r-- llvm/test/CodeGen/AMDGPU/itofp.i128.ll | 1618
-rw-r--r-- llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll | 1
-rw-r--r-- llvm/test/CodeGen/AMDGPU/lds-mixed-absolute-addresses-unused.ll | 26
-rw-r--r-- llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llc-pipeline.ll | 10
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll | 146
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll | 146
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll | 111
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll | 30
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll | 333
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll | 317
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll | 280
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll | 122
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll | 122
-rw-r--r-- llvm/test/CodeGen/AMDGPU/lto-lower-module-lds.ll | 47
-rw-r--r-- llvm/test/CodeGen/AMDGPU/mad-mix-lo.ll | 191
-rw-r--r-- llvm/test/CodeGen/AMDGPU/merge-buffer-gfx12.mir | 1154
-rw-r--r-- llvm/test/CodeGen/AMDGPU/merge-buffer.mir | 1130
-rw-r--r-- llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir | 28
-rw-r--r-- llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll | 18
-rw-r--r-- llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir | 504
-rw-r--r-- llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll | 1
-rw-r--r-- llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll | 305
-rw-r--r-- llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll | 11
-rw-r--r-- llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll | 20
-rw-r--r-- llvm/test/CodeGen/AMDGPU/preload-kernargs.ll | 8976
-rw-r--r-- llvm/test/CodeGen/AMDGPU/promote-alloca-scoring.ll | 69
-rw-r--r-- llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll | 18
-rw-r--r-- llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll | 44
-rw-r--r-- llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir | 1
-rw-r--r-- llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll | 8
-rw-r--r-- llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir | 1
-rw-r--r-- llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll | 15
-rw-r--r-- llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir | 1
-rw-r--r-- llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll | 78
-rw-r--r-- llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll | 4
-rw-r--r-- llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll | 4
-rw-r--r-- llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll | 4
-rw-r--r-- llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll | 6
-rw-r--r-- llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll | 4
-rw-r--r-- llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll | 6
-rw-r--r-- llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll | 4
-rw-r--r-- llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll | 1
-rw-r--r-- llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll | 26
-rw-r--r-- llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll | 1
-rw-r--r-- llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir | 1
-rw-r--r-- llvm/test/CodeGen/AMDGPU/wave32.ll | 34
-rw-r--r-- llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll | 1
-rw-r--r-- llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/wwm-reserved.ll | 4
-rw-r--r-- llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll | 15
-rw-r--r-- llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir | 2
-rw-r--r-- llvm/test/CodeGen/ARM/select.ll | 399
-rw-r--r-- llvm/test/CodeGen/AVR/bug-81911.ll | 163
-rw-r--r-- llvm/test/CodeGen/BPF/addr-space-globals.ll | 2
-rw-r--r-- llvm/test/CodeGen/BPF/addr-space-globals2.ll | 4
-rw-r--r-- llvm/test/CodeGen/BPF/cttz-ctlz.ll | 304
-rw-r--r-- llvm/test/CodeGen/DirectX/ShaderFlags/double-extensions.ll | 3
-rw-r--r-- llvm/test/CodeGen/DirectX/ShaderFlags/doubles.ll | 4
-rw-r--r-- llvm/test/CodeGen/DirectX/abs-vec.ll | 34
-rw-r--r-- llvm/test/CodeGen/DirectX/abs.ll | 38
-rw-r--r-- llvm/test/CodeGen/DirectX/any.ll | 113
-rw-r--r-- llvm/test/CodeGen/DirectX/ceil.ll | 20
-rw-r--r-- llvm/test/CodeGen/DirectX/ceil_error.ll | 10
-rw-r--r-- llvm/test/CodeGen/DirectX/clamp-vec.ll | 74
-rw-r--r-- llvm/test/CodeGen/DirectX/clamp.ll | 94
-rw-r--r-- llvm/test/CodeGen/DirectX/cos.ll | 20
-rw-r--r-- llvm/test/CodeGen/DirectX/cos_error.ll | 10
-rw-r--r-- llvm/test/CodeGen/DirectX/dot2_error.ll | 10
-rw-r--r-- llvm/test/CodeGen/DirectX/dot3_error.ll | 10
-rw-r--r-- llvm/test/CodeGen/DirectX/dot4_error.ll | 10
-rw-r--r-- llvm/test/CodeGen/DirectX/exp-vec.ll | 17
-rw-r--r-- llvm/test/CodeGen/DirectX/exp.ll | 31
-rw-r--r-- llvm/test/CodeGen/DirectX/fabs.ll | 32
-rw-r--r-- llvm/test/CodeGen/DirectX/fdot.ll | 94
-rw-r--r-- llvm/test/CodeGen/DirectX/floor.ll | 20
-rw-r--r-- llvm/test/CodeGen/DirectX/floor_error.ll | 10
-rw-r--r-- llvm/test/CodeGen/DirectX/fmax.ll | 31
-rw-r--r-- llvm/test/CodeGen/DirectX/fmin.ll | 31
-rw-r--r-- llvm/test/CodeGen/DirectX/idot.ll | 100
-rw-r--r-- llvm/test/CodeGen/DirectX/isinf.ll | 25
-rw-r--r-- llvm/test/CodeGen/DirectX/isinf_error.ll | 13
-rw-r--r-- llvm/test/CodeGen/DirectX/lerp.ll | 56
-rw-r--r-- llvm/test/CodeGen/DirectX/lib_entry.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/log-vec.ll | 30
-rw-r--r-- llvm/test/CodeGen/DirectX/log.ll | 25
-rw-r--r-- llvm/test/CodeGen/DirectX/log10.ll | 25
-rw-r--r-- llvm/test/CodeGen/DirectX/log2.ll | 20
-rw-r--r-- llvm/test/CodeGen/DirectX/log2_error.ll | 10
-rw-r--r-- llvm/test/CodeGen/DirectX/pow-vec.ll | 15
-rw-r--r-- llvm/test/CodeGen/DirectX/pow.ll | 29
-rw-r--r-- llvm/test/CodeGen/DirectX/rcp.ll | 52
-rw-r--r-- llvm/test/CodeGen/DirectX/reversebits.ll | 31
-rw-r--r-- llvm/test/CodeGen/DirectX/round.ll | 35
-rw-r--r-- llvm/test/CodeGen/DirectX/round_error.ll | 4
-rw-r--r-- llvm/test/CodeGen/DirectX/rsqrt.ll | 28
-rw-r--r-- llvm/test/CodeGen/DirectX/rsqrt_error.ll | 14
-rw-r--r-- llvm/test/CodeGen/DirectX/smax.ll | 31
-rw-r--r-- llvm/test/CodeGen/DirectX/smin.ll | 31
-rw-r--r-- llvm/test/CodeGen/DirectX/sqrt.ll | 20
-rw-r--r-- llvm/test/CodeGen/DirectX/sqrt_error.ll | 10
-rw-r--r-- llvm/test/CodeGen/DirectX/trunc.ll | 20
-rw-r--r-- llvm/test/CodeGen/DirectX/trunc_error.ll | 10
-rw-r--r-- llvm/test/CodeGen/DirectX/umax.ll | 29
-rw-r--r-- llvm/test/CodeGen/DirectX/umin.ll | 31
-rw-r--r-- llvm/test/CodeGen/Generic/ForceStackAlign.ll | 2
-rw-r--r-- llvm/test/CodeGen/Generic/allow-check.ll | 31
-rw-r--r-- llvm/test/CodeGen/Generic/builtin-hot.ll | 19
-rw-r--r-- llvm/test/CodeGen/Generic/gc-lowering.ll | 62
-rw-r--r-- llvm/test/CodeGen/Hexagon/addrmode-immop.mir | 4
-rw-r--r-- llvm/test/CodeGen/Hexagon/build-attributes.ll | 16
-rw-r--r-- llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll | 2
-rw-r--r-- llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll | 2
-rw-r--r-- llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll | 2
-rw-r--r-- llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll | 2
-rw-r--r-- llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll | 1
-rw-r--r-- llvm/test/CodeGen/Hexagon/livephysregs-regmask-clobber.mir | 2
-rw-r--r-- llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll | 2
-rw-r--r-- llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll | 2
-rw-r--r-- llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll | 2
-rw-r--r-- llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll | 1
-rw-r--r-- llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll | 1
-rw-r--r-- llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll | 1
-rw-r--r-- llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll | 1
-rw-r--r-- llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll | 1
-rw-r--r-- llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll | 2
-rw-r--r-- llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir | 2
-rw-r--r-- llvm/test/CodeGen/LoongArch/lasx/intrinsic-permi.ll | 30
-rw-r--r-- llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll | 42
-rw-r--r-- llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll | 42
-rw-r--r-- llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir | 10
-rw-r--r-- llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir | 2
-rw-r--r-- llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir | 2
-rw-r--r-- llvm/test/CodeGen/Mips/GlobalISel/instruction-select/trap.mir (renamed from llvm/test/CodeGen/Mips/GlobalISel/legalizer/trap.mir) | 9
-rw-r--r-- llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir | 21
-rw-r--r-- llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir | 126
-rw-r--r-- llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir | 4
-rw-r--r-- llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir | 34
-rw-r--r-- llvm/test/CodeGen/Mips/atomic-min-max.ll | 56
-rw-r--r-- llvm/test/CodeGen/Mips/avoid-zero-copy.mir | 2
-rw-r--r-- llvm/test/CodeGen/Mips/msa/emergency-spill.mir | 2
-rw-r--r-- llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll | 10
-rw-r--r-- llvm/test/CodeGen/Mips/no-unaligned-access-r6.ll | 69
-rw-r--r-- llvm/test/CodeGen/NVPTX/atomics-sm70.ll | 142
-rw-r--r-- llvm/test/CodeGen/NVPTX/atomics.ll | 7
-rw-r--r-- llvm/test/CodeGen/NVPTX/b52037.ll | 2
-rw-r--r-- llvm/test/CodeGen/NVPTX/bswap.ll | 77
-rw-r--r-- llvm/test/CodeGen/NVPTX/common-linkage.ll | 29
-rw-r--r-- llvm/test/CodeGen/NVPTX/dynamic_stackalloc.ll | 48
-rw-r--r-- llvm/test/CodeGen/NVPTX/weak-global.ll | 9
-rw-r--r-- llvm/test/CodeGen/PowerPC/aix-codemodel-attr.ll | 166
-rw-r--r-- llvm/test/CodeGen/PowerPC/aix-overflow-toc-data.py | 59
-rw-r--r-- llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-largeaccess.ll | 632
-rw-r--r-- llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-types.ll | 1066
-rw-r--r-- llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-funcattr.ll | 105
-rw-r--r-- llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-loadaddr.ll | 222
-rw-r--r-- llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-targetattr.ll | 53
-rw-r--r-- llvm/test/CodeGen/PowerPC/aix-xcoff-funcsect-explicitsect.ll | 142
-rw-r--r-- llvm/test/CodeGen/PowerPC/ctrloop-constrained-fp.ll | 4
-rw-r--r-- llvm/test/CodeGen/PowerPC/fp-classify.ll | 113
-rw-r--r-- llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir | 2
-rw-r--r-- llvm/test/CodeGen/PowerPC/rldimi.ll | 71
-rw-r--r-- llvm/test/CodeGen/PowerPC/rlwimi.ll | 42
-rw-r--r-- llvm/test/CodeGen/PowerPC/rlwinm.ll | 20
-rw-r--r-- llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll | 58
-rw-r--r-- llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll | 58
-rw-r--r-- llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll | 4
-rw-r--r-- llvm/test/CodeGen/PowerPC/toc-data-large-array.ll | 16
-rw-r--r-- llvm/test/CodeGen/PowerPC/toc-data-large-array2.ll | 8
-rw-r--r-- llvm/test/CodeGen/PowerPC/toc-data-struct-array.ll | 110
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir | 345
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale32.mir | 300
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale64.mir | 139
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir | 4
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll | 948
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir | 21
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir | 21
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir | 33
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir | 12
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir | 20
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir | 4
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir | 10
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir | 2
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir | 20
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir | 4
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir | 21
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir | 21
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir | 2
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-bitcast.mir | 356
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-implicit-def.mir | 410
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-select.mir | 400
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv32.mir | 228
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv64.mir | 110
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir | 425
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir | 558
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv32.mir | 48
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv64.mir | 25
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll | 936
-rw-r--r-- llvm/test/CodeGen/RISCV/allow-check.ll | 32
-rw-r--r-- llvm/test/CodeGen/RISCV/attributes-module-flag.ll | 17
-rw-r--r-- llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll | 496
-rw-r--r-- llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll | 102
-rw-r--r-- llvm/test/CodeGen/RISCV/double-arith-strict.ll | 174
-rw-r--r-- llvm/test/CodeGen/RISCV/double-arith.ll | 358
-rw-r--r-- llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll | 14
-rw-r--r-- llvm/test/CodeGen/RISCV/double-br-fcmp.ll | 210
-rw-r--r-- llvm/test/CodeGen/RISCV/double-calling-conv.ll | 45
-rw-r--r-- llvm/test/CodeGen/RISCV/double-convert-strict.ll | 78
-rw-r--r-- llvm/test/CodeGen/RISCV/double-convert.ll | 365
-rw-r--r-- llvm/test/CodeGen/RISCV/double-fcmp-strict.ll | 400
-rw-r--r-- llvm/test/CodeGen/RISCV/double-fcmp.ll | 140
-rw-r--r-- llvm/test/CodeGen/RISCV/double-imm.ll | 32
-rw-r--r-- llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll | 72
-rw-r--r-- llvm/test/CodeGen/RISCV/double-intrinsics.ll | 127
-rw-r--r-- llvm/test/CodeGen/RISCV/double-isnan.ll | 12
-rw-r--r-- llvm/test/CodeGen/RISCV/double-maximum-minimum.ll | 136
-rw-r--r-- llvm/test/CodeGen/RISCV/double-mem.ll | 86
-rw-r--r-- llvm/test/CodeGen/RISCV/double-previous-failure.ll | 4
-rw-r--r-- llvm/test/CodeGen/RISCV/double-round-conv-sat.ll | 1092
-rw-r--r-- llvm/test/CodeGen/RISCV/double-round-conv.ll | 210
-rw-r--r-- llvm/test/CodeGen/RISCV/double-select-fcmp.ll | 237
-rw-r--r-- llvm/test/CodeGen/RISCV/double-select-icmp.ll | 224
-rw-r--r-- llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll | 40
-rw-r--r-- llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll | 32
-rw-r--r-- llvm/test/CodeGen/RISCV/float-convert.ll | 48
-rw-r--r-- llvm/test/CodeGen/RISCV/float-round-conv-sat.ll | 168
-rw-r--r-- llvm/test/CodeGen/RISCV/half-convert-strict.ll | 24
-rw-r--r-- llvm/test/CodeGen/RISCV/half-convert.ll | 107
-rw-r--r-- llvm/test/CodeGen/RISCV/half-round-conv-sat.ll | 336
-rw-r--r-- llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll | 16
-rw-r--r-- llvm/test/CodeGen/RISCV/live-sp.mir | 2
-rw-r--r-- llvm/test/CodeGen/RISCV/machine-combiner.ll | 24
-rw-r--r-- llvm/test/CodeGen/RISCV/make-compressible-zbc.mir | 585
-rw-r--r-- llvm/test/CodeGen/RISCV/misched-postra-direction.mir | 20
-rw-r--r-- llvm/test/CodeGen/RISCV/module-elf-flags.ll | 13
-rw-r--r-- llvm/test/CodeGen/RISCV/pr64645.ll | 26
-rw-r--r-- llvm/test/CodeGen/RISCV/rv32xtheadbb.ll | 96
-rw-r--r-- llvm/test/CodeGen/RISCV/rv32zbb.ll | 447
-rw-r--r-- llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll | 15
-rw-r--r-- llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll | 30
-rw-r--r-- llvm/test/CodeGen/RISCV/rv64-typepromotion.ll | 27
-rw-r--r-- llvm/test/CodeGen/RISCV/rv64xtheadbb.ll | 209
-rw-r--r-- llvm/test/CodeGen/RISCV/rv64zba.ll | 179
-rw-r--r-- llvm/test/CodeGen/RISCV/rv64zbb.ll | 438
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/abd.ll | 343
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir | 2
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll | 12
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll | 8
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/binop-zext.ll | 154
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll | 95
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/calling-conv.ll | 163
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/compressstore.ll | 871
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll | 727
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll | 2
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll | 1004
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll | 928
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll | 1868
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll | 189
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll | 53
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll | 124
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll | 32
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll | 8
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsll.ll | 920
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll | 8
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll | 27
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir | 6
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll | 6
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll | 19
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll | 30
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll | 18
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll | 38
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll | 541
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll | 368
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll | 320
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir | 2
-rw-r--r-- llvm/test/CodeGen/RISCV/spill-fill-fold.ll | 14
-rw-r--r-- llvm/test/CodeGen/RISCV/stack-inst-compress.mir | 3
-rw-r--r-- llvm/test/CodeGen/RISCV/strip-w-suffix.ll | 74
-rw-r--r-- llvm/test/CodeGen/RISCV/tlsdesc-symbol.ll | 24
-rw-r--r-- llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll | 4
-rw-r--r-- llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll | 6
-rw-r--r-- llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll | 16
-rw-r--r-- llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll | 38
-rw-r--r-- llvm/test/CodeGen/RISCV/zdinx-large-spill.mir | 74
-rw-r--r-- llvm/test/CodeGen/SPIRV/ComparePointers.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/ExecutionMode.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/LinkOnceODR.ll | 4
-rw-r--r-- llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll | 4
-rw-r--r-- llvm/test/CodeGen/SPIRV/assume.ll | 4
-rw-r--r-- llvm/test/CodeGen/SPIRV/capability-kernel.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/empty-logical.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/empty-module.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/empty-opencl32.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/empty-opencl64.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/empty.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/expect.ll | 4
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll | 4
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll | 4
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll | 4
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll | 4
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll | 4
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll | 4
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/both-allowed-disallowed-extension-error.ll | 7
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll | 9
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions.ll | 7
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/invalid-extension-list-format.ll | 6
-rw-r--r-- llvm/test/CodeGen/SPIRV/extensions/unknown-extension-name.ll | 6
-rw-r--r-- llvm/test/CodeGen/SPIRV/fence.ll | 54
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveGetLaneIndex.ll | 68
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll | 3
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/ceil.ll | 20
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/cos.ll | 21
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp.ll | 21
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp2.ll | 21
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/floor.ll | 21
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmad.ll | 29
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmax.ll | 29
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmin.ll | 31
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log.ll | 21
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll | 3
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log2.ll | 21
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/pow.ll | 21
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/reversebits.ll | 21
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/round.ll | 21
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sin.ll | 21
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smax.ll | 29
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smin.ll | 32
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sqrt.ll | 21
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/trunc.ll | 21
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umax.ll | 29
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umin.ll | 32
-rw-r--r-- llvm/test/CodeGen/SPIRV/instructions/atomic.ll | 28
-rw-r--r-- llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll | 28
-rw-r--r-- llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll | 28
-rw-r--r-- llvm/test/CodeGen/SPIRV/instructions/bitwise-i1.ll | 69
-rw-r--r-- llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll | 12
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll | 25
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll | 37
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-store.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-addressspace.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type-deduction-no-bitcast-to-generic.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/nested-struct-opaque-pointers.ll | 20
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/store-operand-ptr-to-struct.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll | 10
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/two-bitcast-or-param-users.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/two-subsequent-bitcasts.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/type-deduce-args-rev.ll | 28
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/type-deduce-args.ll | 97
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-chain.ll | 57
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-complex.ll | 29
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-rev.ll | 28
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call.ll | 28
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/typeof-ptr-int.ll | 29
-rw-r--r-- llvm/test/CodeGen/SPIRV/relationals.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll | 9
-rw-r--r-- llvm/test/CodeGen/SPIRV/simple.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchangeExplicit_cl20.ll | 3
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/BitReversePref.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange_2.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/ConvertPtr.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/DecorationAlignment.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/DecorationMaxByteOffset.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/ExecutionMode_SPIR_to_SPIRV.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/GlobalFunAnnotate.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll | 2
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_cmpxchg.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_legacy.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_work_item_fence.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/OpenCL/barrier.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/OpenCL/sub_group_mask.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/OpenCL/work_group_barrier.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/atomic_flag.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/atomic_load_store.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/bitcast.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/block_w_struct_return.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/builtin_calls.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_arithmetics.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_opt.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/check_ro_qualifier.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/clk_event_t.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/enqueue_kernel.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/explicit-conversions.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/extract_insert_value.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/fadd.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/fmod.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/fmul.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/fneg.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/fp_contract_reassoc_fast_mode.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/frem.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/fsub.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/get_image_num_mip_levels.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/global_block.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/isequal.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/relationals_double.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/relationals_float.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/relationals_half.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll | 2
-rw-r--r-- llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir | 2
-rw-r--r-- llvm/test/CodeGen/SystemZ/atomic-load-06.ll | 4
-rw-r--r-- llvm/test/CodeGen/SystemZ/atomic-memops-fp128.ll | 31
-rw-r--r-- llvm/test/CodeGen/SystemZ/atomic-memops.ll | 739
-rw-r--r-- llvm/test/CodeGen/SystemZ/atomic-store-06.ll | 5
-rw-r--r-- llvm/test/CodeGen/SystemZ/call-zos-01.ll | 14
-rw-r--r-- llvm/test/CodeGen/SystemZ/call-zos-i128.ll | 4
-rw-r--r-- llvm/test/CodeGen/SystemZ/call-zos-vararg.ll | 10
-rw-r--r-- llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir | 1
-rw-r--r-- llvm/test/CodeGen/SystemZ/cond-move-04.mir | 3
-rw-r--r-- llvm/test/CodeGen/SystemZ/cond-move-08.mir | 3
-rw-r--r-- llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir | 3
-rw-r--r-- llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir | 7
-rw-r--r-- llvm/test/CodeGen/SystemZ/frame-28.mir | 4
-rw-r--r-- llvm/test/CodeGen/SystemZ/frame-adjstack.ll | 16
-rw-r--r-- llvm/test/CodeGen/SystemZ/int-cmp-56.mir | 4
-rw-r--r-- llvm/test/CodeGen/SystemZ/int-usub-12.ll | 22
-rw-r--r-- llvm/test/CodeGen/SystemZ/int-usub-13.ll | 2
-rw-r--r-- llvm/test/CodeGen/SystemZ/readcyclecounter.ll | 27
-rw-r--r-- llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir | 2
-rw-r--r-- llvm/test/CodeGen/SystemZ/swifterror.ll | 8
-rw-r--r-- llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll | 16
-rw-r--r-- llvm/test/CodeGen/SystemZ/zos-ada-relocations.ll | 6
-rw-r--r-- llvm/test/CodeGen/SystemZ/zos-landingpad.ll | 2
-rw-r--r-- llvm/test/CodeGen/SystemZ/zos-ppa2.ll | 14
-rw-r--r-- llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll | 14
-rw-r--r-- llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll | 3
-rw-r--r-- llvm/test/CodeGen/Thumb2/mve-gather-increment.ll | 16
-rw-r--r-- llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll | 1
-rw-r--r-- llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll | 4
-rw-r--r-- llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll | 3
-rw-r--r-- llvm/test/CodeGen/Thumb2/mve-vldst4.ll | 5
-rw-r--r-- llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir | 25
-rw-r--r-- llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll | 4
-rw-r--r-- llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll | 5
-rw-r--r-- llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll | 2
-rw-r--r-- llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll | 35
-rw-r--r-- llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll | 37
-rw-r--r-- llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll | 6
-rw-r--r-- llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll | 22
-rw-r--r-- llvm/test/CodeGen/WebAssembly/pr63817.ll | 15
-rw-r--r-- llvm/test/CodeGen/WinCFGuard/cfguard-mingw.ll | 12
-rw-r--r-- llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll | 8
-rw-r--r-- llvm/test/CodeGen/X86/GlobalISel/legalize-icmp-vec.mir | 25
-rw-r--r-- llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir | 2
-rw-r--r-- llvm/test/CodeGen/X86/addcarry.ll | 23
-rw-r--r-- llvm/test/CodeGen/X86/allow-check.ll | 28
-rw-r--r-- llvm/test/CodeGen/X86/apx/add.ll | 90
-rw-r--r-- llvm/test/CodeGen/X86/apx/cfcmov.ll | 95
-rw-r--r-- llvm/test/CodeGen/X86/apx/domain-reassignment.mir | 929
-rw-r--r-- llvm/test/CodeGen/X86/apx/flags-copy-lowering.mir | 102
-rw-r--r-- llvm/test/CodeGen/X86/apx/foldimmediate.mir | 70
-rw-r--r-- llvm/test/CodeGen/X86/apx/inc.ll | 24
-rw-r--r-- llvm/test/CodeGen/X86/apx/shift-eflags.ll | 22
-rw-r--r-- llvm/test/CodeGen/X86/apx/sub.ll | 80
-rw-r--r-- llvm/test/CodeGen/X86/asm-dialect-module.ll | 10
-rw-r--r-- llvm/test/CodeGen/X86/avgceils.ll | 3821
-rw-r--r-- llvm/test/CodeGen/X86/avgceilu.ll | 2187
-rw-r--r-- llvm/test/CodeGen/X86/avgfloors.ll | 3437
-rw-r--r-- llvm/test/CodeGen/X86/avgflooru.ll | 2629
-rw-r--r-- llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll | 8
-rw-r--r-- llvm/test/CodeGen/X86/callbr-asm-kill.mir | 1
-rw-r--r-- llvm/test/CodeGen/X86/cmov.ll | 139
-rw-r--r-- llvm/test/CodeGen/X86/cmp.ll | 13
-rw-r--r-- llvm/test/CodeGen/X86/combine-pavg.ll | 46
-rw-r--r-- llvm/test/CodeGen/X86/combine-sra.ll | 273
-rw-r--r-- llvm/test/CodeGen/X86/dagcombine-shifts.ll | 127
-rw-r--r-- llvm/test/CodeGen/X86/extractelement-load.ll | 364
-rw-r--r-- llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir | 1
-rw-r--r-- llvm/test/CodeGen/X86/heap-alloc-markers.mir | 1
-rw-r--r-- llvm/test/CodeGen/X86/huge-stack-offset.ll | 4
-rw-r--r-- llvm/test/CodeGen/X86/huge-stack-offset2.ll | 2
-rw-r--r-- llvm/test/CodeGen/X86/insertelement-var-index.ll | 44
-rw-r--r-- llvm/test/CodeGen/X86/instr-symbols.mir | 1
-rw-r--r-- llvm/test/CodeGen/X86/int-to-fp-demanded.ll | 382
-rw-r--r-- llvm/test/CodeGen/X86/isel-select-cmov.ll | 50
-rw-r--r-- llvm/test/CodeGen/X86/isel-traps.ll | 73
-rw-r--r-- llvm/test/CodeGen/X86/known-never-zero.ll | 1831
-rw-r--r-- llvm/test/CodeGen/X86/late-remat-update.mir | 1
-rw-r--r-- llvm/test/CodeGen/X86/limit-split-cost.mir | 1
-rw-r--r-- llvm/test/CodeGen/X86/masked_store.ll | 793
-rw-r--r-- llvm/test/CodeGen/X86/oddshuffles.ll | 25
-rw-r--r-- llvm/test/CodeGen/X86/optimize-max-0.ll | 1
-rw-r--r-- llvm/test/CodeGen/X86/pr45378.ll | 40
-rw-r--r-- llvm/test/CodeGen/X86/pr85681.ll | 41
-rw-r--r-- llvm/test/CodeGen/X86/pr86305.ll | 74
-rw-r--r-- llvm/test/CodeGen/X86/pr86880.mir | 21
-rw-r--r-- llvm/test/CodeGen/X86/regalloc-copy-hints.mir | 1
-rw-r--r-- llvm/test/CodeGen/X86/sar_fold.ll | 41
-rw-r--r-- llvm/test/CodeGen/X86/setcc-non-simple-type.ll | 36
-rw-r--r-- llvm/test/CodeGen/X86/shrink_vmul.ll | 223
-rw-r--r-- llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll | 6
-rw-r--r-- llvm/test/CodeGen/X86/stack-protector.ll | 9
-rw-r--r-- llvm/test/CodeGen/X86/statepoint-fastregalloc.mir | 4
-rw-r--r-- llvm/test/CodeGen/X86/statepoint-fixup-undef.mir | 2
-rw-r--r-- llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir | 2
-rw-r--r-- llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir | 2
-rw-r--r-- llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir | 2
-rw-r--r-- llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir | 2
-rw-r--r-- llvm/test/CodeGen/X86/statepoint-invoke-ra.mir | 2
-rw-r--r-- llvm/test/CodeGen/X86/statepoint-vreg-folding.mir | 2
-rw-r--r-- llvm/test/CodeGen/X86/statepoint-vreg.mir | 2
-rw-r--r-- llvm/test/CodeGen/X86/tls-align.ll | 2
-rw-r--r-- llvm/test/CodeGen/X86/tls-desc.ll | 199
-rw-r--r-- llvm/test/CodeGen/X86/tls-loads-control3.ll | 5
-rw-r--r-- llvm/test/CodeGen/X86/var-permute-128.ll | 32
-rw-r--r-- llvm/test/CodeGen/X86/vec_int_to_fp.ll | 305
-rw-r--r-- llvm/test/CodeGen/X86/vector-half-conversions.ll | 254
-rw-r--r-- llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll | 2911
-rw-r--r-- llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll | 736
-rw-r--r-- llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll | 6402
-rw-r--r-- llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll | 30
-rw-r--r-- llvm/test/CodeGen/X86/vpdpwssd.ll | 12
-rw-r--r-- llvm/test/CodeGen/X86/widen_fadd.ll | 91
-rw-r--r-- llvm/test/CodeGen/X86/widen_fmul.ll | 91
-rw-r--r-- llvm/test/CodeGen/X86/widen_fsub.ll | 91
735 files changed, 86204 insertions(+), 32845 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
index 458c2cb..7163da0 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
@@ -512,9 +512,9 @@ define i32 @fetch_and_nand(ptr %p) #0 {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB6_2
; CHECK-NOLSE-O0-NEXT: LBB6_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB6_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB6_1
; CHECK-NOLSE-O0-NEXT: b LBB6_5
@@ -540,9 +540,9 @@ define i32 @fetch_and_nand(ptr %p) #0 {
; CHECK-OUTLINE-O0-NEXT: mvn w1, w8
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_rel
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB6_1
; CHECK-OUTLINE-O0-NEXT: b LBB6_2
@@ -582,9 +582,9 @@ define i32 @fetch_and_nand(ptr %p) #0 {
; CHECK-LSE-O0-NEXT: mvn w10, w9
; CHECK-LSE-O0-NEXT: mov x9, x8
; CHECK-LSE-O0-NEXT: casl w9, w10, [x11]
-; CHECK-LSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-LSE-O0-NEXT: subs w8, w9, w8
; CHECK-LSE-O0-NEXT: cset w8, eq
+; CHECK-LSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-LSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-LSE-O0-NEXT: tbz w8, #0, LBB6_1
; CHECK-LSE-O0-NEXT: b LBB6_2
@@ -649,9 +649,9 @@ define i64 @fetch_and_nand_64(ptr %p) #0 {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB7_2
; CHECK-NOLSE-O0-NEXT: LBB7_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB7_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB7_1
; CHECK-NOLSE-O0-NEXT: b LBB7_5
@@ -677,9 +677,9 @@ define i64 @fetch_and_nand_64(ptr %p) #0 {
; CHECK-OUTLINE-O0-NEXT: mvn x1, x8
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_acq_rel
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #8] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #8] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #24] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB7_1
; CHECK-OUTLINE-O0-NEXT: b LBB7_2
@@ -719,9 +719,9 @@ define i64 @fetch_and_nand_64(ptr %p) #0 {
; CHECK-LSE-O0-NEXT: mvn x10, x9
; CHECK-LSE-O0-NEXT: mov x9, x8
; CHECK-LSE-O0-NEXT: casal x9, x10, [x11]
-; CHECK-LSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-LSE-O0-NEXT: subs x8, x9, x8
; CHECK-LSE-O0-NEXT: cset w8, eq
+; CHECK-LSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-LSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-LSE-O0-NEXT: tbz w8, #0, LBB7_1
; CHECK-LSE-O0-NEXT: b LBB7_2
@@ -782,9 +782,9 @@ define i32 @fetch_and_or(ptr %p) #0 {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB8_2
; CHECK-NOLSE-O0-NEXT: LBB8_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB8_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB8_1
; CHECK-NOLSE-O0-NEXT: b LBB8_5
@@ -855,9 +855,9 @@ define i64 @fetch_and_or_64(ptr %p) #0 {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB9_2
; CHECK-NOLSE-O0-NEXT: LBB9_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB9_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB9_1
; CHECK-NOLSE-O0-NEXT: b LBB9_5
@@ -4005,9 +4005,9 @@ define i32 @atomicrmw_add_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB47_2
; CHECK-NOLSE-O0-NEXT: LBB47_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB47_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB47_1
; CHECK-NOLSE-O0-NEXT: b LBB47_5
@@ -4097,9 +4097,9 @@ define i32 @atomicrmw_xchg_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB48_2
; CHECK-NOLSE-O0-NEXT: LBB48_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB48_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB48_1
; CHECK-NOLSE-O0-NEXT: b LBB48_5
@@ -4190,9 +4190,9 @@ define i32 @atomicrmw_sub_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB49_2
; CHECK-NOLSE-O0-NEXT: LBB49_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB49_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB49_1
; CHECK-NOLSE-O0-NEXT: b LBB49_5
@@ -4287,9 +4287,9 @@ define i32 @atomicrmw_and_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB50_2
; CHECK-NOLSE-O0-NEXT: LBB50_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB50_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB50_1
; CHECK-NOLSE-O0-NEXT: b LBB50_5
@@ -4384,9 +4384,9 @@ define i32 @atomicrmw_or_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB51_2
; CHECK-NOLSE-O0-NEXT: LBB51_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB51_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB51_1
; CHECK-NOLSE-O0-NEXT: b LBB51_5
@@ -4477,9 +4477,9 @@ define i32 @atomicrmw_xor_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB52_2
; CHECK-NOLSE-O0-NEXT: LBB52_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB52_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB52_1
; CHECK-NOLSE-O0-NEXT: b LBB52_5
@@ -4572,9 +4572,9 @@ define i32 @atomicrmw_min_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB53_2
; CHECK-NOLSE-O0-NEXT: LBB53_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB53_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB53_1
; CHECK-NOLSE-O0-NEXT: b LBB53_5
@@ -4605,9 +4605,9 @@ define i32 @atomicrmw_min_i32(ptr %ptr, i32 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel w1, w0, w8, le
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_acq
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB53_1
; CHECK-OUTLINE-O0-NEXT: b LBB53_2
@@ -4686,9 +4686,9 @@ define i32 @atomicrmw_max_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB54_2
; CHECK-NOLSE-O0-NEXT: LBB54_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB54_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB54_1
; CHECK-NOLSE-O0-NEXT: b LBB54_5
@@ -4719,9 +4719,9 @@ define i32 @atomicrmw_max_i32(ptr %ptr, i32 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel w1, w0, w8, gt
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_rel
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB54_1
; CHECK-OUTLINE-O0-NEXT: b LBB54_2
@@ -4800,9 +4800,9 @@ define i32 @atomicrmw_umin_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB55_2
; CHECK-NOLSE-O0-NEXT: LBB55_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB55_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB55_1
; CHECK-NOLSE-O0-NEXT: b LBB55_5
@@ -4833,9 +4833,9 @@ define i32 @atomicrmw_umin_i32(ptr %ptr, i32 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel w1, w0, w8, ls
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_acq_rel
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB55_1
; CHECK-OUTLINE-O0-NEXT: b LBB55_2
@@ -4914,9 +4914,9 @@ define i32 @atomicrmw_umax_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB56_2
; CHECK-NOLSE-O0-NEXT: LBB56_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB56_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB56_1
; CHECK-NOLSE-O0-NEXT: b LBB56_5
@@ -4947,9 +4947,9 @@ define i32 @atomicrmw_umax_i32(ptr %ptr, i32 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel w1, w0, w8, hi
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_relax
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB56_1
; CHECK-OUTLINE-O0-NEXT: b LBB56_2
@@ -5026,9 +5026,9 @@ define i64 @atomicrmw_add_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB57_2
; CHECK-NOLSE-O0-NEXT: LBB57_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB57_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB57_1
; CHECK-NOLSE-O0-NEXT: b LBB57_5
@@ -5117,9 +5117,9 @@ define i64 @atomicrmw_xchg_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB58_2
; CHECK-NOLSE-O0-NEXT: LBB58_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB58_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB58_1
; CHECK-NOLSE-O0-NEXT: b LBB58_5
@@ -5210,9 +5210,9 @@ define i64 @atomicrmw_sub_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB59_2
; CHECK-NOLSE-O0-NEXT: LBB59_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB59_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB59_1
; CHECK-NOLSE-O0-NEXT: b LBB59_5
@@ -5307,9 +5307,9 @@ define i64 @atomicrmw_and_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB60_2
; CHECK-NOLSE-O0-NEXT: LBB60_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB60_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB60_1
; CHECK-NOLSE-O0-NEXT: b LBB60_5
@@ -5404,9 +5404,9 @@ define i64 @atomicrmw_or_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB61_2
; CHECK-NOLSE-O0-NEXT: LBB61_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB61_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB61_1
; CHECK-NOLSE-O0-NEXT: b LBB61_5
@@ -5497,9 +5497,9 @@ define i64 @atomicrmw_xor_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB62_2
; CHECK-NOLSE-O0-NEXT: LBB62_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB62_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB62_1
; CHECK-NOLSE-O0-NEXT: b LBB62_5
@@ -5592,9 +5592,9 @@ define i64 @atomicrmw_min_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB63_2
; CHECK-NOLSE-O0-NEXT: LBB63_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB63_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB63_1
; CHECK-NOLSE-O0-NEXT: b LBB63_5
@@ -5625,9 +5625,9 @@ define i64 @atomicrmw_min_i64(ptr %ptr, i64 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel x1, x0, x8, le
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_acq
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp, #8] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #40] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB63_1
; CHECK-OUTLINE-O0-NEXT: b LBB63_2
@@ -5706,9 +5706,9 @@ define i64 @atomicrmw_max_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB64_2
; CHECK-NOLSE-O0-NEXT: LBB64_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB64_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB64_1
; CHECK-NOLSE-O0-NEXT: b LBB64_5
@@ -5739,9 +5739,9 @@ define i64 @atomicrmw_max_i64(ptr %ptr, i64 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel x1, x0, x8, gt
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_rel
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp, #8] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #40] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB64_1
; CHECK-OUTLINE-O0-NEXT: b LBB64_2
@@ -5820,9 +5820,9 @@ define i64 @atomicrmw_umin_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB65_2
; CHECK-NOLSE-O0-NEXT: LBB65_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB65_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB65_1
; CHECK-NOLSE-O0-NEXT: b LBB65_5
@@ -5853,9 +5853,9 @@ define i64 @atomicrmw_umin_i64(ptr %ptr, i64 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel x1, x0, x8, ls
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_acq_rel
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp, #8] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #40] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB65_1
; CHECK-OUTLINE-O0-NEXT: b LBB65_2
@@ -5934,9 +5934,9 @@ define i64 @atomicrmw_umax_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB66_2
; CHECK-NOLSE-O0-NEXT: LBB66_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB66_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB66_1
; CHECK-NOLSE-O0-NEXT: b LBB66_5
@@ -5967,9 +5967,9 @@ define i64 @atomicrmw_umax_i64(ptr %ptr, i64 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel x1, x0, x8, hi
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_relax
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp, #8] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #40] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB66_1
; CHECK-OUTLINE-O0-NEXT: b LBB66_2
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-extract-vec-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-extract-vec-elt.mir
index a2116cc..c2a38e2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-extract-vec-elt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-extract-vec-elt.mir
@@ -192,8 +192,8 @@ body: |
...
---
+# This test checks that this combine runs after the insertvec->build_vector combine.
name: extract_from_insert
-alignment: 4
tracksRegLiveness: true
liveins:
- { reg: '$x0' }
@@ -203,8 +203,6 @@ frameInfo:
body: |
bb.1:
liveins: $x0, $x1
- ; This test checks that this combine runs after the insertvec->build_vector
- ; combine.
; CHECK-LABEL: name: extract_from_insert
; CHECK: liveins: $x0, $x1
; CHECK-NEXT: {{ $}}
@@ -247,3 +245,298 @@ body: |
RET_ReallyLR implicit $x0
...
+---
+name: extract_from_vector_undef
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_vector_undef
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %extract:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = G_IMPLICIT_DEF
+ %idx:_(s32) = G_CONSTANT i32 -2
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_index_undef
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: extract_from_index_undef
+ ; CHECK: %extract:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = G_IMPLICIT_DEF
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_index_too_large
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_index_too_large
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %extract:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = G_CONSTANT i32 3000
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_with_freeze
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_with_freeze
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %vec:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: %idx:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx(s32)
+ ; CHECK-NEXT: %extract:_(s64) = G_FREEZE [[EVEC]]
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = COPY $w1
+ %fvec:_(<2 x s64>) = G_FREEZE %vec
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %fvec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_insert_symmetry
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_insert_symmetry
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %element:_(s64) = COPY $x1
+ ; CHECK-NEXT: $x0 = COPY %element(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = COPY $w1
+ %element:_(s64) = COPY $x1
+ %invec:_(<2 x s64>) = G_INSERT_VECTOR_ELT %vec(<2 x s64>), %element(s64), %idx(s32)
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %invec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_insert_with_different_consts
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_insert_with_different_consts
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %vec:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: %idx2:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %extract:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx2(s32)
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = G_CONSTANT i32 0
+ %idx2:_(s32) = G_CONSTANT i32 1
+ %element:_(s64) = COPY $x1
+ %invec:_(<2 x s64>) = G_INSERT_VECTOR_ELT %vec(<2 x s64>), %element(s64), %idx(s32)
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %invec(<2 x s64>), %idx2(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_build_vector_non_const
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_build_vector_non_const
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %idx:_(s32) = COPY $w0
+ ; CHECK-NEXT: %arg1:_(s64) = COPY $x0
+ ; CHECK-NEXT: %arg2:_(s64) = COPY $x1
+ ; CHECK-NEXT: %bv:_(<2 x s64>) = G_BUILD_VECTOR %arg1(s64), %arg2(s64)
+ ; CHECK-NEXT: %extract:_(s64) = G_EXTRACT_VECTOR_ELT %bv(<2 x s64>), %idx(s32)
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = COPY $w0
+ %arg1:_(s64) = COPY $x0
+ %arg2:_(s64) = COPY $x1
+ %bv:_(<2 x s64>) = G_BUILD_VECTOR %arg1(s64), %arg2(s64)
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %bv(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_build_vector_const
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_build_vector_const
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %arg1:_(s64) = COPY $x0
+ ; CHECK-NEXT: $x0 = COPY %arg1(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = G_CONSTANT i32 0
+ %arg1:_(s64) = COPY $x0
+ %arg2:_(s64) = COPY $x1
+ %bv:_(<2 x s64>) = G_BUILD_VECTOR %arg1(s64), %arg2(s64)
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %bv(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_build_vector_trunc_const2
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_build_vector_trunc_const2
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %arg1:_(s64) = COPY $x0
+ ; CHECK-NEXT: %extract:_(s32) = G_TRUNC %arg1(s64)
+ ; CHECK-NEXT: $w0 = COPY %extract(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %arg1:_(s64) = COPY $x0
+ %arg2:_(s64) = COPY $x1
+ %arg3:_(s64) = COPY $x0
+ %arg4:_(s64) = COPY $x1
+ %idx:_(s32) = G_CONSTANT i32 0
+ %bv:_(<4 x s32>) = G_BUILD_VECTOR_TRUNC %arg1(s64), %arg2(s64), %arg3(s64), %arg4(s64)
+ %extract:_(s32) = G_EXTRACT_VECTOR_ELT %bv(<4 x s32>), %idx(s32)
+ $w0 = COPY %extract(s32)
+ RET_ReallyLR implicit $x0
+...
+---
+name: extract_from_build_vector_trunc2
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_build_vector_trunc2
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %arg1:_(s64) = COPY $x0
+ ; CHECK-NEXT: %arg2:_(s64) = COPY $x1
+ ; CHECK-NEXT: %idx:_(s32) = COPY $w0
+ ; CHECK-NEXT: %bv:_(<2 x s32>) = G_BUILD_VECTOR_TRUNC %arg1(s64), %arg2(s64)
+ ; CHECK-NEXT: %extract:_(s32) = G_EXTRACT_VECTOR_ELT %bv(<2 x s32>), %idx(s32)
+ ; CHECK-NEXT: $w0 = COPY %extract(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %arg1:_(s64) = COPY $x0
+ %arg2:_(s64) = COPY $x1
+ %idx:_(s32) = COPY $w0
+ %bv:_(<2 x s32>) = G_BUILD_VECTOR_TRUNC %arg1(s64), %arg2(s64)
+ %extract:_(s32) = G_EXTRACT_VECTOR_ELT %bv(<2 x s32>), %idx(s32)
+ $w0 = COPY %extract(s32)
+ RET_ReallyLR implicit $x0
+...
+---
+name: extract_from_build_vector_trunc_const3
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_build_vector_trunc_const3
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %arg1:_(s128) = COPY $q0
+ ; CHECK-NEXT: %extract:_(s64) = G_TRUNC %arg1(s128)
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %arg1:_(s128) = COPY $q0
+ %arg2:_(s128) = COPY $q1
+ %idx:_(s32) = G_CONSTANT i32 0
+ %bv:_(<2 x s64>) = G_BUILD_VECTOR_TRUNC %arg1(s128), %arg2(s128)
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %bv(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+...
+---
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-overflow.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-overflow.mir
new file mode 100644
index 0000000..ec66892
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-overflow.mir
@@ -0,0 +1,178 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass=aarch64-prelegalizer-combiner -mtriple aarch64-unknown-unknown %s -o - | FileCheck %s
+
+---
+name: add_unused
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_unused
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: %add:_(s32) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $w0 = COPY %add(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = COPY $w1
+ %add:_(s32), %o:_(s1) = G_SADDO %0, %1
+ $w0 = COPY %add(s32)
+ RET_ReallyLR implicit $w0
+...
+---
+name: add_canon
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_canon
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: %const:_(s32) = G_CONSTANT i32 10
+ ; CHECK-NEXT: %add:_(s32), %o:_(s1) = G_SADDO [[COPY]], %const
+ ; CHECK-NEXT: %o_wide:_(s32) = G_ZEXT %o(s1)
+ ; CHECK-NEXT: $w0 = COPY %add(s32)
+ ; CHECK-NEXT: $w1 = COPY %o_wide(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = COPY $w1
+ %const:_(s32) = G_CONSTANT i32 10
+ %add:_(s32), %o:_(s1) = G_SADDO %const, %1
+ %o_wide:_(s32) = G_ZEXT %o(s1)
+ $w0 = COPY %add(s32)
+ $w1 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
+---
+name: add_const_fold
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_const_fold
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %add:_(s32) = G_CONSTANT i32 21
+ ; CHECK-NEXT: %o_wide:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: $w0 = COPY %add(s32)
+ ; CHECK-NEXT: $w1 = COPY %o_wide(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = COPY $w1
+ %const:_(s32) = G_CONSTANT i32 10
+ %const1:_(s32) = G_CONSTANT i32 11
+ %add:_(s32), %o:_(s1) = G_UADDO %const, %const1
+ %o_wide:_(s32) = G_ZEXT %o(s1)
+ $w0 = COPY %add(s32)
+ $w1 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
+---
+name: add_add_zero
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_add_zero
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w2
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $w1 = COPY [[C]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = COPY $w1
+ %2:_(s32) = COPY $w2
+ %const:_(s32) = G_CONSTANT i32 10
+ %addl:_(s32) = nsw G_ADD %2, %const
+ %const1:_(s32) = G_CONSTANT i32 -10
+ %add:_(s32), %o:_(s1) = G_SADDO %addl, %const1
+ %o_wide:_(s32) = G_ZEXT %o(s1)
+ $w0 = COPY %add(s32)
+ $w1 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
+---
+name: add_multiuse
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_multiuse
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: %const:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $w1 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $w2 = COPY %const(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %const:_(s32) = G_CONSTANT i32 0
+ %add:_(s32), %o:_(s1) = G_SADDO %0, %const
+ %o_wide:_(s32) = G_ZEXT %o(s1)
+ $w0 = COPY %add(s32)
+ $w1 = COPY %add(s32)
+ $w2 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
+---
+name: add_vector
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_vector
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $w3
+ ; CHECK-NEXT: %bv0:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY]](s32), [[COPY1]](s32)
+ ; CHECK-NEXT: %bv1:_(<4 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32), [[COPY2]](s32), [[COPY3]](s32)
+ ; CHECK-NEXT: %add:_(<4 x s32>), %o:_(<4 x s1>) = G_UADDO %bv0, %bv1
+ ; CHECK-NEXT: %o_wide:_(<4 x s32>) = G_ZEXT %o(<4 x s1>)
+ ; CHECK-NEXT: $q0 = COPY %add(<4 x s32>)
+ ; CHECK-NEXT: $q1 = COPY %o_wide(<4 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = COPY $w1
+ %2:_(s32) = COPY $w2
+ %3:_(s32) = COPY $w3
+ %bv0:_(<4 x s32>) = G_BUILD_VECTOR %0:_(s32), %1:_(s32), %0:_(s32), %1:_(s32)
+ %bv1:_(<4 x s32>) = G_BUILD_VECTOR %2:_(s32), %3:_(s32), %2:_(s32), %3:_(s32)
+ %add:_(<4 x s32>), %o:_(<4 x s1>) = G_UADDO %bv0, %bv1
+ %o_wide:_(<4 x s32>) = G_ZEXT %o(<4 x s1>)
+ $q0 = COPY %add(<4 x s32>)
+ $q1 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
+---
+name: add_splat_vector
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_splat_vector
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: %bv0:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY]](s32), [[COPY1]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; CHECK-NEXT: %o:_(<4 x s1>) = G_BUILD_VECTOR [[C]](s1), [[C]](s1), [[C]](s1), [[C]](s1)
+ ; CHECK-NEXT: %o_wide:_(<4 x s32>) = G_ZEXT %o(<4 x s1>)
+ ; CHECK-NEXT: $q0 = COPY %bv0(<4 x s32>)
+ ; CHECK-NEXT: $q1 = COPY %o_wide(<4 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = COPY $w1
+ %2:_(s32) = COPY $w2
+ %3:_(s32) = COPY $w3
+ %const:_(s32) = G_CONSTANT i32 0
+ %bv0:_(<4 x s32>) = G_BUILD_VECTOR %0:_(s32), %1:_(s32), %0:_(s32), %1:_(s32)
+ %bv1:_(<4 x s32>) = G_BUILD_VECTOR %const:_(s32), %const:_(s32), %const:_(s32), %const:_(s32)
+ %add:_(<4 x s32>), %o:_(<4 x s1>) = G_SADDO %bv0, %bv1
+ %o_wide:_(<4 x s32>) = G_ZEXT %o(<4 x s1>)
+ $q0 = COPY %add(<4 x s32>)
+ $q1 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
index 8aea944..ceef0c4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
@@ -65,22 +65,17 @@ define <8 x i16> @combine_vec_udiv_nonuniform(<8 x i16> %x) {
; GISEL-NEXT: ushl v1.8h, v0.8h, v1.8h
; GISEL-NEXT: umull2 v3.4s, v1.8h, v2.8h
; GISEL-NEXT: umull v1.4s, v1.4h, v2.4h
-; GISEL-NEXT: uzp2 v1.8h, v1.8h, v3.8h
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI1_1]
+; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI1_1]
; GISEL-NEXT: adrp x8, .LCPI1_0
-; GISEL-NEXT: sub v2.8h, v0.8h, v1.8h
-; GISEL-NEXT: umull2 v4.4s, v2.8h, v3.8h
-; GISEL-NEXT: umull v2.4s, v2.4h, v3.4h
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI1_0]
-; GISEL-NEXT: adrp x8, .LCPI1_4
-; GISEL-NEXT: uzp2 v2.8h, v2.8h, v4.8h
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI1_4]
-; GISEL-NEXT: add v1.8h, v2.8h, v1.8h
-; GISEL-NEXT: neg v2.8h, v3.8h
-; GISEL-NEXT: movi v3.8h, #1
-; GISEL-NEXT: ushl v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: cmeq v2.8h, v4.8h, v3.8h
-; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
+; GISEL-NEXT: uzp2 v1.8h, v1.8h, v3.8h
+; GISEL-NEXT: sub v0.8h, v0.8h, v1.8h
+; GISEL-NEXT: umull2 v3.4s, v0.8h, v2.8h
+; GISEL-NEXT: umull v0.4s, v0.4h, v2.4h
+; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI1_0]
+; GISEL-NEXT: uzp2 v0.8h, v0.8h, v3.8h
+; GISEL-NEXT: add v0.8h, v0.8h, v1.8h
+; GISEL-NEXT: neg v1.8h, v2.8h
+; GISEL-NEXT: ushl v0.8h, v0.8h, v1.8h
; GISEL-NEXT: ret
%1 = udiv <8 x i16> %x, <i16 23, i16 34, i16 -23, i16 56, i16 128, i16 -1, i16 -256, i16 -32768>
ret <8 x i16> %1
@@ -107,21 +102,16 @@ define <8 x i16> @combine_vec_udiv_nonuniform2(<8 x i16> %x) {
; GISEL-NEXT: adrp x8, .LCPI2_2
; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI2_2]
; GISEL-NEXT: adrp x8, .LCPI2_1
-; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI2_1]
+; GISEL-NEXT: neg v1.8h, v1.8h
+; GISEL-NEXT: ushl v0.8h, v0.8h, v1.8h
+; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI2_1]
; GISEL-NEXT: adrp x8, .LCPI2_0
+; GISEL-NEXT: umull2 v2.4s, v0.8h, v1.8h
+; GISEL-NEXT: umull v0.4s, v0.4h, v1.4h
+; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI2_0]
; GISEL-NEXT: neg v1.8h, v1.8h
-; GISEL-NEXT: ushl v1.8h, v0.8h, v1.8h
-; GISEL-NEXT: umull2 v3.4s, v1.8h, v2.8h
-; GISEL-NEXT: umull v1.4s, v1.4h, v2.4h
-; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI2_0]
-; GISEL-NEXT: adrp x8, .LCPI2_3
-; GISEL-NEXT: neg v2.8h, v2.8h
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI2_3]
-; GISEL-NEXT: uzp2 v1.8h, v1.8h, v3.8h
-; GISEL-NEXT: movi v3.8h, #1
-; GISEL-NEXT: ushl v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: cmeq v2.8h, v4.8h, v3.8h
-; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
+; GISEL-NEXT: uzp2 v0.8h, v0.8h, v2.8h
+; GISEL-NEXT: ushl v0.8h, v0.8h, v1.8h
; GISEL-NEXT: ret
%1 = udiv <8 x i16> %x, <i16 -34, i16 35, i16 36, i16 -37, i16 38, i16 -39, i16 40, i16 -41>
ret <8 x i16> %1
@@ -145,21 +135,16 @@ define <8 x i16> @combine_vec_udiv_nonuniform3(<8 x i16> %x) {
; GISEL-LABEL: combine_vec_udiv_nonuniform3:
; GISEL: // %bb.0:
; GISEL-NEXT: adrp x8, .LCPI3_1
-; GISEL-NEXT: movi v3.8h, #1
; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI3_1]
; GISEL-NEXT: adrp x8, .LCPI3_0
; GISEL-NEXT: umull2 v2.4s, v0.8h, v1.8h
; GISEL-NEXT: umull v1.4s, v0.4h, v1.4h
; GISEL-NEXT: uzp2 v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: sub v2.8h, v0.8h, v1.8h
-; GISEL-NEXT: usra v1.8h, v2.8h, #1
-; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI3_0]
-; GISEL-NEXT: adrp x8, .LCPI3_2
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI3_2]
-; GISEL-NEXT: neg v2.8h, v2.8h
-; GISEL-NEXT: ushl v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: cmeq v2.8h, v4.8h, v3.8h
-; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
+; GISEL-NEXT: sub v0.8h, v0.8h, v1.8h
+; GISEL-NEXT: usra v1.8h, v0.8h, #1
+; GISEL-NEXT: ldr q0, [x8, :lo12:.LCPI3_0]
+; GISEL-NEXT: neg v0.8h, v0.8h
+; GISEL-NEXT: ushl v0.8h, v1.8h, v0.8h
; GISEL-NEXT: ret
%1 = udiv <8 x i16> %x, <i16 7, i16 23, i16 25, i16 27, i16 31, i16 47, i16 63, i16 127>
ret <8 x i16> %1
@@ -184,19 +169,19 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) {
;
; GISEL-LABEL: combine_vec_udiv_nonuniform4:
; GISEL: // %bb.0:
+; GISEL-NEXT: adrp x8, .LCPI4_2
+; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI4_2]
; GISEL-NEXT: adrp x8, .LCPI4_1
-; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI4_1]
+; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI4_1]
; GISEL-NEXT: adrp x8, .LCPI4_0
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI4_0]
-; GISEL-NEXT: adrp x8, .LCPI4_2
; GISEL-NEXT: umull2 v2.8h, v0.16b, v1.16b
; GISEL-NEXT: umull v1.8h, v0.8b, v1.8b
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI4_2]
+; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI4_0]
; GISEL-NEXT: uzp2 v1.16b, v1.16b, v2.16b
; GISEL-NEXT: neg v2.16b, v3.16b
-; GISEL-NEXT: movi v3.16b, #1
+; GISEL-NEXT: shl v3.16b, v4.16b, #7
; GISEL-NEXT: ushl v1.16b, v1.16b, v2.16b
-; GISEL-NEXT: cmeq v2.16b, v4.16b, v3.16b
+; GISEL-NEXT: sshr v2.16b, v3.16b, #7
; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
; GISEL-NEXT: ret
%div = udiv <16 x i8> %x, <i8 -64, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -232,10 +217,10 @@ define <8 x i16> @pr38477(<8 x i16> %a0) {
;
; GISEL-LABEL: pr38477:
; GISEL: // %bb.0:
+; GISEL-NEXT: adrp x8, .LCPI5_3
+; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI5_3]
; GISEL-NEXT: adrp x8, .LCPI5_2
-; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI5_2]
-; GISEL-NEXT: adrp x8, .LCPI5_1
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI5_1]
+; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI5_2]
; GISEL-NEXT: adrp x8, .LCPI5_0
; GISEL-NEXT: umull2 v2.4s, v0.8h, v1.8h
; GISEL-NEXT: umull v1.4s, v0.4h, v1.4h
@@ -243,15 +228,16 @@ define <8 x i16> @pr38477(<8 x i16> %a0) {
; GISEL-NEXT: sub v2.8h, v0.8h, v1.8h
; GISEL-NEXT: umull2 v4.4s, v2.8h, v3.8h
; GISEL-NEXT: umull v2.4s, v2.4h, v3.4h
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI5_0]
-; GISEL-NEXT: adrp x8, .LCPI5_3
+; GISEL-NEXT: ldr d3, [x8, :lo12:.LCPI5_0]
+; GISEL-NEXT: adrp x8, .LCPI5_1
+; GISEL-NEXT: ushll v3.8h, v3.8b, #0
; GISEL-NEXT: uzp2 v2.8h, v2.8h, v4.8h
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI5_3]
+; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI5_1]
+; GISEL-NEXT: shl v3.8h, v3.8h, #15
; GISEL-NEXT: add v1.8h, v2.8h, v1.8h
-; GISEL-NEXT: neg v2.8h, v3.8h
-; GISEL-NEXT: movi v3.8h, #1
+; GISEL-NEXT: neg v2.8h, v4.8h
; GISEL-NEXT: ushl v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: cmeq v2.8h, v4.8h, v3.8h
+; GISEL-NEXT: sshr v2.8h, v3.8h, #15
; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
; GISEL-NEXT: ret
%1 = udiv <8 x i16> %a0, <i16 1, i16 119, i16 73, i16 -111, i16 -3, i16 118, i16 32, i16 31>
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir
index ee33b9c..02233b9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir
@@ -6,7 +6,9 @@ body: |
bb.1:
liveins: $w0
; CHECK-LABEL: name: udiv_by_scalar_const
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 818089009
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
@@ -68,44 +70,32 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 23
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 34
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -23
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 56
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 128
- ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
- ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 -256
- ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C2]](s16), [[C3]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16)
- ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
- ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 25645
- ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
- ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 -3855
- ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
- ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 8195
- ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 13
- ; CHECK-NEXT: [[C15:%[0-9]+]]:_(s16) = G_CONSTANT i16 3
- ; CHECK-NEXT: [[C16:%[0-9]+]]:_(s16) = G_CONSTANT i16 9363
- ; CHECK-NEXT: [[C17:%[0-9]+]]:_(s16) = G_CONSTANT i16 512
- ; CHECK-NEXT: [[C18:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32767
- ; CHECK-NEXT: [[C19:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
- ; CHECK-NEXT: [[C20:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32639
- ; CHECK-NEXT: [[C21:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C15]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C9]](s16), [[C11]](s16), [[C13]](s16), [[C16]](s16), [[C17]](s16), [[C18]](s16), [[C20]](s16), [[C21]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C7]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C10]](s16), [[C12]](s16), [[C14]](s16), [[C8]](s16), [[C8]](s16), [[C19]](s16), [[C19]](s16), [[C8]](s16)
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[COPY]], [[BUILD_VECTOR1]](<8 x s16>)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[LSHR]], [[BUILD_VECTOR2]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 25645
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -3855
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
+ ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 8195
+ ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 13
+ ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 3
+ ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 9363
+ ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 512
+ ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32767
+ ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
+ ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32639
+ ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C8]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C2]](s16), [[C4]](s16), [[C6]](s16), [[C9]](s16), [[C10]](s16), [[C11]](s16), [[C13]](s16), [[C14]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C3]](s16), [[C5]](s16), [[C7]](s16), [[C1]](s16), [[C1]](s16), [[C12]](s16), [[C12]](s16), [[C1]](s16)
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[COPY]], [[BUILD_VECTOR]](<8 x s16>)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[LSHR]], [[BUILD_VECTOR1]]
; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<8 x s16>) = G_SUB [[COPY]], [[UMULH]]
- ; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[SUB]], [[BUILD_VECTOR3]]
+ ; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[SUB]], [[BUILD_VECTOR2]]
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UMULH1]], [[UMULH]]
- ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR4]](<8 x s16>)
- ; CHECK-NEXT: [[C22:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<8 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<8 x s16>), [[BUILD_VECTOR5]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[ICMP]](<8 x s1>), [[COPY]], [[LSHR1]]
- ; CHECK-NEXT: $q0 = COPY [[SELECT]](<8 x s16>)
+ ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR3]](<8 x s16>)
+ ; CHECK-NEXT: $q0 = COPY [[LSHR1]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
%2:_(s16) = G_CONSTANT i16 23
@@ -136,38 +126,26 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -34
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 35
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 36
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -37
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 38
- ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -39
- ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 40
- ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 -41
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C2]](s16), [[C3]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16)
- ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 16393
- ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
- ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 13
- ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 -5617
- ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
- ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 -7281
- ; CHECK-NEXT: [[C15:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32749
- ; CHECK-NEXT: [[C16:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
- ; CHECK-NEXT: [[C17:%[0-9]+]]:_(s16) = G_CONSTANT i16 -10347
- ; CHECK-NEXT: [[C18:%[0-9]+]]:_(s16) = G_CONSTANT i16 8197
- ; CHECK-NEXT: [[C19:%[0-9]+]]:_(s16) = G_CONSTANT i16 -13107
- ; CHECK-NEXT: [[C20:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32747
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C9]](s16), [[C12]](s16), [[C14]](s16), [[C15]](s16), [[C17]](s16), [[C18]](s16), [[C19]](s16), [[C20]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C11]](s16), [[C13]](s16), [[C13]](s16), [[C16]](s16), [[C13]](s16), [[C11]](s16), [[C13]](s16), [[C16]](s16)
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[COPY]], [[BUILD_VECTOR1]](<8 x s16>)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[LSHR]], [[BUILD_VECTOR2]]
- ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[UMULH]], [[BUILD_VECTOR3]](<8 x s16>)
- ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<8 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<8 x s16>), [[BUILD_VECTOR4]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[ICMP]](<8 x s1>), [[COPY]], [[LSHR1]]
- ; CHECK-NEXT: $q0 = COPY [[SELECT]](<8 x s16>)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 16393
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 13
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -5617
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
+ ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 -7281
+ ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32749
+ ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
+ ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 -10347
+ ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 8197
+ ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 -13107
+ ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32747
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C4]](s16), [[C6]](s16), [[C7]](s16), [[C9]](s16), [[C10]](s16), [[C11]](s16), [[C12]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C3]](s16), [[C5]](s16), [[C5]](s16), [[C8]](s16), [[C5]](s16), [[C3]](s16), [[C5]](s16), [[C8]](s16)
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[COPY]], [[BUILD_VECTOR]](<8 x s16>)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[LSHR]], [[BUILD_VECTOR1]]
+ ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[UMULH]], [[BUILD_VECTOR2]](<8 x s16>)
+ ; CHECK-NEXT: $q0 = COPY [[LSHR1]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
%2:_(s16) = G_CONSTANT i16 -34
@@ -198,39 +176,28 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 7
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 23
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 25
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 27
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 31
- ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 47
- ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 63
- ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 127
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C2]](s16), [[C3]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16)
- ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 9363
- ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
- ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 25645
- ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
- ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 18351
- ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 12137
- ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 2115
- ; CHECK-NEXT: [[C15:%[0-9]+]]:_(s16) = G_CONSTANT i16 23705
- ; CHECK-NEXT: [[C16:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
- ; CHECK-NEXT: [[C17:%[0-9]+]]:_(s16) = G_CONSTANT i16 1041
- ; CHECK-NEXT: [[C18:%[0-9]+]]:_(s16) = G_CONSTANT i16 517
- ; CHECK-NEXT: [[C19:%[0-9]+]]:_(s16) = G_CONSTANT i16 6
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C10]](s16), [[C12]](s16), [[C13]](s16), [[C14]](s16), [[C15]](s16), [[C17]](s16), [[C18]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C9]](s16), [[C11]](s16), [[C11]](s16), [[C11]](s16), [[C11]](s16), [[C16]](s16), [[C16]](s16), [[C19]](s16)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[COPY]], [[BUILD_VECTOR1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9363
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 25645
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 18351
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 12137
+ ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 2115
+ ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 23705
+ ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
+ ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 1041
+ ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 517
+ ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 6
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C2]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16), [[C9]](s16), [[C10]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C3]](s16), [[C3]](s16), [[C3]](s16), [[C3]](s16), [[C8]](s16), [[C8]](s16), [[C11]](s16)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[COPY]], [[BUILD_VECTOR]]
; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<8 x s16>) = G_SUB [[COPY]], [[UMULH]]
- ; CHECK-NEXT: [[C20:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16)
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[SUB]], [[BUILD_VECTOR3]](<8 x s16>)
+ ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16)
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[SUB]], [[BUILD_VECTOR2]](<8 x s16>)
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[LSHR]], [[UMULH]]
- ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR2]](<8 x s16>)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<8 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<8 x s16>), [[BUILD_VECTOR3]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[ICMP]](<8 x s1>), [[COPY]], [[LSHR1]]
- ; CHECK-NEXT: $q0 = COPY [[SELECT]](<8 x s16>)
+ ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR1]](<8 x s16>)
+ ; CHECK-NEXT: $q0 = COPY [[LSHR1]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
%2:_(s16) = G_CONSTANT i16 7
@@ -261,19 +228,17 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -64
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8)
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s8) = G_CONSTANT i8 -85
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s8) = G_CONSTANT i8 7
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C3]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C4]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<16 x s8>) = G_UMULH [[COPY]], [[BUILD_VECTOR1]]
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<16 x s8>) = G_LSHR [[UMULH]], [[BUILD_VECTOR2]](<16 x s8>)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<16 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<16 x s8>), [[BUILD_VECTOR3]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<16 x s8>) = G_SELECT [[ICMP]](<16 x s1>), [[COPY]], [[LSHR]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 -85
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s8) = G_CONSTANT i8 7
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C1]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C2]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<16 x s8>) = G_UMULH [[COPY]], [[BUILD_VECTOR]]
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<16 x s8>) = G_LSHR [[UMULH]], [[BUILD_VECTOR1]](<16 x s8>)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s1>) = G_BUILD_VECTOR [[C3]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1)
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<16 x s8>) = G_SELECT [[BUILD_VECTOR2]](<16 x s1>), [[COPY]], [[LSHR]]
; CHECK-NEXT: $q0 = COPY [[SELECT]](<16 x s8>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<16 x s8>) = COPY $q0
@@ -299,39 +264,31 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 119
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 73
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -111
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -3
- ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 118
- ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 32
- ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 31
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C2]](s16), [[C3]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16)
- ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
- ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 4957
- ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
- ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 6
- ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 -8079
- ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 4103
- ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 12
- ; CHECK-NEXT: [[C15:%[0-9]+]]:_(s16) = G_CONSTANT i16 16385
- ; CHECK-NEXT: [[C16:%[0-9]+]]:_(s16) = G_CONSTANT i16 14
- ; CHECK-NEXT: [[C17:%[0-9]+]]:_(s16) = G_CONSTANT i16 -29991
- ; CHECK-NEXT: [[C18:%[0-9]+]]:_(s16) = G_CONSTANT i16 2048
- ; CHECK-NEXT: [[C19:%[0-9]+]]:_(s16) = G_CONSTANT i16 2115
- ; CHECK-NEXT: [[C20:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C9]](s16), [[C12]](s16), [[C13]](s16), [[C15]](s16), [[C17]](s16), [[C18]](s16), [[C19]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C10]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C10]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C11]](s16), [[C11]](s16), [[C14]](s16), [[C16]](s16), [[C11]](s16), [[C8]](s16), [[C20]](s16)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[COPY]], [[BUILD_VECTOR1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 4957
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 6
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -8079
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 4103
+ ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 12
+ ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 16385
+ ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 14
+ ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 -29991
+ ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 2048
+ ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 2115
+ ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C4]](s16), [[C5]](s16), [[C7]](s16), [[C9]](s16), [[C10]](s16), [[C11]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C2]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C2]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C3]](s16), [[C3]](s16), [[C6]](s16), [[C8]](s16), [[C3]](s16), [[C]](s16), [[C12]](s16)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[COPY]], [[BUILD_VECTOR]]
; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<8 x s16>) = G_SUB [[COPY]], [[UMULH]]
- ; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[SUB]], [[BUILD_VECTOR2]]
+ ; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[SUB]], [[BUILD_VECTOR1]]
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UMULH1]], [[UMULH]]
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR3]](<8 x s16>)
- ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<8 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<8 x s16>), [[BUILD_VECTOR4]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[ICMP]](<8 x s1>), [[COPY]], [[LSHR]]
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR2]](<8 x s16>)
+ ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s1>) = G_BUILD_VECTOR [[C13]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1)
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[BUILD_VECTOR3]](<8 x s1>), [[COPY]], [[LSHR]]
; CHECK-NEXT: $q0 = COPY [[SELECT]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-nneg-disjoint.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-nneg-disjoint.ll
new file mode 100644
index 0000000..32c7423
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-nneg-disjoint.ll
@@ -0,0 +1,135 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64-linux-gnu -O0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
+
+define i32 @call_nneg(i16 %a) {
+ ; CHECK-LABEL: name: call_nneg
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: %2:_(s32) = nneg G_ZEXT [[TRUNC]](s16)
+ ; CHECK-NEXT: $w0 = COPY %2(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = zext nneg i16 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_not_nneg(i16 %a) {
+ ; CHECK-LABEL: name: call_not_nneg
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s16)
+ ; CHECK-NEXT: $w0 = COPY [[ZEXT]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = zext i16 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_disjoint(i32 %a, i32 %b) {
+ ; CHECK-LABEL: name: call_disjoint
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: %2:_(s32) = disjoint G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $w0 = COPY %2(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = or disjoint i32 %a, %b
+ ret i32 %result
+}
+
+define i32 @call_add(i32 %a, i32 %b) {
+ ; CHECK-LABEL: name: call_add
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = nsw G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = add nsw i32 %a, %b
+ ret i32 %result
+}
+
+define i32 @call_not_disjoint(i32 %a, i32 %b) {
+ ; CHECK-LABEL: name: call_not_disjoint
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = or i32 %a, %b
+ ret i32 %result
+}
+
+define <2 x i64> @call_not_disjoint_vector(<2 x i64> %a, <2 x i64> %b) {
+ ; CHECK-LABEL: name: call_not_disjoint_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $q0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $q0 = COPY [[OR]](<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %result = or <2 x i64> %a, %b
+ ret <2 x i64> %result
+}
+
+define <2 x i64> @call_disjoint_vector(<2 x i64> %a, <2 x i64> %b) {
+ ; CHECK-LABEL: name: call_disjoint_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $q0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+ ; CHECK-NEXT: %2:_(<2 x s64>) = disjoint G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $q0 = COPY %2(<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %result = or disjoint <2 x i64> %a, %b
+ ret <2 x i64> %result
+}
+
+define <2 x i64> @call_nneg_vector(<2 x i32> %a) {
+ ; CHECK-LABEL: name: call_nneg_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+ ; CHECK-NEXT: %1:_(<2 x s64>) = nneg G_ZEXT [[COPY]](<2 x s32>)
+ ; CHECK-NEXT: $q0 = COPY %1(<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %result = zext nneg <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %result
+}
+
+define <2 x i64> @call_not_nneg_vector(<2 x i32> %a) {
+ ; CHECK-LABEL: name: call_not_nneg_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<2 x s64>) = G_ZEXT [[COPY]](<2 x s32>)
+ ; CHECK-NEXT: $q0 = COPY [[ZEXT]](<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %result = zext <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %result
+}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-trunc.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-trunc.ll
new file mode 100644
index 0000000..d87e9c4
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-trunc.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -O0 -mtriple=aarch64-linux-gnu -global-isel -stop-after=irtranslator %s -o - | FileCheck %s
+
+define i32 @call_trunc_no_flags(i64 %a) {
+ ; CHECK-LABEL: name: call_trunc_no_flags
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = trunc i64 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_trunc_nsw_flags(i64 %a) {
+ ; CHECK-LABEL: name: call_trunc_nsw_flags
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = nsw G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = trunc nsw i64 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_trunc_nuw_flags(i64 %a) {
+ ; CHECK-LABEL: name: call_trunc_nuw_flags
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = nuw G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = trunc nuw i64 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_trunc_all_flags(i64 %a) {
+ ; CHECK-LABEL: name: call_trunc_all_flags
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = nuw nsw G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = trunc nsw nuw i64 %a to i32
+ ret i32 %result
+}
+
+define <2 x i64> @call_trunc_noop_signed_vector(<2 x i64> %a) {
+ ; CHECK-LABEL: name: call_trunc_noop_signed_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<2 x s32>) = nsw G_TRUNC [[COPY]](<2 x s64>)
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<2 x s64>) = G_SEXT [[TRUNC]](<2 x s32>)
+ ; CHECK-NEXT: $q0 = COPY [[SEXT]](<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %truncate = trunc nsw <2 x i64> %a to <2 x i32>
+ %result = sext <2 x i32> %truncate to <2 x i64>
+ ret <2 x i64> %result
+}
+
+define <2 x i64> @call_trunc_noop_unsigned_vector(<2 x i64> %a) {
+ ; CHECK-LABEL: name: call_trunc_noop_unsigned_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<2 x s32>) = nuw G_TRUNC [[COPY]](<2 x s64>)
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<2 x s64>) = G_ZEXT [[TRUNC]](<2 x s32>)
+ ; CHECK-NEXT: $q0 = COPY [[ZEXT]](<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %truncate = trunc nuw <2 x i64> %a to <2 x i32>
+ %result = zext <2 x i32> %truncate to <2 x i64>
+ ret <2 x i64> %result
+}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll
index fe9427d..edae903 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll
@@ -6,7 +6,7 @@ declare void @llvm.trap()
define void @unreachable() {
; CHECK-LABEL: name: unreachable
; CHECK: bb.1 (%ir-block.0):
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
unreachable
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-deinterleave2.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-deinterleave2.ll
new file mode 100644
index 0000000..10882a0
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-deinterleave2.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -O0 -mtriple=aarch64-- --global-isel --global-isel-abort=2 --verify-machineinstrs --stop-after=irtranslator %s -o - | FileCheck %s
+
+define void @vector_deinterleave2_v4i32(<4 x i32> %a) {
+ ; CHECK-LABEL: name: vector_deinterleave2_v4i32
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<4 x s32>), [[DEF]], shufflemask(0, 2)
+ ; CHECK-NEXT: [[SHUF1:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<4 x s32>), [[DEF]], shufflemask(1, 3)
+ ; CHECK-NEXT: RET_ReallyLR
+ %res = call {<2 x i32>, <2 x i32>} @llvm.experimental.vector.deinterleave2.v4i32(<4 x i32> %a)
+ ret void
+}
+
+define void @vector_deinterleave2_v8f32(<8 x float> %a) {
+ ; CHECK-LABEL: name: vector_deinterleave2_v8f32
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $q0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY]](<2 x s64>)
+ ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY1]](<2 x s64>)
+ ; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[BITCAST]](<4 x s32>), [[BITCAST1]](<4 x s32>)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[CONCAT_VECTORS]](<8 x s32>), [[DEF]], shufflemask(0, 2, 4, 6)
+ ; CHECK-NEXT: [[SHUF1:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[CONCAT_VECTORS]](<8 x s32>), [[DEF]], shufflemask(1, 3, 5, 7)
+ ; CHECK-NEXT: RET_ReallyLR
+ %res = call {<4 x float>, <4 x float>} @llvm.experimental.vector.deinterleave2.v8f32(<8 x float> %a)
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-interleave2.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-interleave2.ll
new file mode 100644
index 0000000..f51e47a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-interleave2.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -O0 -mtriple=aarch64-- --global-isel --global-isel-abort=2 --verify-machineinstrs --stop-after=irtranslator %s -o - | FileCheck %s
+
+define void @vector_interleave2_v4i32(<2 x i32> %a, <2 x i32> %b) {
+ ; CHECK-LABEL: name: vector_interleave2_v4i32
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $d0, $d1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+ ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<2 x s32>), [[COPY1]], shufflemask(0, 2, 1, 3)
+ ; CHECK-NEXT: RET_ReallyLR
+ %res = call <4 x i32> @llvm.experimental.vector.interleave2.v4i32(<2 x i32> %a, <2 x i32> %b)
+ ret void
+}
+
+define void @vector_interleave2_v8f32(<4 x float> %a, <4 x float> %b) {
+ ; CHECK-LABEL: name: vector_interleave2_v8f32
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $q0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY]](<2 x s64>)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+ ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY1]](<2 x s64>)
+ ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<8 x s32>) = G_SHUFFLE_VECTOR [[BITCAST]](<4 x s32>), [[BITCAST1]], shufflemask(0, 4, 1, 5, 2, 6, 3, 7)
+ ; CHECK-NEXT: RET_ReallyLR
+ %res = call <8 x float> @llvm.experimental.vector.interleave2.v8f32(<4 x float> %a, <4 x float> %b)
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir
index 3123e30..0d429ae 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir
@@ -8,11 +8,12 @@ body: |
bb.0:
; CHECK-LABEL: name: abs_s32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
- ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[C]](s64)
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ASHR]]
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ADD]], [[ASHR]]
- ; CHECK-NEXT: $w0 = COPY [[XOR]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY]]
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[COPY]], [[SUB]]
+ ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
+ ;
; CHECK-CSSC-LABEL: name: abs_s32
; CHECK-CSSC: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-CSSC-NEXT: [[ABS:%[0-9]+]]:_(s32) = G_ABS [[COPY]]
@@ -28,11 +29,12 @@ body: |
bb.0:
; CHECK-LABEL: name: abs_s64
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
- ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s64)
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[ASHR]]
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[ADD]], [[ASHR]]
- ; CHECK-NEXT: $x0 = COPY [[XOR]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY]]
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY]](s64), [[C]]
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s32), [[COPY]], [[SUB]]
+ ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
+ ;
; CHECK-CSSC-LABEL: name: abs_s64
; CHECK-CSSC: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK-CSSC-NEXT: [[ABS:%[0-9]+]]:_(s64) = G_ABS [[COPY]]
@@ -55,6 +57,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<4 x s16>) = G_ABS [[COPY]]
; CHECK-NEXT: $d0 = COPY [[ABS]](<4 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
+ ;
; CHECK-CSSC-LABEL: name: abs_v4s16
; CHECK-CSSC: liveins: $d0
; CHECK-CSSC-NEXT: {{ $}}
@@ -82,6 +85,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<8 x s16>) = G_ABS [[COPY]]
; CHECK-NEXT: $q0 = COPY [[ABS]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
+ ;
; CHECK-CSSC-LABEL: name: abs_v8s16
; CHECK-CSSC: liveins: $q0
; CHECK-CSSC-NEXT: {{ $}}
@@ -109,6 +113,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<2 x s32>) = G_ABS [[COPY]]
; CHECK-NEXT: $d0 = COPY [[ABS]](<2 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
+ ;
; CHECK-CSSC-LABEL: name: abs_v2s32
; CHECK-CSSC: liveins: $d0
; CHECK-CSSC-NEXT: {{ $}}
@@ -136,6 +141,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<4 x s32>) = G_ABS [[COPY]]
; CHECK-NEXT: $q0 = COPY [[ABS]](<4 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
+ ;
; CHECK-CSSC-LABEL: name: abs_v4s32
; CHECK-CSSC: liveins: $q0
; CHECK-CSSC-NEXT: {{ $}}
@@ -163,6 +169,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<8 x s8>) = G_ABS [[COPY]]
; CHECK-NEXT: $d0 = COPY [[ABS]](<8 x s8>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
+ ;
; CHECK-CSSC-LABEL: name: abs_v4s8
; CHECK-CSSC: liveins: $d0
; CHECK-CSSC-NEXT: {{ $}}
@@ -190,6 +197,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<16 x s8>) = G_ABS [[COPY]]
; CHECK-NEXT: $q0 = COPY [[ABS]](<16 x s8>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
+ ;
; CHECK-CSSC-LABEL: name: abs_v16s8
; CHECK-CSSC: liveins: $q0
; CHECK-CSSC-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir
index c9556e2..a63d8b9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir
@@ -121,10 +121,11 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $h1
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s16)
- ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY1]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32)
- ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[COPY]](s16), [[COPY1]](s16), [[DEF]](s16), [[DEF]](s16)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[BUILD_VECTOR]](<4 x s16>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[ANYEXT]](<4 x s32>)
+ ; CHECK-NEXT: $d0 = COPY [[UV]](<2 x s32>)
; CHECK-NEXT: RET_ReallyLR
%0:_(s16) = COPY $h0
%1:_(s16) = COPY $h1
@@ -141,8 +142,8 @@ body: |
; CHECK-LABEL: name: widen_v2s8
; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
- ; CHECK-NEXT: %3:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[DEF]](s32)
- ; CHECK-NEXT: $d0 = COPY %3(<2 x s32>)
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[DEF]](s32)
+ ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<2 x s32>)
; CHECK-NEXT: RET_ReallyLR
%0:_(s8) = G_IMPLICIT_DEF
%1:_(s8) = G_IMPLICIT_DEF
@@ -157,12 +158,14 @@ name: widen_v4s8
body: |
bb.0:
; CHECK-LABEL: name: widen_v4s8
- ; CHECK: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY [[DEF]](s16)
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY [[DEF]](s16)
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY [[DEF]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[COPY]](s16), [[COPY1]](s16), [[COPY2]](s16), [[DEF]](s16)
- ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF3:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF1]](s8), [[DEF2]](s8), [[DEF3]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR]](<8 x s8>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT]](<8 x s16>)
+ ; CHECK-NEXT: $d0 = COPY [[UV]](<4 x s16>)
; CHECK-NEXT: RET_ReallyLR
%0:_(s8) = G_IMPLICIT_DEF
%1:_(s8) = G_IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir
index 6a6e0b6..26230ef 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir
@@ -12,22 +12,6 @@ body: |
liveins: $x0, $x1, $x2, $x3, $x4
- ; CHECK-LABEL: name: compare_swap_128
- ; CHECK: liveins: $x0_x1, $x1, $x0, $x1, $x2, $x3, $x4
- ; CHECK: [[COPY:%[0-9]+]]:gpr64(p0) = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
- ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
- ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
- ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
- ; CHECK: [[COPY5:%[0-9]+]]:gpr64(s64) = COPY [[COPY1]](s64)
- ; CHECK: [[COPY6:%[0-9]+]]:gpr64(s64) = COPY [[COPY2]](s64)
- ; CHECK: [[COPY7:%[0-9]+]]:gpr64(s64) = COPY [[COPY3]](s64)
- ; CHECK: [[COPY8:%[0-9]+]]:gpr64(s64) = COPY [[COPY4]](s64)
- ; CHECK: early-clobber %13:gpr64(s64), early-clobber %14:gpr64(s64), early-clobber %16:gpr32common = CMP_SWAP_128_ACQUIRE [[COPY]](p0), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64) :: (load store acquire acquire 16)
- ; CHECK: [[COPY9:%[0-9]+]]:gpr64 = COPY %16
- ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES %13(s64), %14(s64)
- ; CHECK: G_STORE [[MV]](s128), [[COPY]](p0) :: (store 16)
- ; CHECK: RET_ReallyLR
; CHECK-NOLSE-LABEL: name: compare_swap_128
; CHECK-NOLSE: liveins: $x0_x1, $x1, $x0, $x1, $x2, $x3, $x4
; CHECK-NOLSE-NEXT: {{ $}}
@@ -40,11 +24,13 @@ body: |
; CHECK-NOLSE-NEXT: [[COPY6:%[0-9]+]]:gpr64(s64) = COPY [[COPY2]](s64)
; CHECK-NOLSE-NEXT: [[COPY7:%[0-9]+]]:gpr64(s64) = COPY [[COPY3]](s64)
; CHECK-NOLSE-NEXT: [[COPY8:%[0-9]+]]:gpr64(s64) = COPY [[COPY4]](s64)
- ; CHECK-NOLSE-NEXT: early-clobber %13:gpr64common(s64), early-clobber %14:gpr64common(s64), early-clobber %16:gpr32common = CMP_SWAP_128_ACQUIRE [[COPY]](p0), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64) :: (load store acquire acquire (s128))
- ; CHECK-NOLSE-NEXT: [[COPY9:%[0-9]+]]:gpr64 = COPY %16
- ; CHECK-NOLSE-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES %13(s64), %14(s64)
- ; CHECK-NOLSE-NEXT: G_STORE [[MV]](s128), [[COPY]](p0) :: (store (s128))
- ; CHECK-NOLSE-NEXT: RET_ReallyLR
+ ; CHECK-NOLSE-NEXT: early-clobber %14:gpr64common(s64), early-clobber %15:gpr64common(s64), early-clobber %17:gpr32common = CMP_SWAP_128_ACQUIRE [[COPY]](p0), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64) :: (load store acquire acquire (s128))
+ ; CHECK-NOLSE-NEXT: [[COPY9:%[0-9]+]]:gpr64 = COPY %17
+ ; CHECK-NOLSE-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES %14(s64), %15(s64)
+ ; CHECK-NOLSE-NEXT: [[COPY10:%[0-9]+]]:_(s128) = COPY [[MV]](s128)
+ ; CHECK-NOLSE-NEXT: G_STORE [[COPY10]](s128), [[COPY]](p0) :: (store (s128))
; CHECK-NOLSE-NEXT: RET_ReallyLR
+ ;
; CHECK-LSE-LABEL: name: compare_swap_128
; CHECK-LSE: liveins: $x0_x1, $x1, $x0, $x1, $x2, $x3, $x4
; CHECK-LSE-NEXT: {{ $}}
@@ -59,7 +45,8 @@ body: |
; CHECK-LSE-NEXT: [[EXTRACT:%[0-9]+]]:_(s64) = G_EXTRACT [[CASPAX]](s128), 0
; CHECK-LSE-NEXT: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[CASPAX]](s128), 64
; CHECK-LSE-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[EXTRACT]](s64), [[EXTRACT1]](s64)
- ; CHECK-LSE-NEXT: G_STORE [[MV]](s128), [[COPY]](p0) :: (store (s128))
+ ; CHECK-LSE-NEXT: [[COPY5:%[0-9]+]]:_(s128) = COPY [[MV]](s128)
+ ; CHECK-LSE-NEXT: G_STORE [[COPY5]](s128), [[COPY]](p0) :: (store (s128))
; CHECK-LSE-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
%3:_(s64) = COPY $x1
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
index 3c01078..05e6212 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
@@ -16,13 +16,16 @@ body: |
liveins: $x0
; CHECK-LABEL: name: cmpxchg_i32
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
- ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s32), [[C]]
- ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[ATOMIC_CMPXCHG]], [[ICMP]]
- ; CHECK: $w0 = COPY [[MUL]](s32)
+ ; CHECK: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s32), [[C]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ATOMIC_CMPXCHG]](s32)
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY1]], [[ICMP]]
+ ; CHECK-NEXT: $w0 = COPY [[MUL]](s32)
%0:_(p0) = COPY $x0
%1:_(s32) = G_CONSTANT i32 0
%2:_(s32) = G_CONSTANT i32 1
@@ -40,14 +43,17 @@ body: |
liveins: $x0
; CHECK-LABEL: name: cmpxchg_i64
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
- ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s64), [[C]]
- ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP]](s32)
- ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ATOMIC_CMPXCHG]], [[ANYEXT]]
- ; CHECK: $x0 = COPY [[MUL]](s64)
+ ; CHECK: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s64), [[C]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ATOMIC_CMPXCHG]](s64)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP]](s32)
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY1]], [[ANYEXT]]
+ ; CHECK-NEXT: $x0 = COPY [[MUL]](s64)
%0:_(p0) = COPY $x0
%1:_(s64) = G_CONSTANT i64 0
%2:_(s64) = G_CONSTANT i64 1
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir
index d2352be..27f2f0b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir
@@ -37,6 +37,7 @@ body: |
; CHECK-NEXT: %ctpop:_(s32) = G_LSHR [[MUL]], [[C7]](s64)
; CHECK-NEXT: $w0 = COPY %ctpop(s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
+ ;
; CHECK-CSSC-LABEL: name: s32
; CHECK-CSSC: liveins: $w0
; CHECK-CSSC-NEXT: {{ $}}
@@ -77,11 +78,12 @@ body: |
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C5]]
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C6]]
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C6]]
; CHECK-NEXT: %ctpop:_(s64) = G_LSHR [[MUL]], [[C7]](s64)
; CHECK-NEXT: $x0 = COPY %ctpop(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
+ ;
; CHECK-CSSC-LABEL: name: s64
; CHECK-CSSC: liveins: $x0
; CHECK-CSSC-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
index 5662de4..f7550ce 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
@@ -48,7 +48,7 @@ define void @bar() personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: $x0 = COPY [[LOAD]](p0)
; CHECK-NEXT: BL @_Unwind_Resume, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
%exn.slot = alloca ptr
%ehselector.slot = alloca i32
%1 = invoke i32 @foo(i32 42) to label %continue unwind label %cleanup
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
index e12353c..d3db243 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
@@ -235,31 +235,32 @@ body: |
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
+ ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[COPY3]](s32)
; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
- ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
- ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16), [[TRUNC2]](s16), [[DEF2]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[TRUNC]](s8), [[TRUNC1]](s8), [[TRUNC2]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR]](<8 x s8>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT]](<8 x s16>)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
- ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s16>) = G_INSERT_VECTOR_ELT [[BUILD_VECTOR]], [[C2]](s16), [[C1]](s64)
- ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[IVEC]](<4 x s16>)
- ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[UV]](s16)
- ; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[UV1]](s16)
- ; CHECK-NEXT: [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
+ ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s16>) = G_INSERT_VECTOR_ELT [[UV]], [[C2]](s16), [[C1]](s64)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[IVEC]](<4 x s16>)
+ ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
+ ; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[UV3]](s16)
+ ; CHECK-NEXT: [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[UV4]](s16)
; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[TRUNC3]](s8), [[TRUNC4]](s8), [[TRUNC5]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<16 x s8>) = G_SHUFFLE_VECTOR [[BUILD_VECTOR1]](<16 x s8>), [[BUILD_VECTOR2]], shufflemask(0, 16, 16, 16, 1, 16, 16, 16, 2, 16, 16, 16, undef, undef, undef, undef)
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[SHUF]](<16 x s8>)
; CHECK-NEXT: [[UITOFP:%[0-9]+]]:_(<4 x s32>) = G_UITOFP [[BITCAST]](<4 x s32>)
- ; CHECK-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UITOFP]](<4 x s32>)
- ; CHECK-NEXT: G_STORE [[UV4]](s32), [[COPY]](p0) :: (store (s32), align 16)
+ ; CHECK-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UITOFP]](<4 x s32>)
+ ; CHECK-NEXT: G_STORE [[UV6]](s32), [[COPY]](p0) :: (store (s32), align 16)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
- ; CHECK-NEXT: G_STORE [[UV5]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4)
+ ; CHECK-NEXT: G_STORE [[UV7]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
- ; CHECK-NEXT: G_STORE [[UV6]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 8, align 8)
+ ; CHECK-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 8, align 8)
; CHECK-NEXT: G_BR %bb.1
bb.1:
liveins: $w1, $w2, $w3, $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
index 5cbb864..b8328ed 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
@@ -607,9 +607,11 @@ body: |
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD1]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32)
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[BUILD_VECTOR]](<2 x s32>)
- ; CHECK-NEXT: $s0 = COPY [[TRUNC]](<2 x s16>)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[DEF]](s32), [[DEF]](s32)
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[BUILD_VECTOR]](<4 x s32>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[TRUNC]](<4 x s16>)
+ ; CHECK-NEXT: $s0 = COPY [[UV]](<2 x s16>)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
%1(<2 x s16>) = G_LOAD %0(p0) :: (load (<2 x s16>))
@@ -711,33 +713,24 @@ body: |
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %ptr:_(p0) = COPY $x0
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD %ptr(p0) :: (load (p0), align 64)
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr(p0) :: (load (<2 x s64>), align 64)
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD]](<2 x s64>)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD]](p0) :: (load (p0) from unknown-address + 8)
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
+ ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD1]](<2 x s64>)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C1]](s64)
- ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD1]](p0) :: (load (p0) from unknown-address + 16, align 16)
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C2]](s64)
- ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD2]](p0) :: (load (p0) from unknown-address + 24)
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C3]](s64)
- ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD3]](p0) :: (load (p0) from unknown-address + 32, align 32)
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C4]](s64)
- ; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD4]](p0) :: (load (p0) from unknown-address + 40)
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p0>) = G_BUILD_VECTOR [[LOAD]](p0), [[LOAD1]](p0)
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x p0>) = G_BUILD_VECTOR [[LOAD2]](p0), [[LOAD3]](p0)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x p0>) = G_BUILD_VECTOR [[LOAD4]](p0), [[LOAD5]](p0)
- ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BUILD_VECTOR]](<2 x p0>)
- ; CHECK-NEXT: G_STORE [[BITCAST]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>), align 64)
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C1]](s64)
- ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BUILD_VECTOR1]](<2 x p0>)
- ; CHECK-NEXT: G_STORE [[BITCAST1]](<2 x s64>), [[PTR_ADD5]](p0) :: (store (<2 x s64>) into unknown-address + 16)
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C3]](s64)
- ; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BUILD_VECTOR2]](<2 x p0>)
- ; CHECK-NEXT: G_STORE [[BITCAST2]](<2 x s64>), [[PTR_ADD6]](p0) :: (store (<2 x s64>) into unknown-address + 32, align 32)
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<2 x s64>) from unknown-address + 32, align 32)
+ ; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD2]](<2 x s64>)
+ ; CHECK-NEXT: [[BITCAST3:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BITCAST]](<2 x p0>)
+ ; CHECK-NEXT: G_STORE [[BITCAST3]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>), align 64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[BITCAST4:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BITCAST1]](<2 x p0>)
+ ; CHECK-NEXT: G_STORE [[BITCAST4]](<2 x s64>), [[PTR_ADD2]](p0) :: (store (<2 x s64>) into unknown-address + 16)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C1]](s64)
+ ; CHECK-NEXT: [[BITCAST5:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BITCAST2]](<2 x p0>)
+ ; CHECK-NEXT: G_STORE [[BITCAST5]](<2 x s64>), [[PTR_ADD3]](p0) :: (store (<2 x s64>) into unknown-address + 32, align 32)
; CHECK-NEXT: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%val:_(<6 x p0>) = G_LOAD %ptr(p0) :: (load (<6 x p0>))
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
index 63a26dc..e49a94c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
@@ -293,41 +293,44 @@ body: |
; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), %w0(s32), [[C]]
; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ICMP2]], 1
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY [[DEF1]](s16)
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY [[DEF1]](s16)
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY [[DEF1]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[COPY]](s16), [[COPY1]](s16), [[COPY2]](s16), [[DEF1]](s16)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR]](<8 x s8>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT]](<8 x s16>)
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[SEXT_INREG]](s32)
- ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s16>) = G_INSERT_VECTOR_ELT [[BUILD_VECTOR]], [[TRUNC]](s16), [[C1]](s64)
- ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[IVEC]](<4 x s16>)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[UV]](s16)
- ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[UV1]](s16)
- ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
- ; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[UV3]](s16)
- ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[TRUNC1]](s8), [[TRUNC2]](s8), [[TRUNC3]](s8), [[TRUNC4]](s8), [[DEF2]](s8), [[DEF2]](s8), [[DEF2]](s8), [[DEF2]](s8)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF2]](s8), [[DEF2]](s8), [[DEF2]](s8), [[DEF2]](s8)
+ ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s16>) = G_INSERT_VECTOR_ELT [[UV]], [[TRUNC]](s16), [[C1]](s64)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[IVEC]](<4 x s16>)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
+ ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[UV3]](s16)
+ ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[UV4]](s16)
+ ; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[UV5]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[TRUNC1]](s8), [[TRUNC2]](s8), [[TRUNC3]](s8), [[TRUNC4]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<8 x s8>) = G_SHUFFLE_VECTOR [[BUILD_VECTOR1]](<8 x s8>), [[BUILD_VECTOR2]], shufflemask(0, 0, 0, 0, undef, undef, undef, undef)
- ; CHECK-NEXT: [[UV4:%[0-9]+]]:_(<4 x s8>), [[UV5:%[0-9]+]]:_(<4 x s8>) = G_UNMERGE_VALUES [[SHUF]](<8 x s8>)
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s16) = COPY [[C2]](s16)
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s16) = COPY [[C2]](s16)
- ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s16) = COPY [[C2]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[COPY3]](s16), [[COPY4]](s16), [[COPY5]](s16), [[C2]](s16)
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[UV4]](<4 x s8>)
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s16>) = G_XOR [[ANYEXT]], [[BUILD_VECTOR3]]
+ ; CHECK-NEXT: [[UV6:%[0-9]+]]:_(<4 x s8>), [[UV7:%[0-9]+]]:_(<4 x s8>) = G_UNMERGE_VALUES [[SHUF]](<8 x s8>)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+ ; CHECK-NEXT: [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[UV6]](<4 x s8>)
+ ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[UV8]](s8), [[UV9]](s8), [[UV10]](s8), [[UV11]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR3]](<8 x s8>)
+ ; CHECK-NEXT: [[UV12:%[0-9]+]]:_(<4 x s16>), [[UV13:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT1]](<8 x s16>)
+ ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR4]](<8 x s8>)
+ ; CHECK-NEXT: [[UV14:%[0-9]+]]:_(<4 x s16>), [[UV15:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT2]](<8 x s16>)
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s16>) = G_XOR [[UV12]], [[UV14]]
; CHECK-NEXT: [[TRUNC5:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[ICMP]](<4 x s32>)
- ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[UV4]](<4 x s8>)
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s16>) = G_AND [[TRUNC5]], [[ANYEXT1]]
+ ; CHECK-NEXT: [[UV16:%[0-9]+]]:_(s8), [[UV17:%[0-9]+]]:_(s8), [[UV18:%[0-9]+]]:_(s8), [[UV19:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[UV6]](<4 x s8>)
+ ; CHECK-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[UV16]](s8), [[UV17]](s8), [[UV18]](s8), [[UV19]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR5]](<8 x s8>)
+ ; CHECK-NEXT: [[UV20:%[0-9]+]]:_(<4 x s16>), [[UV21:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT3]](<8 x s16>)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s16>) = G_AND [[TRUNC5]], [[UV20]]
; CHECK-NEXT: [[TRUNC6:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[ICMP1]](<4 x s32>)
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<4 x s16>) = G_AND [[TRUNC6]], [[XOR]]
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<4 x s16>) = G_OR [[AND]], [[AND1]]
- ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[OR]](<4 x s16>)
+ ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[OR]](<4 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32), [[C3]](s32), [[C3]](s32)
- ; CHECK-NEXT: %zext_select:_(<4 x s32>) = G_AND [[ANYEXT2]], [[BUILD_VECTOR4]]
+ ; CHECK-NEXT: [[BUILD_VECTOR6:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32), [[C3]](s32), [[C3]](s32)
+ ; CHECK-NEXT: %zext_select:_(<4 x s32>) = G_AND [[ANYEXT4]], [[BUILD_VECTOR6]]
; CHECK-NEXT: $q0 = COPY %zext_select(<4 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%w0:_(s32) = COPY $w0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll
index 42a8f51..f7efaea 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll
@@ -1,16 +1,24 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -global-isel-abort=2 -global-isel -o - %s | FileCheck %s
+; RUN: llc -global-isel -o - %s | FileCheck %s
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "arm64-apple-macosx11.0.0"
declare i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32>) #0
-; This test currently falls back but ensures we don't crash.
-
define i32 @bar() {
; CHECK-LABEL: bar:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: movi.2d v0, #0000000000000000
+; CHECK-NEXT: mov b1, v0[1]
+; CHECK-NEXT: mov b2, v0[2]
+; CHECK-NEXT: mov b3, v0[3]
+; CHECK-NEXT: mov.h v0[1], v1[0]
+; CHECK-NEXT: mov.h v2[1], v3[0]
+; CHECK-NEXT: ushll.4s v0, v0, #0
+; CHECK-NEXT: ushll.4s v1, v2, #0
+; CHECK-NEXT: mov.d v0[1], v1[0]
+; CHECK-NEXT: movi.4s v1, #1
+; CHECK-NEXT: and.16b v0, v0, v1
; CHECK-NEXT: addv.4s s0, v0
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir
index 6612651..e729f02 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir
@@ -540,10 +540,14 @@ body: |
; CHECK: liveins: $d0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<2 x s8>) = G_TRUNC [[COPY]](<2 x s32>)
- ; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s8>) = G_CONCAT_VECTORS [[TRUNC]](<2 x s8>), [[TRUNC]](<2 x s8>)
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[CONCAT_VECTORS]](<4 x s8>)
- ; CHECK-NEXT: $d0 = COPY [[ANYEXT]](<4 x s16>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[UV1]](s32)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[TRUNC]](s8), [[TRUNC1]](s8), [[TRUNC]](s8), [[TRUNC1]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR]](<8 x s8>)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(<4 x s16>), [[UV3:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT]](<8 x s16>)
+ ; CHECK-NEXT: $d0 = COPY [[UV2]](<4 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
%0:_(<2 x s32>) = COPY $d0
%1:_(<2 x s8>) = G_TRUNC %0(<2 x s32>)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index c9e5f89..ac3c47c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -752,6 +752,15 @@
# DEBUG-NEXT: G_BZERO (opcode {{[0-9]+}}): 2 type indices, 1 imm index
# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: G_TRAP (opcode {{[0-9]+}}): 0 type indices, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: G_DEBUGTRAP (opcode {{[0-9]+}}): 0 type indices, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: G_UBSANTRAP (opcode {{[0-9]+}}): 0 type indices, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: G_VECREDUCE_SEQ_FADD (opcode {{[0-9]+}}): 3 type indices, 0 imm indices
# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
index 0cf9602..499c08f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
@@ -40,11 +40,12 @@ body: |
    liveins: $x0, $x1
    ; CHECK-LABEL: name: ldrxrox_breg_oreg
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: $x0 = COPY [[LDRXroX]]
- ; CHECK: RET_ReallyLR implicit $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $x0 = COPY [[LDRXroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -65,11 +66,12 @@ body: |
liveins: $d0, $x1
; CHECK-LABEL: name: ldrdrox_breg_oreg
; CHECK: liveins: $d0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: $d0 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d0 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -78,6 +80,9 @@ body: |
RET_ReallyLR implicit $d0
...
---
+# This shouldn't be folded, since we reuse the result of the G_PTR_ADD outside
+# the G_LOAD.
+
name: more_than_one_use
alignment: 4
legalized: true
@@ -87,18 +92,17 @@ machineFunctionInfo: {}
body: |
bb.0:
liveins: $x0, $x1
- ; This shouldn't be folded, since we reuse the result of the G_PTR_ADD outside
- ; the G_LOAD
; CHECK-LABEL: name: more_than_one_use
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
- ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
- ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
- ; CHECK: $x0 = COPY [[ADDXrr1]]
- ; CHECK: RET_ReallyLR implicit $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
+ ; CHECK-NEXT: $x0 = COPY [[ADDXrr1]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -121,11 +125,12 @@ body: |
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_shl
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $x2 = COPY [[LDRXroX]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -148,11 +153,12 @@ body: |
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_shl
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -175,11 +181,12 @@ body: |
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_mul_rhs
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $x2 = COPY [[LDRXroX]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %0, %1(s64)
@@ -202,11 +209,12 @@ body: |
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_mul_rhs
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %0, %1(s64)
@@ -229,11 +237,12 @@ body: |
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_mul_lhs
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $x2 = COPY [[LDRXroX]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %1, %0(s64)
@@ -256,11 +265,12 @@ body: |
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_mul_lhs
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %1, %0(s64)
@@ -272,6 +282,9 @@ body: |
...
---
+# Show that we don't get a shifted load from a mul when we don't have a
+# power of 2. (The bit isn't set on the load.)
+
name: mul_not_pow_2
alignment: 4
legalized: true
@@ -280,19 +293,18 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that we don't get a shifted load from a mul when we don't have a
- ; power of 2. (The bit isn't set on the load.)
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: mul_not_pow_2
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 7
- ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
- ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 7
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
+ ; CHECK-NEXT: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 7
%2:gpr(s64) = G_MUL %1, %0(s64)
@@ -304,6 +316,9 @@ body: |
...
---
+# Show that we don't get a shifted load from a mul when the multiplier is a
+# power of 2 that doesn't match the load size. (The shift bit isn't set on
+# the load.)
+
name: mul_wrong_pow_2
alignment: 4
legalized: true
@@ -312,19 +327,18 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that we don't get a shifted load from a mul when we don't have
- ; the right power of 2. (The bit isn't set on the load.)
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: mul_wrong_pow_2
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 16
- ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
- ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 16
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
+ ; CHECK-NEXT: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 16
%2:gpr(s64) = G_MUL %1, %0(s64)
@@ -336,6 +350,9 @@ body: |
...
---
+# Show that we can still fall back to the register-register addressing
+# mode when we fail to pull in the shift.
+
name: more_than_one_use_shl_1
alignment: 4
legalized: true
@@ -344,19 +361,18 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that we can still fall back to the register-register addressing
- ; mode when we fail to pull in the shift.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_1
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
- ; CHECK: $x2 = COPY [[ADDXrr]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -370,6 +386,9 @@ body: |
...
---
+# Show that when the GEP is used outside a memory op, we don't do any
+# folding at all.
+
name: more_than_one_use_shl_2
alignment: 4
legalized: true
@@ -378,22 +397,21 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that when the GEP is used outside a memory op, we don't do any
- ; folding at all.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_2
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
- ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
- ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[ADDXri]]
- ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
- ; CHECK: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[ADDXrr1]]
- ; CHECK: $x2 = COPY [[ADDXrr2]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
+ ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
+ ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[ADDXri]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[ADDXrr1]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr2]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -409,6 +427,9 @@ body: |
...
---
+# Show that when we have a fastpath for shift-left, we perform the folding
+# even if the shift has more than one use.
+
name: more_than_one_use_shl_lsl_fast
alignment: 4
legalized: true
@@ -417,18 +438,17 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that when we have a fastpath for shift-left, we perform the folding
- ; if it has more than one use.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_lsl_fast
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
- ; CHECK: $x2 = COPY [[ADDXrr]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -442,6 +462,9 @@ body: |
...
---
+# Show that we don't fold into multiple memory ops when we don't have a
+# fastpath for shift-left.
+
name: more_than_one_use_shl_lsl_slow
alignment: 4
legalized: true
@@ -450,19 +473,18 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that we don't fold into multiple memory ops when we don't have a
- ; fastpath for shift-left.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_lsl_slow
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64common = ADDXrs [[COPY1]], [[COPY]], 3
- ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
- ; CHECK: $x2 = COPY [[ADDXrr]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ADDXrs:%[0-9]+]]:gpr64common = ADDXrs [[COPY1]], [[COPY]], 3
+ ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -476,6 +498,9 @@ body: |
...
---
+# Show that when we're optimizing for size, we do the folding regardless of
+# how many uses the shift has.
+
name: more_than_one_use_shl_minsize
alignment: 4
legalized: true
@@ -484,22 +509,21 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that when we're optimizing for size, we'll do the folding no matter
- ; what.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_minsize
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
- ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
- ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY2]], [[COPY]], 3
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
- ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrs]], [[ADDXrr]]
- ; CHECK: $x2 = COPY [[ADDXrr1]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY2]], [[COPY]], 3
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
+ ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrs]], [[ADDXrr]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr1]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -525,11 +549,12 @@ body: |
liveins: $x0, $x1
; CHECK-LABEL: name: ldrwrox
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
- ; CHECK: $w2 = COPY [[LDRWroX]]
- ; CHECK: RET_ReallyLR implicit $w2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
+ ; CHECK-NEXT: $w2 = COPY [[LDRWroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $w2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -549,11 +574,12 @@ body: |
liveins: $d0, $x1
; CHECK-LABEL: name: ldrsrox
; CHECK: liveins: $d0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
- ; CHECK: $s2 = COPY [[LDRSroX]]
- ; CHECK: RET_ReallyLR implicit $h2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
+ ; CHECK-NEXT: $s2 = COPY [[LDRSroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $h2
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -573,11 +599,12 @@ body: |
liveins: $x0, $x1
; CHECK-LABEL: name: ldrhrox
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load (s16) from %ir.addr)
- ; CHECK: $h2 = COPY [[LDRHroX]]
- ; CHECK: RET_ReallyLR implicit $h2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load (s16) from %ir.addr)
+ ; CHECK-NEXT: $h2 = COPY [[LDRHroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $h2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -597,11 +624,12 @@ body: |
liveins: $x0, $x1
; CHECK-LABEL: name: ldbbrox
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load (s8) from %ir.addr)
- ; CHECK: $w2 = COPY [[LDRBBroX]]
- ; CHECK: RET_ReallyLR implicit $w2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load (s8) from %ir.addr)
+ ; CHECK-NEXT: $w2 = COPY [[LDRBBroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $w2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -621,11 +649,12 @@ body: |
liveins: $d0, $x1
; CHECK-LABEL: name: ldrqrox
; CHECK: liveins: $d0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load (<2 x s64>) from %ir.addr)
- ; CHECK: $q0 = COPY [[LDRQroX]]
- ; CHECK: RET_ReallyLR implicit $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load (<2 x s64>) from %ir.addr)
+ ; CHECK-NEXT: $q0 = COPY [[LDRQroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir
index 94f56e5..9483cbf 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple aarch64 -debugify-and-strip-all-safe -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="addo_by_0" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple aarch64 -debugify-and-strip-all-safe -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="match_addos" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
# REQUIRES: asserts
# (G_*ADDO x, 0) -> x + no carry
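Only the rule-filter name in the RUN line changes (addo_by_0 now lives under
match_addos); the combine is the one the comment above describes. A rough C
illustration of why the carry of an add-with-zero is statically dead
(__builtin_add_overflow is the GCC/Clang builtin; the folded result is an
assumption about optimized output):

    /* Adding 0 can never overflow, so the overflow flag is always
       false and the whole expression reduces to just x. */
    int add_zero(int x) {
      int sum;
      _Bool ov = __builtin_add_overflow(x, 0, &sum);
      return ov ? -1 : sum; /* optimizes to: return x */
    }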
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir
index ad66fa5..25ecce4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir
@@ -26,7 +26,7 @@ body: |
; CHECK-LABEL: name: foo
; CHECK: BRK 1
; CHECK: RET_ReallyLR
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
RET_ReallyLR
...
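G_TRAP is the dedicated generic opcode that replaces the old
G_INTRINSIC_W_SIDE_EFFECTS encoding of @llvm.trap, and it still selects to
BRK 1 on AArch64. A one-line C source that reaches this lowering (a sketch,
assuming the GlobalISel pipeline from the RUN line):

    /* __builtin_trap() -> llvm.trap -> G_TRAP -> "brk #0x1" */
    void die(void) {
      __builtin_trap();
    }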
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
index bcdd77a..b3613f5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
# RUN: llc -O0 -mtriple=aarch64-apple-ios -run-pass=instruction-select -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=IOS
# RUN: llc -O0 -mtriple=aarch64-linux-gnu -relocation-model=pic -run-pass=instruction-select -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=LINUX-PIC
@@ -26,40 +27,35 @@
...
---
-# CHECK-LABEL: name: frame_index
name: frame_index
legalized: true
regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp, preferred-register: '' }
-registers:
- - { id: 0, class: gpr }
-
stack:
- { id: 0, name: ptr0, offset: 0, size: 8, alignment: 8 }
-
-# CHECK: body:
-# CHECK: %0:gpr64sp = ADDXri %stack.0.ptr0, 0, 0
body: |
bb.0:
- %0(p0) = G_FRAME_INDEX %stack.0.ptr0
+ ; CHECK-LABEL: name: frame_index
+ ; CHECK: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0.ptr0, 0, 0
+ ; CHECK-NEXT: $x0 = COPY [[ADDXri]]
+ %0:gpr(p0) = G_FRAME_INDEX %stack.0.ptr0
$x0 = COPY %0(p0)
...
---
---
-# CHECK-LABEL: name: ptr_mask
name: ptr_mask
legalized: true
regBankSelected: true
-
-# CHECK: body:
-# CHECK: %2:gpr64sp = ANDXri %0, 8060
body: |
bb.0:
liveins: $x0
+ ; CHECK-LABEL: name: ptr_mask
+ ; CHECK: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[COPY]], 8060
+ ; CHECK-NEXT: $x0 = COPY [[ANDXri]]
%0:gpr(p0) = COPY $x0
%const:gpr(s64) = G_CONSTANT i64 -8
%1:gpr(p0) = G_PTRMASK %0, %const
@@ -68,180 +64,171 @@ body: |
---
# Global defined in the same linkage unit so no GOT is needed
-# CHECK-LABEL: name: global_local
name: global_local
legalized: true
regBankSelected: true
-registers:
- - { id: 0, class: gpr }
-
-# CHECK: body:
-# IOS: %0:gpr64common = MOVaddr target-flags(aarch64-page) @var_local, target-flags(aarch64-pageoff, aarch64-nc) @var_local
-# LINUX-PIC: %0:gpr64common = LOADgot target-flags(aarch64-got) @var_local
body: |
bb.0:
- %0(p0) = G_GLOBAL_VALUE @var_local
+ ; IOS-LABEL: name: global_local
+ ; IOS: [[MOVaddr:%[0-9]+]]:gpr64common = MOVaddr target-flags(aarch64-page) @var_local, target-flags(aarch64-pageoff, aarch64-nc) @var_local
+ ; IOS-NEXT: $x0 = COPY [[MOVaddr]]
+ ;
+ ; LINUX-PIC-LABEL: name: global_local
+ ; LINUX-PIC: [[LOADgot:%[0-9]+]]:gpr64common = LOADgot target-flags(aarch64-got) @var_local
+ ; LINUX-PIC-NEXT: $x0 = COPY [[LOADgot]]
+ %0:gpr(p0) = G_GLOBAL_VALUE @var_local
$x0 = COPY %0(p0)
...
---
-# CHECK-LABEL: name: global_got
name: global_got
legalized: true
regBankSelected: true
-registers:
- - { id: 0, class: gpr }
-
-# CHECK: body:
-# IOS: %0:gpr64common = LOADgot target-flags(aarch64-got) @var_got
-# LINUX-PIC: %0:gpr64common = LOADgot target-flags(aarch64-got) @var_got
body: |
bb.0:
- %0(p0) = G_GLOBAL_VALUE @var_got
+ ; CHECK-LABEL: name: global_got
+ ; CHECK: [[LOADgot:%[0-9]+]]:gpr64common = LOADgot target-flags(aarch64-got) @var_got
+ ; CHECK-NEXT: $x0 = COPY [[LOADgot]]
+ %0:gpr(p0) = G_GLOBAL_VALUE @var_got
$x0 = COPY %0(p0)
...
---
-# CHECK-LABEL: name: icmp
name: icmp
legalized: true
regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 1, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 2, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 3, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 4, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 5, class: gpr32, preferred-register: '' }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
- - { id: 5, class: gpr }
- - { id: 6, class: gpr }
- - { id: 7, class: gpr }
- - { id: 8, class: gpr }
- - { id: 9, class: gpr }
- - { id: 10, class: gpr }
- - { id: 11, class: gpr }
-
-# CHECK: body:
-# CHECK: SUBSWrr %0, %0, implicit-def $nzcv
-# CHECK: %1:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-
-# CHECK: SUBSXrr %2, %2, implicit-def $nzcv
-# CHECK: %3:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
-
-# CHECK: SUBSXrr %4, %4, implicit-def $nzcv
-# CHECK: %5:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
-
body: |
bb.0:
liveins: $w0, $x0
- %0(s32) = COPY $w0
- %1(s32) = G_ICMP intpred(eq), %0, %0
- %6(s8) = G_TRUNC %1(s32)
- %9(s32) = G_ANYEXT %6
+ ; CHECK-LABEL: name: icmp
+ ; CHECK: liveins: $w0, $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK-NEXT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[COPY]], implicit-def $nzcv
+ ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[CSINCWr]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY2]], implicit-def $nzcv
+ ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32all = COPY [[CSINCWr1]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY3]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr [[COPY4]], [[COPY4]], implicit-def $nzcv
+ ; CHECK-NEXT: [[CSINCWr2:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr32all = COPY [[CSINCWr2]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY5]]
+ %0:gpr(s32) = COPY $w0
+ %1:gpr(s32) = G_ICMP intpred(eq), %0, %0
+ %6:gpr(s8) = G_TRUNC %1(s32)
+ %9:gpr(s32) = G_ANYEXT %6
$w0 = COPY %9(s32)
- %2(s64) = COPY $x0
- %3(s32) = G_ICMP intpred(uge), %2, %2
- %7(s8) = G_TRUNC %3(s32)
- %10(s32) = G_ANYEXT %7
+ %2:gpr(s64) = COPY $x0
+ %3:gpr(s32) = G_ICMP intpred(uge), %2, %2
+ %7:gpr(s8) = G_TRUNC %3(s32)
+ %10:gpr(s32) = G_ANYEXT %7
$w0 = COPY %10(s32)
- %4(p0) = COPY $x0
- %5(s32) = G_ICMP intpred(ne), %4, %4
- %8(s8) = G_TRUNC %5(s32)
- %11(s32) = G_ANYEXT %8
+ %4:gpr(p0) = COPY $x0
+ %5:gpr(s32) = G_ICMP intpred(ne), %4, %4
+ %8:gpr(s8) = G_TRUNC %5(s32)
+ %11:gpr(s32) = G_ANYEXT %8
$w0 = COPY %11(s32)
...
---
-# CHECK-LABEL: name: fcmp
name: fcmp
legalized: true
regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 1, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 2, class: fpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 3, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 4, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 5, class: gpr32, preferred-register: '' }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
- - { id: 2, class: fpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
- - { id: 5, class: gpr }
- - { id: 6, class: gpr }
- - { id: 7, class: gpr }
-
-# CHECK: body:
-# CHECK: nofpexcept FCMPSrr %0, %0, implicit-def $nzcv
-# CHECK: [[TST_MI:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 5, implicit $nzcv
-# CHECK: [[TST_GT:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
-# CHECK: %1:gpr32 = ORRWrr [[TST_MI]], [[TST_GT]]
-
-# CHECK: nofpexcept FCMPDrr %2, %2, implicit-def $nzcv
-# CHECK: %3:gpr32 = CSINCWr $wzr, $wzr, 4, implicit $nzcv
-
body: |
bb.0:
liveins: $w0, $x0
- %0(s32) = COPY $s0
- %1(s32) = G_FCMP floatpred(one), %0, %0
- %4(s8) = G_TRUNC %1(s32)
- %6(s32) = G_ANYEXT %4
- $w0 = COPY %6(s32)
+ ; CHECK-LABEL: name: fcmp
+ ; CHECK: liveins: $w0, $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+ ; CHECK-NEXT: nofpexcept FCMPSrr [[COPY]], [[COPY]], implicit-def $nzcv, implicit $fpcr
+ ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 5, implicit $nzcv
+ ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
+ ; CHECK-NEXT: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[CSINCWr]], [[CSINCWr1]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[ORRWrr]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+ ; CHECK-NEXT: nofpexcept FCMPDrr [[COPY2]], [[COPY2]], implicit-def $nzcv, implicit $fpcr
+ ; CHECK-NEXT: [[CSINCWr2:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 4, implicit $nzcv
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32all = COPY [[CSINCWr2]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY3]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:fpr32 = COPY $s0
+ ; CHECK-NEXT: nofpexcept FCMPSrr [[COPY4]], [[COPY4]], implicit-def $nzcv, implicit $fpcr
+ ; CHECK-NEXT: [[CSINCWr3:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 15, implicit $nzcv
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr32all = COPY [[CSINCWr3]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY5]]
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:fpr64 = COPY $d0
+ ; CHECK-NEXT: nofpexcept FCMPDrr [[COPY6]], [[COPY6]], implicit-def $nzcv, implicit $fpcr
+ ; CHECK-NEXT: [[CSINCWr4:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 14, implicit $nzcv
+ ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gpr32all = COPY [[CSINCWr4]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY7]]
+ %0:fpr(s32) = COPY $s0
+ %1:gpr(s32) = G_FCMP floatpred(one), %0, %0
+ %2:gpr(s8) = G_TRUNC %1(s32)
+ %3:gpr(s32) = G_ANYEXT %2
+ $w0 = COPY %3(s32)
- %2(s64) = COPY $d0
- %3(s32) = G_FCMP floatpred(uge), %2, %2
- %5(s8) = G_TRUNC %3(s32)
- %7(s32) = G_ANYEXT %5
+ %4:fpr(s64) = COPY $d0
+ %5:gpr(s32) = G_FCMP floatpred(uge), %4, %4
+ %6:gpr(s8) = G_TRUNC %5(s32)
+ %7:gpr(s32) = G_ANYEXT %6
$w0 = COPY %7(s32)
+ %8:fpr(s32) = COPY $s0
+ %9:gpr(s32) = G_FCMP floatpred(true), %8, %8
+ %10:gpr(s8) = G_TRUNC %9(s32)
+ %11:gpr(s32) = G_ANYEXT %10
+ $w0 = COPY %11(s32)
+
+ %12:fpr(s64) = COPY $d0
+ %13:gpr(s32) = G_FCMP floatpred(false), %12, %12
+ %14:gpr(s8) = G_TRUNC %13(s32)
+ %15:gpr(s32) = G_ANYEXT %14
+ $w0 = COPY %15(s32)
+
...
---
-# CHECK-LABEL: name: phi
name: phi
legalized: true
regBankSelected: true
tracksRegLiveness: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 1, class: gpr, preferred-register: '' }
-# CHECK-NEXT: - { id: 2, class: fpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 3, class: gpr32, preferred-register: '' }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
- - { id: 2, class: fpr }
-
-# CHECK: body:
-# CHECK: bb.1:
-# CHECK: %2:fpr32 = PHI %0, %bb.0, %2, %bb.1
-
body: |
+ ; CHECK-LABEL: name: phi
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $s0, $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:fpr32 = PHI [[COPY]], %bb.0, [[PHI]], %bb.1
+ ; CHECK-NEXT: TBNZW [[COPY1]], 0, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $s0 = COPY [[PHI]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $s0
bb.0:
liveins: $s0, $w0
successors: %bb.1
- %0(s32) = COPY $s0
+ %0:fpr(s32) = COPY $s0
%3:gpr(s32) = COPY $w0
bb.1:
successors: %bb.1, %bb.2
- %2(s32) = PHI %0, %bb.0, %2, %bb.1
+ %2:fpr(s32) = PHI %0, %bb.0, %2, %bb.1
G_BRCOND %3, %bb.1
bb.2:
@@ -250,60 +237,46 @@ body: |
...
---
-# CHECK-LABEL: name: select
name: select
legalized: true
regBankSelected: true
tracksRegLiveness: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
-# CHECK-NEXT: - { id: 1, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 2, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 3, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 4, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 5, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 6, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 7, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 8, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 9, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 10, class: gpr32, preferred-register: '' }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
- - { id: 5, class: gpr }
- - { id: 6, class: gpr }
- - { id: 7, class: gpr }
- - { id: 8, class: gpr }
- - { id: 9, class: gpr }
-
-# CHECK: body:
-# CHECK: ANDSWri %10, 0, implicit-def $nzcv
-# CHECK: %3:gpr32 = CSELWr %1, %2, 1, implicit $nzcv
-# CHECK: ANDSWri %10, 0, implicit-def $nzcv
-# CHECK: %6:gpr64 = CSELXr %4, %5, 1, implicit $nzcv
-# CHECK: ANDSWri %10, 0, implicit-def $nzcv
-# CHECK: %9:gpr64 = CSELXr %7, %8, 1, implicit $nzcv
body: |
bb.0:
liveins: $w0, $w1, $w2
+ ; CHECK-LABEL: name: select
+ ; CHECK: liveins: $w0, $w1, $w2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY $w2
+ ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 0, implicit-def $nzcv
+ ; CHECK-NEXT: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[COPY1]], [[COPY2]], 1, implicit $nzcv
+ ; CHECK-NEXT: $w0 = COPY [[CSELWr]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ANDSWri1:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 0, implicit-def $nzcv
+ ; CHECK-NEXT: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[COPY3]], [[COPY4]], 1, implicit $nzcv
+ ; CHECK-NEXT: $x0 = COPY [[CSELXr]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ANDSWri2:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 0, implicit-def $nzcv
+ ; CHECK-NEXT: [[CSELXr1:%[0-9]+]]:gpr64 = CSELXr [[COPY5]], [[COPY6]], 1, implicit $nzcv
+ ; CHECK-NEXT: $x0 = COPY [[CSELXr1]]
%10:gpr(s32) = COPY $w0
- %1(s32) = COPY $w1
- %2(s32) = COPY $w2
- %3(s32) = G_SELECT %10, %1, %2
+ %1:gpr(s32) = COPY $w1
+ %2:gpr(s32) = COPY $w2
+ %3:gpr(s32) = G_SELECT %10, %1, %2
$w0 = COPY %3(s32)
- %4(s64) = COPY $x0
- %5(s64) = COPY $x1
- %6(s64) = G_SELECT %10, %4, %5
+ %4:gpr(s64) = COPY $x0
+ %5:gpr(s64) = COPY $x1
+ %6:gpr(s64) = G_SELECT %10, %4, %5
$x0 = COPY %6(s64)
- %7(p0) = COPY $x0
- %8(p0) = COPY $x1
- %9(p0) = G_SELECT %10, %7, %8
+ %7:gpr(p0) = COPY $x0
+ %8:gpr(p0) = COPY $x1
+ %9:gpr(p0) = G_SELECT %10, %7, %8
$x0 = COPY %9(p0)
...
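The regenerated checks for the `select` test show the usual G_SELECT lowering:
the boolean condition is tested with ANDSWri against bit 0, then the value is
picked with CSELWr/CSELXr. The same shape in C (a sketch; names are
illustrative):

    /* tst w0, #0x1 ; csel x0, x1, x2, ne */
    long pick(int cond, long a, long b) {
      return (cond & 1) ? a : b;
    }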
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir b/llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir
index f4366fb..b242c68 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir
@@ -26,7 +26,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -48,7 +48,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ZEXT %6(s8)
@@ -80,7 +80,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -102,7 +102,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ZEXT %6(s16)
@@ -134,7 +134,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: liveins: $x2
@@ -165,7 +165,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
liveins: $x2
@@ -206,7 +206,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -228,7 +228,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ANYEXT %6(s16)
@@ -261,7 +261,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -284,7 +284,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ZEXT %6(s16)
@@ -317,7 +317,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -340,7 +340,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ZEXT %6(s16)
@@ -377,7 +377,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: liveins: $x2
@@ -410,7 +410,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
liveins: $x2
@@ -512,7 +512,7 @@ body: |
; CHECK-NEXT: bb.2:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s16)
@@ -544,7 +544,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%9:_(s32) = G_ZEXT %6(s16)
@@ -577,7 +577,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s8)
@@ -601,7 +601,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%9:_(s32) = G_ZEXT %7(s8)
@@ -634,7 +634,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s8)
@@ -658,7 +658,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%9:_(s32) = G_ZEXT %7(s8)
@@ -692,7 +692,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s8)
@@ -717,7 +717,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%10:_(s32) = G_ZEXT %8(s8)
@@ -783,7 +783,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UADDO]](s16)
@@ -804,7 +804,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%6:_(s32) = G_ANYEXT %4(s16)
@@ -839,7 +839,7 @@ body: |
; CHECK-NEXT: RET_ReallyLR implicit $w0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
bb.1:
successors: %bb.2(0x7ffff800), %bb.3(0x00000800)
liveins: $w0, $w1
@@ -860,6 +860,6 @@ body: |
RET_ReallyLR implicit $w0
bb.3:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
...
diff --git a/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll b/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
index 5829969..d4d803a 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
@@ -3,7 +3,7 @@
define void @UphPNR(target("aarch64.svcount") %predcnt) {
entry:
; CHECK: %0:ppr = COPY $p0
-; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
+; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store (<vscale x 1 x s16>) into %ir.predcnt.addr)
; CHECK: %1:pnr_p8to15 = COPY %0
; CHECK: INLINEASM &"ld1w {z0.s,z1.s,z2.s,z3.s}, $0/z, [x10]", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR_p8to15 */, %1
; CHECK: RET_ReallyLR
@@ -17,7 +17,7 @@ entry:
define void @UpaPNR(target("aarch64.svcount") %predcnt) {
entry:
; CHECK: %0:ppr = COPY $p0
-; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
+; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store (<vscale x 1 x s16>) into %ir.predcnt.addr)
; CHECK: %1:pnr = COPY %0
; CHECK: INLINEASM &"ld1w {z0.s,z1.s,z2.s,z3.s}, $0/z, [x10]", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR */, %1
; CHECK: RET_ReallyLR
@@ -31,7 +31,7 @@ entry:
define void @UplPNR(target("aarch64.svcount") %predcnt) {
entry:
; CHECK: %0:ppr = COPY $p0
-; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
+; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store (<vscale x 1 x s16>) into %ir.predcnt.addr)
; CHECK: %1:pnr_3b = COPY %0
; CHECK: INLINEASM &"fadd z0.h, $0/m, z0.h, #0.5", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR_3b */, %1
; CHECK: RET_ReallyLR
diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
index dbc5417..61a4f64 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
@@ -3,8 +3,7 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+sve < %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SVE
; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
-; CHECK-GI: warning: Instruction selection used fallback path for smull_zext_v4i16_v4i32
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pmlsl2_v8i16_uzp1
+; CHECK-GI: warning: Instruction selection used fallback path for pmlsl2_v8i16_uzp1
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for smlsl2_v8i16_uzp1
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for umlsl2_v8i16_uzp1
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for smlsl2_v4i32_uzp1
@@ -189,13 +188,49 @@ define <8 x i32> @smull_zext_v8i8_v8i32_top_bit_is_1(ptr %A, ptr %B) nounwind {
}
define <4 x i32> @smull_zext_v4i16_v4i32(ptr %A, ptr %B) nounwind {
-; CHECK-LABEL: smull_zext_v4i16_v4i32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr s0, [x0]
-; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-NEXT: smull v0.4s, v0.4h, v1.4h
-; CHECK-NEXT: ret
+; CHECK-NEON-LABEL: smull_zext_v4i16_v4i32:
+; CHECK-NEON: // %bb.0:
+; CHECK-NEON-NEXT: ldr s0, [x0]
+; CHECK-NEON-NEXT: ldr d1, [x1]
+; CHECK-NEON-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-NEON-NEXT: smull v0.4s, v0.4h, v1.4h
+; CHECK-NEON-NEXT: ret
+;
+; CHECK-SVE-LABEL: smull_zext_v4i16_v4i32:
+; CHECK-SVE: // %bb.0:
+; CHECK-SVE-NEXT: ldr s0, [x0]
+; CHECK-SVE-NEXT: ldr d1, [x1]
+; CHECK-SVE-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-SVE-NEXT: smull v0.4s, v0.4h, v1.4h
+; CHECK-SVE-NEXT: ret
+;
+; CHECK-GI-LABEL: smull_zext_v4i16_v4i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr w8, [x0]
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: fmov w9, s1
+; CHECK-GI-NEXT: fmov w10, s2
+; CHECK-GI-NEXT: fmov w11, s3
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: fmov s2, w10
+; CHECK-GI-NEXT: fmov s3, w11
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: mov v2.h[1], v3.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-GI-NEXT: ldr d2, [x1]
+; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT: sshll v1.4s, v2.4h, #0
+; CHECK-GI-NEXT: mul v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: ret
%load.A = load <4 x i8>, ptr %A
%load.B = load <4 x i16>, ptr %B
%zext.A = zext <4 x i8> %load.A to <4 x i32>
diff --git a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
index cf9ed4d..573f921 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
@@ -20,7 +20,7 @@ entry:
define i8 @test2(i32 %a) {
; CHECK-LABEL: test2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #135
+; CHECK-NEXT: mov w8, #135 // =0x87
; CHECK-NEXT: and w8, w0, w8
; CHECK-NEXT: cmp w8, #1024
; CHECK-NEXT: cset w0, eq
@@ -37,7 +37,7 @@ entry:
define i8 @test3(i32 %a) {
; CHECK-LABEL: test3:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #1024
+; CHECK-NEXT: mov w8, #1024 // =0x400
; CHECK-NEXT: movk w8, #33, lsl #16
; CHECK-NEXT: and w8, w0, w8
; CHECK-NEXT: cmp w8, #1024
@@ -84,7 +84,7 @@ entry:
define i8 @test6(i64 %a) {
; CHECK-LABEL: test6:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #135
+; CHECK-NEXT: mov w8, #135 // =0x87
; CHECK-NEXT: and x8, x0, x8
; CHECK-NEXT: cmp x8, #1024
; CHECK-NEXT: cset w0, eq
@@ -101,7 +101,7 @@ entry:
define i8 @test7(i64 %a) {
; CHECK-LABEL: test7:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #1024
+; CHECK-NEXT: mov w8, #1024 // =0x400
; CHECK-NEXT: movk w8, #33, lsl #16
; CHECK-NEXT: and x8, x0, x8
; CHECK-NEXT: cmp x8, #1024
@@ -175,7 +175,7 @@ define i32 @test9(ptr nocapture %x, ptr nocapture readonly %y, i32 %n) {
; CHECK-NEXT: cmp w2, #1
; CHECK-NEXT: b.lt .LBB8_3
; CHECK-NEXT: // %bb.1: // %for.body.preheader
-; CHECK-NEXT: mov w9, #1024
+; CHECK-NEXT: mov w9, #1024 // =0x400
; CHECK-NEXT: mov w8, w2
; CHECK-NEXT: movk w9, #32, lsl #16
; CHECK-NEXT: .LBB8_2: // %for.body
@@ -226,7 +226,7 @@ define void @test10(ptr nocapture %x, ptr nocapture readonly %y, ptr nocapture %
; CHECK-LABEL: test10:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ldr w8, [x1]
-; CHECK-NEXT: mov w9, #1024
+; CHECK-NEXT: mov w9, #1024 // =0x400
; CHECK-NEXT: movk w9, #32, lsl #16
; CHECK-NEXT: and w8, w8, w9
; CHECK-NEXT: str w8, [x0]
@@ -253,7 +253,7 @@ entry:
define i8 @test11(i64 %a) {
; CHECK-LABEL: test11:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #-1610612736
+; CHECK-NEXT: mov w8, #-1610612736 // =0xa0000000
; CHECK-NEXT: and x8, x0, x8
; CHECK-NEXT: cmp x8, #1024
; CHECK-NEXT: cset w0, eq
diff --git a/llvm/test/CodeGen/AArch64/abs.ll b/llvm/test/CodeGen/AArch64/abs.ll
index e00f70b..78c1ff7 100644
--- a/llvm/test/CodeGen/AArch64/abs.ll
+++ b/llvm/test/CodeGen/AArch64/abs.ll
@@ -15,9 +15,8 @@ define i8 @abs_i8(i8 %a){
; CHECK-GI-LABEL: abs_i8:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: sxtb w8, w0
-; CHECK-GI-NEXT: asr w8, w8, #7
-; CHECK-GI-NEXT: add w9, w0, w8
-; CHECK-GI-NEXT: eor w0, w9, w8
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: cneg w0, w0, le
; CHECK-GI-NEXT: ret
entry:
%res = call i8 @llvm.abs.i8(i8 %a, i1 0)
@@ -36,9 +35,8 @@ define i16 @abs_i16(i16 %a){
; CHECK-GI-LABEL: abs_i16:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: sxth w8, w0
-; CHECK-GI-NEXT: asr w8, w8, #15
-; CHECK-GI-NEXT: add w9, w0, w8
-; CHECK-GI-NEXT: eor w0, w9, w8
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: cneg w0, w0, le
; CHECK-GI-NEXT: ret
entry:
%res = call i16 @llvm.abs.i16(i16 %a, i1 0)
@@ -55,9 +53,8 @@ define i32 @abs_i32(i32 %a){
;
; CHECK-GI-LABEL: abs_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: asr w8, w0, #31
-; CHECK-GI-NEXT: add w9, w0, w8
-; CHECK-GI-NEXT: eor w0, w9, w8
+; CHECK-GI-NEXT: cmp w0, #0
+; CHECK-GI-NEXT: cneg w0, w0, le
; CHECK-GI-NEXT: ret
entry:
%res = call i32 @llvm.abs.i32(i32 %a, i1 0)
@@ -74,9 +71,8 @@ define i64 @abs_i64(i64 %a){
;
; CHECK-GI-LABEL: abs_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: asr x8, x0, #63
-; CHECK-GI-NEXT: add x9, x0, x8
-; CHECK-GI-NEXT: eor x0, x9, x8
+; CHECK-GI-NEXT: cmp x0, #0
+; CHECK-GI-NEXT: cneg x0, x0, le
; CHECK-GI-NEXT: ret
entry:
%res = call i64 @llvm.abs.i64(i64 %a, i1 0)
@@ -248,9 +244,9 @@ define <1 x i32> @abs_v1i32(<1 x i32> %a){
; CHECK-GI-LABEL: abs_v1i32:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: asr w9, w8, #31
-; CHECK-GI-NEXT: add w8, w8, w9
-; CHECK-GI-NEXT: eor w8, w8, w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: cneg w8, w9, le
; CHECK-GI-NEXT: fmov s0, w8
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-NEXT: ret
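The two GlobalISel lowerings compared in this diff are equivalent in two's
complement: with m = x >> 31 (arithmetic shift), (x + m) ^ m negates x exactly
when x is negative, which the new cmp/cneg pair does directly. Both sequences
in C (a sketch, not taken from the test):

    int abs_old(int x) {   /* asr w8, w0, #31 ; add ; eor  */
      int m = x >> 31;     /* 0 if x >= 0, -1 if x < 0     */
      return (x + m) ^ m;  /* ~(x - 1) == -x when x < 0    */
    }
    int abs_new(int x) {   /* cmp w0, #0 ; cneg w0, w0, le */
      return x <= 0 ? -x : x;
    }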
diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
index 9a4e01a..7244ac9 100644
--- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
+++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
@@ -14,12 +14,12 @@ define void @array_1D(ptr %addr) #0 {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0]
-; CHECK-NEXT: st1d { z0.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT: st1d { z1.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT: st1d { z2.d }, p0, [sp]
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: st1d { z0.d }, p0, [sp]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT: st1d { z2.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: addvl sp, sp, #3
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
@@ -81,18 +81,18 @@ define void @array_2D(ptr %addr) #0 {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 48 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, #5, mul vl]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #4, mul vl]
-; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0]
-; CHECK-NEXT: ld1d { z3.d }, p0/z, [x0, #3, mul vl]
-; CHECK-NEXT: ld1d { z4.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1d { z5.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT: st1d { z0.d }, p0, [sp, #5, mul vl]
-; CHECK-NEXT: st1d { z1.d }, p0, [sp, #4, mul vl]
-; CHECK-NEXT: st1d { z3.d }, p0, [sp, #3, mul vl]
-; CHECK-NEXT: st1d { z5.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT: st1d { z4.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT: st1d { z2.d }, p0, [sp]
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #5, mul vl]
+; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1d { z3.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: ld1d { z4.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1d { z5.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: st1d { z0.d }, p0, [sp]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp, #5, mul vl]
+; CHECK-NEXT: st1d { z3.d }, p0, [sp, #4, mul vl]
+; CHECK-NEXT: st1d { z5.d }, p0, [sp, #3, mul vl]
+; CHECK-NEXT: st1d { z4.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT: st1d { z2.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: addvl sp, sp, #6
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
index 7292d52..f03a6f0 100644
--- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
+++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
@@ -13,12 +13,12 @@ define void @test(ptr %addr) #0 {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0]
-; CHECK-NEXT: st1d { z0.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT: st1d { z1.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT: st1d { z2.d }, p0, [sp]
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: st1d { z0.d }, p0, [sp]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT: st1d { z2.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: addvl sp, sp, #3
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/allow-check.ll b/llvm/test/CodeGen/AArch64/allow-check.ll
new file mode 100644
index 0000000..9e4a473
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/allow-check.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=aarch64 -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -global-isel=0 -fast-isel=1 | FileCheck %s
+
+target triple = "aarch64-linux"
+
+define i1 @test_runtime() local_unnamed_addr {
+; CHECK-LABEL: test_runtime:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, #1 // =0x1
+; CHECK-NEXT: ret
+entry:
+ %allow = call i1 @llvm.allow.runtime.check(metadata !"test_check")
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.runtime.check(metadata) nounwind
+
+define i1 @test_ubsan() local_unnamed_addr {
+; CHECK-LABEL: test_ubsan:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, #1 // =0x1
+; CHECK-NEXT: ret
+entry:
+ %allow = call i1 @llvm.allow.ubsan.check(i8 7)
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.ubsan.check(i8) nounwind
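The new file covers the llvm.allow.runtime.check and llvm.allow.ubsan.check
intrinsics, which fold to a constant true here because nothing suppresses the
checks. Clang exposes the first as a builtin; a usage sketch (assuming a Clang
recent enough to carry the builtin):

    /* Lowers to llvm.allow.runtime.check with the "test_check" tag;
       with no hotness data it folds to true, matching the
       "mov w0, #1" the test expects. */
    int guarded(void) {
      return __builtin_allow_runtime_check("test_check");
    }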
diff --git a/llvm/test/CodeGen/AArch64/and-sink.ll b/llvm/test/CodeGen/AArch64/and-sink.ll
index 4d08586..f298a55 100644
--- a/llvm/test/CodeGen/AArch64/and-sink.ll
+++ b/llvm/test/CodeGen/AArch64/and-sink.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs < %s | FileCheck %s
; RUN: opt -S -passes='require<profile-summary>,function(codegenprepare)' -mtriple=aarch64-linux %s | FileCheck --check-prefix=CHECK-CGP %s
; RUN: opt -S -passes='require<profile-summary>,function(codegenprepare)' -cgpp-huge-func=0 -mtriple=aarch64-linux %s | FileCheck --check-prefix=CHECK-CGP %s
@@ -9,9 +10,18 @@
; Test that and is sunk into cmp block to form tbz.
define dso_local i32 @and_sink1(i32 %a, i1 %c) {
; CHECK-LABEL: and_sink1:
-; CHECK: tbz w1, #0
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:A]
-; CHECK: tbnz {{w[0-9]+}}, #2
+; CHECK: // %bb.0:
+; CHECK-NEXT: tbz w1, #0, .LBB0_3
+; CHECK-NEXT: // %bb.1: // %bb0
+; CHECK-NEXT: adrp x8, A
+; CHECK-NEXT: str wzr, [x8, :lo12:A]
+; CHECK-NEXT: tbnz w0, #2, .LBB0_3
+; CHECK-NEXT: // %bb.2:
+; CHECK-NEXT: mov w0, #1 // =0x1
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB0_3: // %bb2
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: ret
; CHECK-CGP-LABEL: @and_sink1(
; CHECK-CGP-NOT: and i32
@@ -35,12 +45,30 @@ bb2:
; Test that both 'and' and cmp get sunk to form tbz.
define dso_local i32 @and_sink2(i32 %a, i1 %c, i1 %c2) {
; CHECK-LABEL: and_sink2:
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:A]
-; CHECK: tbz w1, #0
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:B]
-; CHECK: tbz w2, #0
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:C]
-; CHECK: tbnz {{w[0-9]+}}, #2
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, wzr
+; CHECK-NEXT: adrp x9, A
+; CHECK-NEXT: str wzr, [x9, :lo12:A]
+; CHECK-NEXT: tbz w1, #0, .LBB1_5
+; CHECK-NEXT: // %bb.1: // %bb0.preheader
+; CHECK-NEXT: adrp x8, B
+; CHECK-NEXT: adrp x9, C
+; CHECK-NEXT: .LBB1_2: // %bb0
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: str wzr, [x8, :lo12:B]
+; CHECK-NEXT: tbz w2, #0, .LBB1_6
+; CHECK-NEXT: // %bb.3: // %bb1
+; CHECK-NEXT: // in Loop: Header=BB1_2 Depth=1
+; CHECK-NEXT: str wzr, [x9, :lo12:C]
+; CHECK-NEXT: tbnz w0, #2, .LBB1_2
+; CHECK-NEXT: // %bb.4:
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: .LBB1_5: // %common.ret
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB1_6:
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: ret
; CHECK-CGP-LABEL: @and_sink2(
; CHECK-CGP-NOT: and i32
@@ -71,10 +99,16 @@ bb3:
; Test that 'and' is not sunk since cbz is a better alternative.
define dso_local i32 @and_sink3(i32 %a) {
; CHECK-LABEL: and_sink3:
-; CHECK: and [[REG:w[0-9]+]], w0, #0x3
-; CHECK: [[LOOP:.L[A-Z0-9_]+]]:
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:A]
-; CHECK: cbz [[REG]], [[LOOP]]
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, A
+; CHECK-NEXT: and w9, w0, #0x3
+; CHECK-NEXT: .LBB2_1: // %bb0
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: str wzr, [x8, :lo12:A]
+; CHECK-NEXT: cbz w9, .LBB2_1
+; CHECK-NEXT: // %bb.2: // %bb2
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: ret
; CHECK-CGP-LABEL: @and_sink3(
; CHECK-CGP-NEXT: and i32
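The and_sink tests verify that CodeGenPrepare sinks an `and` next to the
compare that uses it, so instruction selection can branch on a single bit with
tbz/tbnz. The underlying source shape in C (a sketch; hit() is a hypothetical
external call that forces a real branch):

    extern void hit(void);

    int bit_branch(int a) {
      if (a & 4) {   /* selected as "tbnz w0, #2, ..." */
        hit();
        return 1;
      }
      return 0;
    }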
diff --git a/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll b/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
index 225d4c6..cb65867 100644
--- a/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -debug-entry-values -mtriple=arm64-apple-darwin | FileCheck %s
-; Stackmap Header: no constants - 6 callsites
+; Stackmap Header: no constants - 18 callsites
; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
; CHECK-NEXT: __LLVM_StackMaps:
; Header
@@ -8,11 +8,11 @@
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .short 0
; Num Functions
-; CHECK-NEXT: .long 8
+; CHECK-NEXT: .long 18
; Num LargeConstants
; CHECK-NEXT: .long 0
; Num Callsites
-; CHECK-NEXT: .long 8
+; CHECK-NEXT: .long 18
; Functions and stack size
; CHECK-NEXT: .quad _test
@@ -39,6 +39,36 @@
; CHECK-NEXT: .quad _patchpoint_spillargs
; CHECK-NEXT: .quad 128
; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_i32
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_i64
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_p0
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_f16
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_f32
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_f64
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_v16i8
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_v4i32
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_v4f32
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_v2f64
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
; test
@@ -457,5 +487,194 @@ entry:
ret i64 %result
}
+; generic_test_i32
+; CHECK-LABEL: .long L{{.*}}-_generic_test_i32
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 4
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define i32 @generic_test_i32() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc i32 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i32(i64 14, i32 20, ptr null, i32 0)
+ ret i32 %ret
+}
+
+; generic_test_i64
+; CHECK-LABEL: .long L{{.*}}-_generic_test_i64
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define i64 @generic_test_i64() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 14, i32 20, ptr null, i32 0)
+ ret i64 %ret
+}
+
+; generic_test_p0
+; CHECK-LABEL: .long L{{.*}}-_generic_test_p0
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define ptr @generic_test_p0() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc ptr (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.p0(i64 14, i32 20, ptr null, i32 0)
+ ret ptr %ret
+}
+
+; generic_test_f16
+; CHECK-LABEL: .long L{{.*}}-_generic_test_f16
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define half @generic_test_f16() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc half (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f16(i64 14, i32 20, ptr null, i32 0)
+ ret half %ret
+}
+
+; generic_test_f32
+; CHECK-LABEL: .long L{{.*}}-_generic_test_f32
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 4
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define float @generic_test_f32() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc float (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f32(i64 14, i32 20, ptr null, i32 0)
+ ret float %ret
+}
+
+; generic_test_f64
+; CHECK-LABEL: .long L{{.*}}-_generic_test_f64
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define double @generic_test_f64() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc double (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f64(i64 14, i32 20, ptr null, i32 0)
+ ret double %ret
+}
+
+; generic_test_v16i8
+; CHECK-LABEL: .long L{{.*}}-_generic_test_v16i8
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 16
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define <16 x i8> @generic_test_v16i8() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc <16 x i8> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v16i8(i64 14, i32 20, ptr null, i32 0)
+ ret <16 x i8> %ret
+}
+
+; generic_test_v4i32
+; CHECK-LABEL: .long L{{.*}}-_generic_test_v4i32
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 16
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define <4 x i32> @generic_test_v4i32() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc <4 x i32> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v4i32(i64 14, i32 20, ptr null, i32 0)
+ ret <4 x i32> %ret
+}
+
+; generic_test_v4f32
+; CHECK-LABEL: .long L{{.*}}-_generic_test_v4f32
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 16
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define <4 x float> @generic_test_v4f32() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc <4 x float> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v4f32(i64 14, i32 20, ptr null, i32 0)
+ ret <4 x float> %ret
+}
+
+; generic_test_v2f64
+; CHECK-LABEL: .long L{{.*}}-_generic_test_v2f64
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 16
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define <2 x double> @generic_test_v2f64() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc <2 x double> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v2f64(i64 14, i32 20, ptr null, i32 0)
+ ret <2 x double> %ret
+}
+
declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i32 @llvm.experimental.patchpoint.i32(i64, i32, ptr, i32, ...)
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
+declare ptr @llvm.experimental.patchpoint.p0(i64, i32, ptr, i32, ...)
+declare half @llvm.experimental.patchpoint.f16(i64, i32, ptr, i32, ...)
+declare float @llvm.experimental.patchpoint.f32(i64, i32, ptr, i32, ...)
+declare double @llvm.experimental.patchpoint.f64(i64, i32, ptr, i32, ...)
+declare <16 x i8> @llvm.experimental.patchpoint.v16i8(i64, i32, ptr, i32, ...)
+declare <4 x i32> @llvm.experimental.patchpoint.v4i32(i64, i32, ptr, i32, ...)
+declare <4 x float> @llvm.experimental.patchpoint.v4f32(i64, i32, ptr, i32, ...)
+declare <2 x double> @llvm.experimental.patchpoint.v2f64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll b/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
index 4932529..3007e7c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
@@ -8,9 +8,8 @@ define <4 x i16> @fptosi_v4f64_to_v4i16(ptr %ptr) {
; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-NEXT: xtn v1.2s, v1.2d
-; CHECK-NEXT: xtn v0.2s, v0.2d
-; CHECK-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: xtn v0.4h, v0.4s
; CHECK-NEXT: ret
%tmp1 = load <4 x double>, ptr %ptr
%tmp2 = fptosi <4 x double> %tmp1 to <4 x i16>
@@ -26,13 +25,10 @@ define <8 x i8> @fptosi_v4f64_to_v4i8(ptr %ptr) {
; CHECK-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-NEXT: fcvtzs v3.2d, v3.2d
; CHECK-NEXT: fcvtzs v2.2d, v2.2d
-; CHECK-NEXT: xtn v0.2s, v0.2d
-; CHECK-NEXT: xtn v1.2s, v1.2d
-; CHECK-NEXT: xtn v3.2s, v3.2d
-; CHECK-NEXT: xtn v2.2s, v2.2d
-; CHECK-NEXT: uzp1 v0.4h, v1.4h, v0.4h
-; CHECK-NEXT: uzp1 v1.4h, v2.4h, v3.4h
-; CHECK-NEXT: uzp1 v0.8b, v1.8b, v0.8b
+; CHECK-NEXT: uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: uzp1 v1.4s, v2.4s, v3.4s
+; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: xtn v0.8b, v0.8h
; CHECK-NEXT: ret
%tmp1 = load <8 x double>, ptr %ptr
%tmp2 = fptosi <8 x double> %tmp1 to <8 x i8>
@@ -96,9 +92,8 @@ define <4 x i16> @fptoui_v4f64_to_v4i16(ptr %ptr) {
; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-NEXT: xtn v1.2s, v1.2d
-; CHECK-NEXT: xtn v0.2s, v0.2d
-; CHECK-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: xtn v0.4h, v0.4s
; CHECK-NEXT: ret
%tmp1 = load <4 x double>, ptr %ptr
%tmp2 = fptoui <4 x double> %tmp1 to <4 x i16>
diff --git a/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll b/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll
index a1e0693..bc399c8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast -aarch64-enable-sink-fold=true | FileCheck %s --check-prefix=CHECK-SDAG
-; RUN: llc < %s -global-isel -global-isel-abort=2 -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast -aarch64-enable-sink-fold=true | FileCheck %s --check-prefix=CHECK-GISEL
+; RUN: llc < %s -global-isel -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast -aarch64-enable-sink-fold=true | FileCheck %s --check-prefix=CHECK-GISEL
define <4 x i8> @test_varidx_extract_v8s8(<8 x i8> %x, i32 %idx) {
; CHECK-SDAG-LABEL: test_varidx_extract_v8s8:
@@ -29,20 +29,20 @@ define <4 x i8> @test_varidx_extract_v8s8(<8 x i8> %x, i32 %idx) {
; CHECK-GISEL-NEXT: .cfi_def_cfa_offset 16
; CHECK-GISEL-NEXT: mov w9, w0
; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GISEL-NEXT: mov b1, v0.b[1]
; CHECK-GISEL-NEXT: add x8, sp, #8
-; CHECK-GISEL-NEXT: str d0, [sp, #8]
; CHECK-GISEL-NEXT: and x9, x9, #0x7
-; CHECK-GISEL-NEXT: mov b2, v0.b[1]
+; CHECK-GISEL-NEXT: str d0, [sp, #8]
; CHECK-GISEL-NEXT: mov b3, v0.b[2]
; CHECK-GISEL-NEXT: lsl x10, x9, #1
; CHECK-GISEL-NEXT: mov b0, v0.b[3]
; CHECK-GISEL-NEXT: sub x9, x10, x9
-; CHECK-GISEL-NEXT: ldrb w8, [x8, x9]
-; CHECK-GISEL-NEXT: fmov s1, w8
-; CHECK-GISEL-NEXT: mov v1.h[1], v2.h[0]
-; CHECK-GISEL-NEXT: mov v1.h[2], v3.h[0]
-; CHECK-GISEL-NEXT: mov v1.h[3], v0.h[0]
-; CHECK-GISEL-NEXT: fmov d0, d1
+; CHECK-GISEL-NEXT: ldr b2, [x8, x9]
+; CHECK-GISEL-NEXT: mov v2.b[1], v1.b[0]
+; CHECK-GISEL-NEXT: mov v2.b[2], v3.b[0]
+; CHECK-GISEL-NEXT: mov v2.b[3], v0.b[0]
+; CHECK-GISEL-NEXT: ushll v0.8h, v2.8b, #0
+; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GISEL-NEXT: add sp, sp, #16
; CHECK-GISEL-NEXT: ret
%tmp = extractelement <8 x i8> %x, i32 %idx
@@ -176,17 +176,15 @@ define <2 x i16> @test_varidx_extract_v4s16(<4 x i16> %x, i32 %idx) {
; CHECK-GISEL: // %bb.0:
; CHECK-GISEL-NEXT: sub sp, sp, #16
; CHECK-GISEL-NEXT: .cfi_def_cfa_offset 16
-; CHECK-GISEL-NEXT: mov w9, w0
-; CHECK-GISEL-NEXT: mov w8, #2 // =0x2
-; CHECK-GISEL-NEXT: add x10, sp, #8
-; CHECK-GISEL-NEXT: and x9, x9, #0x3
; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GISEL-NEXT: mov w9, w0
+; CHECK-GISEL-NEXT: mov h1, v0.h[1]
+; CHECK-GISEL-NEXT: add x8, sp, #8
; CHECK-GISEL-NEXT: str d0, [sp, #8]
-; CHECK-GISEL-NEXT: madd x8, x9, x8, x10
-; CHECK-GISEL-NEXT: umov w9, v0.h[1]
-; CHECK-GISEL-NEXT: fmov s1, w9
-; CHECK-GISEL-NEXT: ldr h0, [x8]
-; CHECK-GISEL-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GISEL-NEXT: and x9, x9, #0x3
+; CHECK-GISEL-NEXT: ldr h0, [x8, x9, lsl #1]
+; CHECK-GISEL-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GISEL-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GISEL-NEXT: add sp, sp, #16
; CHECK-GISEL-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll b/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
index c58f4b1..f948d78 100644
--- a/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
@@ -79,6 +79,145 @@ entry:
ret void
}
+; Test register allocation for an i32 result value of patchpoint.
+define i32 @generic_patchpoint_i32() {
+entry:
+; CHECK-LABEL: generic_patchpoint_i32:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in w0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call i32 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i32(i64 5, i32 4, ptr null, i32 0)
+ ret i32 %result
+}
+
+; Test register allocation for an i64 result value of patchpoint.
+define i64 @generic_patchpoint_i64() {
+entry:
+; CHECK-LABEL: generic_patchpoint_i64:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in x0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 4, ptr null, i32 0)
+ ret i64 %result
+}
+
+; Test register allocation for a ptr result value of patchpoint.
+define ptr @generic_patchpoint_p0() {
+entry:
+; CHECK-LABEL: generic_patchpoint_p0:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in x0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call ptr (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.p0(i64 5, i32 4, ptr null, i32 0)
+ ret ptr %result
+}
+
+; Test register allocation for a half result value of patchpoint.
+define half @generic_patchpoint_f16() {
+entry:
+; CHECK-LABEL: generic_patchpoint_f16:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in h0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call half (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f16(i64 5, i32 4, ptr null, i32 0)
+ ret half %result
+}
+
+; Test register allocation for a float result value of patchpoint.
+define float @generic_patchpoint_f32() {
+entry:
+; CHECK-LABEL: generic_patchpoint_f32:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in s0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call float (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f32(i64 5, i32 4, ptr null, i32 0)
+ ret float %result
+}
+
+; Test register allocation for a double result value of patchpoint.
+define double @generic_patchpoint_f64() {
+entry:
+; CHECK-LABEL: generic_patchpoint_f64:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in d0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call double (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f64(i64 5, i32 4, ptr null, i32 0)
+ ret double %result
+}
+
+; Test register allocation for a <16 x i8> result value of patchpoint.
+define <16 x i8> @generic_patchpoint_v16i8() {
+entry:
+; CHECK-LABEL: generic_patchpoint_v16i8:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in v0.16b.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call <16 x i8> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v16i8(i64 5, i32 4, ptr null, i32 0)
+ ret <16 x i8> %result
+}
+
+; Test register allocation for a <4 x i32> result value of patchpoint.
+define <4 x i32> @generic_patchpoint_v4i32() {
+entry:
+; CHECK-LABEL: generic_patchpoint_v4i32:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in v0.4s.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call <4 x i32> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v4i32(i64 5, i32 4, ptr null, i32 0)
+ ret <4 x i32> %result
+}
+
+; Test register allocation for a <4 x float> result value of patchpoint.
+define <4 x float> @generic_patchpoint_v4f32() {
+entry:
+; CHECK-LABEL: generic_patchpoint_v4f32:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in v0.4s.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call <4 x float> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v4f32(i64 5, i32 4, ptr null, i32 0)
+ ret <4 x float> %result
+}
+
+; Test register allocation for a <2 x double> result value of patchpoint.
+define <2 x double> @generic_patchpoint_v2f64() {
+entry:
+; CHECK-LABEL: generic_patchpoint_v2f64:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in v0.2d.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call <2 x double> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v2f64(i64 5, i32 4, ptr null, i32 0)
+ ret <2 x double> %result
+}
+
declare void @llvm.experimental.stackmap(i64, i32, ...)
declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i32 @llvm.experimental.patchpoint.i32(i64, i32, ptr, i32, ...)
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
+declare ptr @llvm.experimental.patchpoint.p0(i64, i32, ptr, i32, ...)
+declare half @llvm.experimental.patchpoint.f16(i64, i32, ptr, i32, ...)
+declare float @llvm.experimental.patchpoint.f32(i64, i32, ptr, i32, ...)
+declare double @llvm.experimental.patchpoint.f64(i64, i32, ptr, i32, ...)
+declare <16 x i8> @llvm.experimental.patchpoint.v16i8(i64, i32, ptr, i32, ...)
+declare <4 x i32> @llvm.experimental.patchpoint.v4i32(i64, i32, ptr, i32, ...)
+declare <4 x float> @llvm.experimental.patchpoint.v4f32(i64, i32, ptr, i32, ...)
+declare <2 x double> @llvm.experimental.patchpoint.v2f64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/arm64-xaluo.ll b/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
index 77c7066..0ec2d76 100644
--- a/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
@@ -2643,8 +2643,7 @@ define i8 @pr60530() {
;
; GISEL-LABEL: pr60530:
; GISEL: // %bb.0:
-; GISEL-NEXT: mov w8, #1 // =0x1
-; GISEL-NEXT: sbfx w0, w8, #0, #1
+; GISEL-NEXT: mov w0, #255 // =0xff
; GISEL-NEXT: ret
%1 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 0, i8 1)
%2 = extractvalue { i8, i1 } %1, 1
diff --git a/llvm/test/CodeGen/AArch64/avoid-zero-copy.mir b/llvm/test/CodeGen/AArch64/avoid-zero-copy.mir
index 859be2d..b940734 100644
--- a/llvm/test/CodeGen/AArch64/avoid-zero-copy.mir
+++ b/llvm/test/CodeGen/AArch64/avoid-zero-copy.mir
@@ -19,6 +19,8 @@
...
---
name: foo
+frameInfo:
+ adjustsStack: true
body: |
bb.0 (%ir-block.0):
; CHECK-LABEL: name: foo
diff --git a/llvm/test/CodeGen/AArch64/bitcast.ll b/llvm/test/CodeGen/AArch64/bitcast.ll
index bccfdb9..e0851fd 100644
--- a/llvm/test/CodeGen/AArch64/bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/bitcast.ll
@@ -1,16 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; PR23065: SCALAR_TO_VECTOR implies the top elements 1 to N-1 of the N-element vector are undefined.
-; CHECK-GI: warning: Instruction selection used fallback path for bitcast_v4i8_i32
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for bitcast_i32_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for bitcast_v2i16_i32
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for bitcast_i32_v2i16
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for bitcast_v2i16_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for bitcast_v4i8_v2i16
-
define <4 x i16> @foo1(<2 x i32> %a) {
; CHECK-SD-LABEL: foo1:
; CHECK-SD: // %bb.0:
@@ -54,58 +47,104 @@ define <4 x i16> @foo2(<2 x i32> %a) {
; ===== To and From Scalar Types =====
define i32 @bitcast_v4i8_i32(<4 x i8> %a, <4 x i8> %b){
-; CHECK-LABEL: bitcast_v4i8_i32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: add v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: xtn v0.8b, v0.8h
-; CHECK-NEXT: fmov w0, s0
-; CHECK-NEXT: add sp, sp, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_v4i8_i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: add v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v0.8b
+; CHECK-SD-NEXT: fmov w0, s0
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_v4i8_i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov h2, v0.h[2]
+; CHECK-GI-NEXT: mov h3, v0.h[3]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: mov v0.h[2], v2.h[0]
+; CHECK-GI-NEXT: mov v0.h[3], v3.h[0]
+; CHECK-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
%c = add <4 x i8> %a, %b
%d = bitcast <4 x i8> %c to i32
ret i32 %d
}
define <4 x i8> @bitcast_i32_v4i8(i32 %a, i32 %b){
-; CHECK-LABEL: bitcast_i32_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, w1
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: zip1 v0.8b, v0.8b, v0.8b
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_i32_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, w1
+; CHECK-SD-NEXT: fmov s0, w8
+; CHECK-SD-NEXT: zip1 v0.8b, v0.8b, v0.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_i32_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, w1
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%c = add i32 %a, %b
%d = bitcast i32 %c to <4 x i8>
ret <4 x i8> %d
}
define i32 @bitcast_v2i16_i32(<2 x i16> %a, <2 x i16> %b){
-; CHECK-LABEL: bitcast_v2i16_i32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: add v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: mov w8, v0.s[1]
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: strh w9, [sp, #12]
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: ldr w0, [sp, #12]
-; CHECK-NEXT: add sp, sp, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_v2i16_i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: add v0.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: mov w8, v0.s[1]
+; CHECK-SD-NEXT: fmov w9, s0
+; CHECK-SD-NEXT: strh w9, [sp, #12]
+; CHECK-SD-NEXT: strh w8, [sp, #14]
+; CHECK-SD-NEXT: ldr w0, [sp, #12]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_v2i16_i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add v0.2s, v0.2s, v1.2s
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
%c = add <2 x i16> %a, %b
%d = bitcast <2 x i16> %c to i32
ret i32 %d
}
define <2 x i16> @bitcast_i32_v2i16(i32 %a, i32 %b){
-; CHECK-LABEL: bitcast_i32_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, w1
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_i32_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, w1
+; CHECK-SD-NEXT: fmov s0, w8
+; CHECK-SD-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_i32_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, w1
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%c = add i32 %a, %b
%d = bitcast i32 %c to <2 x i16>
ret <2 x i16> %d
@@ -362,40 +401,72 @@ define <8 x i16> @bitcast_v16i8_v8i16(<16 x i8> %a, <16 x i8> %b){
; ===== Smaller/Larger Width Vectors with Legal Element Sizes =====
define <4 x i8> @bitcast_v2i16_v4i8(<2 x i16> %a, <2 x i16> %b){
-; CHECK-LABEL: bitcast_v2i16_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: add v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: mov w8, v0.s[1]
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: strh w9, [sp, #12]
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: ldr s0, [sp, #12]
-; CHECK-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: add sp, sp, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_v2i16_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: add v0.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: mov w8, v0.s[1]
+; CHECK-SD-NEXT: fmov w9, s0
+; CHECK-SD-NEXT: strh w9, [sp, #12]
+; CHECK-SD-NEXT: strh w8, [sp, #14]
+; CHECK-SD-NEXT: ldr s0, [sp, #12]
+; CHECK-SD-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_v2i16_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add v0.2s, v0.2s, v1.2s
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%c = add <2 x i16> %a, %b
%d = bitcast <2 x i16> %c to <4 x i8>
ret <4 x i8> %d
}
define <2 x i16> @bitcast_v4i8_v2i16(<4 x i8> %a, <4 x i8> %b){
-; CHECK-LABEL: bitcast_v4i8_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: add v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: add x8, sp, #12
-; CHECK-NEXT: xtn v0.8b, v0.8h
-; CHECK-NEXT: str s0, [sp, #12]
-; CHECK-NEXT: ld1 { v0.h }[0], [x8]
-; CHECK-NEXT: orr x8, x8, #0x2
-; CHECK-NEXT: ld1 { v0.h }[2], [x8]
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: add sp, sp, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_v4i8_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: add v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: add x8, sp, #12
+; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v0.8b
+; CHECK-SD-NEXT: str s0, [sp, #12]
+; CHECK-SD-NEXT: ld1 { v0.h }[0], [x8]
+; CHECK-SD-NEXT: orr x8, x8, #0x2
+; CHECK-SD-NEXT: ld1 { v0.h }[2], [x8]
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_v4i8_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov h2, v0.h[2]
+; CHECK-GI-NEXT: mov h3, v0.h[3]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: mov v0.h[2], v2.h[0]
+; CHECK-GI-NEXT: mov v0.h[3], v3.h[0]
+; CHECK-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%c = add <4 x i8> %a, %b
%d = bitcast <4 x i8> %c to <2 x i16>
ret <2 x i16> %d
diff --git a/llvm/test/CodeGen/AArch64/bswap.ll b/llvm/test/CodeGen/AArch64/bswap.ll
index f4221ac..071613b 100644
--- a/llvm/test/CodeGen/AArch64/bswap.ll
+++ b/llvm/test/CodeGen/AArch64/bswap.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
-
-; CHECK-GI: warning: Instruction selection used fallback path for bswap_v2i16
+; RUN: llc -mtriple=aarch64 -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; ====== Scalar Tests =====
define i16 @bswap_i16(i16 %a){
@@ -103,11 +101,23 @@ declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
; ===== Smaller/Larger Width Vectors with Legal Element Sizes =====
define <2 x i16> @bswap_v2i16(<2 x i16> %a){
-; CHECK-LABEL: bswap_v2i16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: rev32 v0.8b, v0.8b
-; CHECK-NEXT: ushr v0.2s, v0.2s, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bswap_v2i16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: rev32 v0.8b, v0.8b
+; CHECK-SD-NEXT: ushr v0.2s, v0.2s, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bswap_v2i16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: rev16 v0.8b, v0.8b
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
entry:
%res = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> %a)
ret <2 x i16> %res
diff --git a/llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir b/llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir
index 9040937..1592c86 100644
--- a/llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir
+++ b/llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir
@@ -3,6 +3,8 @@
---
name: func
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
body: |
bb.0:
liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add.ll
index 7b8448d..7cdb10e 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add.ll
@@ -1,23 +1,42 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s --mattr=+complxnum,+neon,+fullfp16 -o - | FileCheck %s
-; RUN: llc < %s --mattr=+complxnum,+neon,+fullfp16,+sve -o - | FileCheck %s
-; RUN: llc < %s --mattr=+complxnum,+neon,+fullfp16,+sve2 -o - | FileCheck %s
+; RUN: llc < %s --mattr=+complxnum,+neon,+fullfp16 -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s --mattr=+complxnum,+neon,+fullfp16,+sve -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s --mattr=+complxnum,+neon,+fullfp16,+sve2 -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s --global-isel --global-isel-abort=2 --mattr=+complxnum,+neon,+fullfp16 -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s --global-isel --global-isel-abort=2 --mattr=+complxnum,+neon,+fullfp16,+sve -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s --global-isel --global-isel-abort=2 --mattr=+complxnum,+neon,+fullfp16,+sve2 -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
target triple = "aarch64"
+; CHECK-GI: warning: Instruction selection used fallback path for complex_add_v16f16
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for complex_add_v32f16
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for complex_add_v16f16_with_intrinsic
+
; Expected to not transform
define <2 x half> @complex_add_v2f16(<2 x half> %a, <2 x half> %b) {
-; CHECK-LABEL: complex_add_v2f16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: mov h2, v0.h[1]
-; CHECK-NEXT: mov h3, v1.h[1]
-; CHECK-NEXT: fsub h1, h1, h2
-; CHECK-NEXT: fadd h0, h3, h0
-; CHECK-NEXT: mov v1.h[1], v0.h[0]
-; CHECK-NEXT: fmov d0, d1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: complex_add_v2f16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: mov h2, v0.h[1]
+; CHECK-SD-NEXT: mov h3, v1.h[1]
+; CHECK-SD-NEXT: fsub h1, h1, h2
+; CHECK-SD-NEXT: fadd h0, h3, h0
+; CHECK-SD-NEXT: mov v1.h[1], v0.h[0]
+; CHECK-SD-NEXT: fmov d0, d1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: complex_add_v2f16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NEXT: fsub h1, h1, h2
+; CHECK-GI-NEXT: fadd h0, h3, h0
+; CHECK-GI-NEXT: mov v1.h[1], v0.h[0]
+; CHECK-GI-NEXT: fmov d0, d1
+; CHECK-GI-NEXT: ret
entry:
%a.real = shufflevector <2 x half> %a, <2 x half> zeroinitializer, <1 x i32> <i32 0>
%a.imag = shufflevector <2 x half> %a, <2 x half> zeroinitializer, <1 x i32> <i32 1>
@@ -162,17 +181,29 @@ entry:
; Expected not to transform as it is an integer type
define <16 x i16> @complex_add_v16i16(<16 x i16> %a, <16 x i16> %b) {
-; CHECK-LABEL: complex_add_v16i16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: uzp1 v4.8h, v2.8h, v3.8h
-; CHECK-NEXT: uzp1 v5.8h, v0.8h, v1.8h
-; CHECK-NEXT: uzp2 v0.8h, v0.8h, v1.8h
-; CHECK-NEXT: uzp2 v1.8h, v2.8h, v3.8h
-; CHECK-NEXT: sub v2.8h, v4.8h, v0.8h
-; CHECK-NEXT: add v1.8h, v1.8h, v5.8h
-; CHECK-NEXT: zip1 v0.8h, v2.8h, v1.8h
-; CHECK-NEXT: zip2 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: complex_add_v16i16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: uzp1 v4.8h, v2.8h, v3.8h
+; CHECK-SD-NEXT: uzp1 v5.8h, v0.8h, v1.8h
+; CHECK-SD-NEXT: uzp2 v0.8h, v0.8h, v1.8h
+; CHECK-SD-NEXT: uzp2 v1.8h, v2.8h, v3.8h
+; CHECK-SD-NEXT: sub v2.8h, v4.8h, v0.8h
+; CHECK-SD-NEXT: add v1.8h, v1.8h, v5.8h
+; CHECK-SD-NEXT: zip1 v0.8h, v2.8h, v1.8h
+; CHECK-SD-NEXT: zip2 v1.8h, v2.8h, v1.8h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: complex_add_v16i16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: uzp1 v4.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: uzp2 v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: uzp1 v1.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: uzp2 v2.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: sub v1.8h, v1.8h, v0.8h
+; CHECK-GI-NEXT: add v2.8h, v2.8h, v4.8h
+; CHECK-GI-NEXT: zip1 v0.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: zip2 v1.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: ret
entry:
%a.real = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
%a.imag = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
diff --git a/llvm/test/CodeGen/AArch64/dllexport.ll b/llvm/test/CodeGen/AArch64/dllexport.ll
index 81ba674..580fb5f 100644
--- a/llvm/test/CodeGen/AArch64/dllexport.ll
+++ b/llvm/test/CodeGen/AArch64/dllexport.ll
@@ -1,5 +1,7 @@
; RUN: llc -mtriple aarch64-windows-gnu -filetype asm -o - %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-GNU
; RUN: llc -mtriple aarch64-windows-msvc -filetype asm -o - %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-MSVC
+; RUN: llc -mtriple arm64ec-windows-gnu -filetype asm -o - %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-GNU-EC
+; RUN: llc -mtriple arm64ec-windows-msvc -filetype asm -o - %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-MSVC-EC
define void @f() {
ret void
@@ -71,3 +73,40 @@ define weak_odr dllexport void @l() {
; CHECK-MSVC: .ascii " /EXPORT:s"
; CHECK-MSVC: .ascii " /EXPORT:t"
; CHECK-MSVC: .ascii " /EXPORT:u"
+
+; CHECK-GNU-EC-NOT: -export:f
+; CHECK-GNU-EC-NOT: -export:#f,EXPORTAS,f
+; CHECK-GNU-EC: .ascii " -export:#g,EXPORTAS,g
+; CHECK-GNU-EC: .ascii " -export:#h,EXPORTAS,h
+; CHECK-GNU-EC-NOT: -export:i
+; CHECK-GNU-EC-NOT: -export:#i,EXPORTAS,i
+; CHECK-GNU-EC: .ascii " -export:#j,EXPORTAS,j"
+; CHECK-GNU-EC: .ascii " -export:#k,EXPORTAS,k"
+; CHECK-GNU-EC: .ascii " -export:#l,EXPORTAS,l"
+; CHECK-GNU-EC: .ascii " -export:m,data"
+; CHECK-GNU-EC: .ascii " -export:n,data"
+; CHECK-GNU-EC: .ascii " -export:o,data"
+; CHECK-GNU-EC: .ascii " -export:p,data"
+; CHECK-GNU-EC: .ascii " -export:q,data"
+; CHECK-GNU-EC: .ascii " -export:r"
+; CHECK-GNU-EC: .ascii " -export:s"
+; CHECK-GNU-EC: .ascii " -export:t"
+; CHECK-GNU-EC: .ascii " -export:u"
+; CHECK-MSVC-EC-NOT: /EXPORT:f
+; CHECK-MSVC-EC-NOT: /EXPORT:#f,EXPORTAS,f
+; CHECK-MSVC-EC: .ascii " /EXPORT:#g,EXPORTAS,g"
+; CHECK-MSVC-EC: .ascii " /EXPORT:#h,EXPORTAS,h"
+; CHECK-MSVC-EC-NOT: /EXPORT:i
+; CHECK-MSVC-EC-NOT: /EXPORT:#i,EXPORTAS,i
+; CHECK-MSVC-EC: .ascii " /EXPORT:#j,EXPORTAS,j"
+; CHECK-MSVC-EC: .ascii " /EXPORT:#k,EXPORTAS,k"
+; CHECK-MSVC-EC: .ascii " /EXPORT:#l,EXPORTAS,l"
+; CHECK-MSVC-EC: .ascii " /EXPORT:m,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:n,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:o,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:p,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:q,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:r"
+; CHECK-MSVC-EC: .ascii " /EXPORT:s"
+; CHECK-MSVC-EC: .ascii " /EXPORT:t"
+; CHECK-MSVC-EC: .ascii " /EXPORT:u"
diff --git a/llvm/test/CodeGen/AArch64/extbinopload.ll b/llvm/test/CodeGen/AArch64/extbinopload.ll
index 1f68c77..dff4831 100644
--- a/llvm/test/CodeGen/AArch64/extbinopload.ll
+++ b/llvm/test/CodeGen/AArch64/extbinopload.ll
@@ -650,7 +650,7 @@ define <16 x i32> @extrause_load(ptr %p, ptr %q, ptr %r, ptr %s, ptr %z) {
; CHECK-NEXT: add x11, x3, #12
; CHECK-NEXT: str s1, [x4]
; CHECK-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-NEXT: ldp s0, s5, [x2]
+; CHECK-NEXT: ldp s0, s4, [x2]
; CHECK-NEXT: ushll v2.8h, v0.8b, #0
; CHECK-NEXT: umov w9, v2.h[0]
; CHECK-NEXT: umov w10, v2.h[1]
@@ -662,24 +662,25 @@ define <16 x i32> @extrause_load(ptr %p, ptr %q, ptr %r, ptr %s, ptr %z) {
; CHECK-NEXT: ushll v2.8h, v2.8b, #0
; CHECK-NEXT: mov v0.b[10], w9
; CHECK-NEXT: add x9, x1, #4
-; CHECK-NEXT: uzp1 v1.8b, v1.8b, v2.8b
+; CHECK-NEXT: mov v1.d[1], v2.d[0]
; CHECK-NEXT: mov v0.b[11], w10
; CHECK-NEXT: add x10, x1, #12
+; CHECK-NEXT: bic v1.8h, #255, lsl #8
; CHECK-NEXT: ld1 { v0.s }[3], [x3], #4
-; CHECK-NEXT: ldr s4, [x0, #12]
-; CHECK-NEXT: ldp s3, s16, [x0, #4]
-; CHECK-NEXT: ld1 { v5.s }[1], [x3]
-; CHECK-NEXT: ldp s6, s7, [x2, #8]
-; CHECK-NEXT: ld1 { v4.s }[1], [x10]
-; CHECK-NEXT: ld1 { v3.s }[1], [x9]
-; CHECK-NEXT: ld1 { v6.s }[1], [x8]
-; CHECK-NEXT: ld1 { v7.s }[1], [x11]
+; CHECK-NEXT: ldr s3, [x0, #12]
+; CHECK-NEXT: ldp s2, s7, [x0, #4]
+; CHECK-NEXT: ld1 { v4.s }[1], [x3]
+; CHECK-NEXT: ldp s5, s6, [x2, #8]
+; CHECK-NEXT: ld1 { v3.s }[1], [x10]
+; CHECK-NEXT: ld1 { v2.s }[1], [x9]
+; CHECK-NEXT: ld1 { v5.s }[1], [x8]
+; CHECK-NEXT: ld1 { v6.s }[1], [x11]
; CHECK-NEXT: add x8, x1, #8
-; CHECK-NEXT: ld1 { v16.s }[1], [x8]
-; CHECK-NEXT: uaddl v2.8h, v3.8b, v4.8b
-; CHECK-NEXT: ushll v3.8h, v6.8b, #0
-; CHECK-NEXT: uaddl v4.8h, v5.8b, v7.8b
-; CHECK-NEXT: uaddl v1.8h, v1.8b, v16.8b
+; CHECK-NEXT: ld1 { v7.s }[1], [x8]
+; CHECK-NEXT: uaddl v2.8h, v2.8b, v3.8b
+; CHECK-NEXT: ushll v3.8h, v5.8b, #0
+; CHECK-NEXT: uaddl v4.8h, v4.8b, v6.8b
+; CHECK-NEXT: uaddw v1.8h, v1.8h, v7.8b
; CHECK-NEXT: uaddw2 v5.8h, v3.8h, v0.16b
; CHECK-NEXT: ushll v0.4s, v2.4h, #3
; CHECK-NEXT: ushll2 v2.4s, v2.8h, #3
diff --git a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
new file mode 100644
index 0000000..504222e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
@@ -0,0 +1,1114 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+; CHECK-GI: warning: Instruction selection used fallback path for extract_v4i32_vector_insert
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for extract_v4i32_vector_insert_const
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for extract_v4i32_vector_extract
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for extract_v4i32_vector_extract_const
+
+define i64 @extract_v2i64_undef_index(<2 x i64> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v2i64_undef_index:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov x0, d0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2i64_undef_index:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: str q0, [sp, #-16]!
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: ldr x0, [sp], #16
+; CHECK-GI-NEXT: ret
+entry:
+ %d = extractelement <2 x i64> %a, i32 undef
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_undef_vector(<2 x i64> %a, i32 %c) {
+; CHECK-LABEL: extract_v2i64_undef_vector:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ret
+entry:
+ %d = extractelement <2 x i64> undef, i32 %c
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_opaque(<2 x i64> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v2i64_opaque:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: bfi x8, x0, #3, #1
+; CHECK-SD-NEXT: ldr x0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2i64_opaque:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: mov w9, w0
+; CHECK-GI-NEXT: mov x8, sp
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: and x9, x9, #0x1
+; CHECK-GI-NEXT: ldr x0, [x8, x9, lsl #3]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %d = extractelement <2 x i64> %a, i32 %c
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_oob(<2 x i64> %a, i32 %c) {
+; CHECK-LABEL: extract_v2i64_oob:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ret
+entry:
+ %d = extractelement <2 x i64> %a, i32 5
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_freeze(<2 x i64> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v2i64_freeze:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: bfi x8, x0, #3, #1
+; CHECK-SD-NEXT: ldr x0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2i64_freeze:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: mov w9, w0
+; CHECK-GI-NEXT: mov x8, sp
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: and x9, x9, #0x1
+; CHECK-GI-NEXT: ldr x0, [x8, x9, lsl #3]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %fvector = freeze <2 x i64> %a
+ %d = extractelement <2 x i64> %fvector, i32 %c
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_extract_of_insert(<2 x i64> %a, i64 %element, i64 %c) {
+; CHECK-LABEL: extract_v2i64_extract_of_insert:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ret
+entry:
+ %vector = insertelement <2 x i64> %a, i64 %element, i64 %c
+ %d = extractelement <2 x i64> %vector, i64 %c
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_extract_of_insert_different_const(<2 x i64> %a, i64 %element) {
+; CHECK-SD-LABEL: extract_v2i64_extract_of_insert_different_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: mov x0, v0.d[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2i64_extract_of_insert_different_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov d0, v0.d[1]
+; CHECK-GI-NEXT: fmov x0, d0
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = insertelement <2 x i64> %a, i64 %element, i64 0
+ %d = extractelement <2 x i64> %vector, i64 1
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_extract_build_vector_const(<2 x i64> %a, i32 %c) {
+; CHECK-LABEL: extract_v2i64_extract_build_vector_const:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, #11 // =0xb
+; CHECK-NEXT: ret
+entry:
+ %d = extractelement <2 x i64> <i64 42, i64 11>, i32 1
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_extract_build_vector_opaque(<2 x i64> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v2i64_extract_build_vector_opaque:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: adrp x8, .LCPI8_0
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: ldr q0, [x8, :lo12:.LCPI8_0]
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: bfi x8, x0, #3, #1
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr x0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2i64_extract_build_vector_opaque:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: adrp x8, .LCPI8_0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI8_0]
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x1
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr x0, [x9, x8, lsl #3]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %d = extractelement <2 x i64> <i64 42, i64 11>, i32 %c
+ ret i64 %d
+}
+
+
+define i64 @extract_v2i32_zext(<2 x i32> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v2i32_zext:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: ushll v0.2d, v0.2s, #0
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #3, #1
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr x0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2i32_zext:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: ushll v0.2d, v0.2s, #0
+; CHECK-GI-NEXT: mov w9, w0
+; CHECK-GI-NEXT: mov x8, sp
+; CHECK-GI-NEXT: and x9, x9, #0x1
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr x0, [x8, x9, lsl #3]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %zvector = zext <2 x i32> %a to <2 x i64>
+ %d = extractelement <2 x i64> %zvector, i32 %c
+ ret i64 %d
+}
+
+define i64 @extract_v2double_fptosi(<2 x double> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v2double_fptosi:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #3, #1
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr x0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2double_fptosi:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d
+; CHECK-GI-NEXT: mov w9, w0
+; CHECK-GI-NEXT: mov x8, sp
+; CHECK-GI-NEXT: and x9, x9, #0x1
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr x0, [x8, x9, lsl #3]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = fptosi <2 x double> %a to <2 x i64>
+ %d = extractelement <2 x i64> %vector, i32 %c
+ ret i64 %d
+}
+
+define double @extract_v2double_fneg(<2 x double> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v2double_fneg:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: fneg v0.2d, v0.2d
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #3, #1
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr d0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2double_fneg:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: fneg v0.2d, v0.2d
+; CHECK-GI-NEXT: mov w9, w0
+; CHECK-GI-NEXT: mov x8, sp
+; CHECK-GI-NEXT: and x9, x9, #0x1
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr d0, [x8, x9, lsl #3]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = fneg <2 x double> %a
+ %d = extractelement <2 x double> %vector, i32 %c
+ ret double %d
+}
+
+define i32 @extract_v4i32_add(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_add:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: adrp x8, .LCPI12_0
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI12_0]
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_add:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: adrp x8, .LCPI12_0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI12_0]
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = add <4 x i32> %a, <i32 42, i32 11, i32 17, i32 6>
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define float @extract_v4i32_minimum(<4 x float> %a, <4 x float> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_minimum:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: fmin v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr s0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_minimum:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: fmin v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr s0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = call <4 x float> @llvm.minimum.v4float(<4 x float> %a, <4 x float> %b)
+ %d = extractelement <4 x float> %vector, i32 %c
+ ret float %d
+}
+
+define float @extract_v4i32_minimum_build_vector(<4 x float> %a, <4 x float> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_minimum_build_vector:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: adrp x8, .LCPI14_0
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI14_0]
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: fmin v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr s0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_minimum_build_vector:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: adrp x8, .LCPI14_0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI14_0]
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: fmin v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr s0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = call <4 x float> @llvm.minimum.v4float(<4 x float> %a, <4 x float> <float 42.0, float 11.0, float 17.0, float 6.0>)
+ %d = extractelement <4 x float> %vector, i32 %c
+ ret float %d
+}
+
+define float @extract_v4i32_minimum_build_vector_const(<4 x float> %a, <4 x float> %b, i32 %c) {
+; CHECK-LABEL: extract_v4i32_minimum_build_vector_const:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: adrp x8, .LCPI15_0
+; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI15_0]
+; CHECK-NEXT: fmin v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: mov s0, v0.s[1]
+; CHECK-NEXT: ret
+entry:
+ %vector = call <4 x float> @llvm.minimum.v4float(<4 x float> %a, <4 x float> <float 42.0, float 11.0, float 17.0, float 6.0>)
+ %d = extractelement <4 x float> %vector, i32 1
+ ret float %d
+}
+
+define float @extract_v4i32_copysign_build_vector(<4 x float> %a, <4 x float> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_copysign_build_vector:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: adrp x8, .LCPI16_0
+; CHECK-SD-NEXT: mvni v1.4s, #128, lsl #24
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: ldr q2, [x8, :lo12:.LCPI16_0]
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: bif v0.16b, v2.16b, v1.16b
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr s0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_copysign_build_vector:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: mvni v1.4s, #128, lsl #24
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr s0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = call <4 x float> @llvm.copysign.v4float(<4 x float> %a, <4 x float> <float 42.0, float 11.0, float 17.0, float 6.0>)
+ %d = extractelement <4 x float> %vector, i32 %c
+ ret float %d
+}
+
+define float @extract_v4i32_copysign_build_vector_const(<4 x float> %a, <4 x float> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_copysign_build_vector_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: adrp x8, .LCPI17_0
+; CHECK-SD-NEXT: mvni v1.4s, #128, lsl #24
+; CHECK-SD-NEXT: ldr q2, [x8, :lo12:.LCPI17_0]
+; CHECK-SD-NEXT: bif v0.16b, v2.16b, v1.16b
+; CHECK-SD-NEXT: mov s0, v0.s[2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_copysign_build_vector_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mvni v1.4s, #128, lsl #24
+; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT: mov s0, v0.s[2]
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = call <4 x float> @llvm.copysign.v4float(<4 x float> %a, <4 x float> <float 42.0, float 11.0, float 17.0, float 6.0>)
+ %d = extractelement <4 x float> %vector, i32 2
+ ret float %d
+}
+
+
+define i32 @extract_v4i32_icmp(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_icmp:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: adrp x8, .LCPI18_0
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI18_0]
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: cmge v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_icmp:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: adrp x8, .LCPI18_0
+; CHECK-GI-NEXT: movi v2.4s, #1
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI18_0]
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: cmge v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = icmp sle <4 x i32> %a, <i32 42, i32 11, i32 17, i32 6>
+ %zvector = zext <4 x i1> %vector to <4 x i32>
+ %d = extractelement <4 x i32> %zvector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_icmp_const(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_icmp_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: adrp x8, .LCPI19_0
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI19_0]
+; CHECK-SD-NEXT: cmge v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT: mov w0, v0.s[2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_icmp_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, .LCPI19_0
+; CHECK-GI-NEXT: movi v2.4s, #1
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI19_0]
+; CHECK-GI-NEXT: cmge v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT: mov s0, v0.s[2]
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = icmp sle <4 x i32> %a, <i32 42, i32 11, i32 17, i32 6>
+ %zvector = zext <4 x i1> %vector to <4 x i32>
+ %d = extractelement <4 x i32> %zvector, i32 2
+ ret i32 %d
+}
+
+define i32 @extract_v4float_fcmp(<4 x float> %a, <4 x float> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4float_fcmp:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: movi v1.4s, #1
+; CHECK-SD-NEXT: fcmeq v0.4s, v0.4s, v0.4s
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: bic v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4float_fcmp:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: fmov v1.4s, #1.00000000
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: fcmge v2.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT: movi v1.4s, #1
+; CHECK-GI-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT: bic v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = fcmp uno <4 x float> %a, <float 1.0, float 1.0, float 1.0, float 1.0>
+ %zvector = zext <4 x i1> %vector to <4 x i32>
+ %d = extractelement <4 x i32> %zvector, i32 %c
+ ret i32 %d
+}
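+; There is no direct "unordered" vector compare, so SDAG tests for NaN with a
+; self-compare (fcmeq) and inverts with bic, while GlobalISel builds the
+; ordered predicate from fcmge/fcmgt and likewise inverts it.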
+
+define i32 @extract_v4float_fcmp_const(<4 x float> %a, <4 x float> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4float_fcmp_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v1.4s, #1
+; CHECK-SD-NEXT: fcmeq v0.4s, v0.4s, v0.4s
+; CHECK-SD-NEXT: bic v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: mov w0, v0.s[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4float_fcmp_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov v1.4s, #1.00000000
+; CHECK-GI-NEXT: fcmge v2.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT: movi v1.4s, #1
+; CHECK-GI-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT: bic v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: mov s0, v0.s[1]
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = fcmp uno <4 x float> %a, <float 1.0, float 1.0, float 1.0, float 1.0>
+ %zvector = zext <4 x i1> %vector to <4 x i32>
+ %d = extractelement <4 x i32> %zvector, i32 1
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_select(<4 x i32> %a, <4 x i32> %b, i32 %c, <4 x i1> %cond) {
+; CHECK-SD-LABEL: extract_v4i32_select:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-SD-NEXT: adrp x8, .LCPI22_0
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: ldr q2, [x8, :lo12:.LCPI22_0]
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: shl v1.4s, v1.4s, #31
+; CHECK-SD-NEXT: cmlt v1.4s, v1.4s, #0
+; CHECK-SD-NEXT: bif v0.16b, v2.16b, v1.16b
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_select:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-GI-NEXT: adrp x8, .LCPI22_0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI22_0]
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = select <4 x i1> %cond, <4 x i32> %a, <4 x i32> <i32 42, i32 11, i32 17, i32 6>
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
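+; The <4 x i1> condition arrives packed in a D register; both selectors widen
+; it (ushll), move the bit to the sign position (shl #31), and sign-extend it
+; into a full lane mask (cmlt vs. sshr) before the bif select.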
+
+define i32 @extract_v4i32_select_const(<4 x i32> %a, <4 x i32> %b, i32 %c, <4 x i1> %cond) {
+; CHECK-SD-LABEL: extract_v4i32_select_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-SD-NEXT: movi v2.4s, #17
+; CHECK-SD-NEXT: shl v1.4s, v1.4s, #31
+; CHECK-SD-NEXT: cmlt v1.4s, v1.4s, #0
+; CHECK-SD-NEXT: bif v0.16b, v2.16b, v1.16b
+; CHECK-SD-NEXT: mov w0, v0.s[2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_select_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-GI-NEXT: adrp x8, .LCPI23_0
+; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI23_0]
+; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
+; CHECK-GI-NEXT: mov s0, v0.s[2]
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = select <4 x i1> %cond, <4 x i32> %a, <4 x i32> <i32 42, i32 11, i32 17, i32 6>
+ %d = extractelement <4 x i32> %vector, i32 2
+ ret i32 %d
+}
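+; Only lane 2 of the select is demanded, so SDAG shrinks the constant
+; <42, 11, 17, 6> to a movi #17 splat; GlobalISel still loads the full
+; constant-pool vector.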
+
+define i32 @extract_v4i32_abs(<4 x float> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_abs:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: frintp v0.4s, v0.4s
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: frintm v0.4s, v0.4s
+; CHECK-SD-NEXT: fabs v0.4s, v0.4s
+; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-SD-NEXT: abs v0.4s, v0.4s
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_abs:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: frintp v0.4s, v0.4s
+; CHECK-GI-NEXT: mov w9, w0
+; CHECK-GI-NEXT: mov x8, sp
+; CHECK-GI-NEXT: and x9, x9, #0x3
+; CHECK-GI-NEXT: frintm v0.4s, v0.4s
+; CHECK-GI-NEXT: fabs v0.4s, v0.4s
+; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-NEXT: abs v0.4s, v0.4s
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x8, x9, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %ceil = call <4 x float> @llvm.ceil.v4float(<4 x float> %a)
+ %floor = call <4 x float> @llvm.floor.v4float(<4 x float> %ceil)
+ %fabs = call <4 x float> @llvm.fabs.v4float(<4 x float> %floor)
+ %abs = fptosi <4 x float> %fabs to <4 x i32>
+ %vector = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %abs, i1 0)
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
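+; The ceil/floor/fabs/fptosi/abs chain stays fully vectorized on both paths;
+; only the final variable-index extract goes through the stack.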
+
+define i32 @extract_v4i32_abs_const(<4 x float> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_abs_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: mov w0, #4 // =0x4
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_abs_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, .LCPI25_0
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI25_0]
+; CHECK-GI-NEXT: frintp v0.4s, v0.4s
+; CHECK-GI-NEXT: frintm v0.4s, v0.4s
+; CHECK-GI-NEXT: fabs v0.4s, v0.4s
+; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-NEXT: abs v0.4s, v0.4s
+; CHECK-GI-NEXT: mov s0, v0.s[1]
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
+entry:
+ %ceil = call <4 x float> @llvm.ceil.v4float(<4 x float> <float 1.0, float 4.0, float 3.0, float 2.0>)
+ %floor = call <4 x float> @llvm.floor.v4float(<4 x float> %ceil)
+ %fabs = call <4 x float> @llvm.fabs.v4float(<4 x float> %floor)
+ %abs = fptosi <4 x float> %fabs to <4 x i32>
+ %vector = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %abs, i1 0)
+ %d = extractelement <4 x i32> %vector, i32 1
+ ret i32 %d
+}
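+; With a constant input vector, SDAG constant-folds the entire chain down to
+; the scalar answer (lane 1 of <1, 4, 3, 2> is 4); GlobalISel only folds the
+; input into the constant pool and replays each vector operation.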
+
+define i32 @extract_v4i32_abs_half_const(<4 x float> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_abs_half_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: adrp x8, .LCPI26_0
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: ldr q0, [x8, :lo12:.LCPI26_0]
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_abs_half_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: adrp x8, .LCPI26_0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI26_0]
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: frintp v0.4s, v0.4s
+; CHECK-GI-NEXT: frintm v0.4s, v0.4s
+; CHECK-GI-NEXT: fabs v0.4s, v0.4s
+; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-NEXT: abs v0.4s, v0.4s
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %ceil = call <4 x float> @llvm.ceil.v4float(<4 x float> <float 1.0, float 4.0, float 3.0, float 2.0>)
+ %floor = call <4 x float> @llvm.floor.v4float(<4 x float> %ceil)
+ %fabs = call <4 x float> @llvm.fabs.v4float(<4 x float> %floor)
+ %abs = fptosi <4 x float> %fabs to <4 x i32>
+ %vector = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %abs, i1 0)
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
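+; Same chain with a variable extract index: SDAG still folds the arithmetic
+; into a pooled result vector, leaving just the stack extract, whereas
+; GlobalISel pools only the input and recomputes every step.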
+
+define i32 @extract_v4i32_vector_insert(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+; CHECK-LABEL: extract_v4i32_vector_insert:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: mov x8, sp
+; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT: bfi x8, x0, #2, #2
+; CHECK-NEXT: mov v1.d[1], v0.d[0]
+; CHECK-NEXT: str q1, [sp]
+; CHECK-NEXT: ldr w0, [x8]
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: ret
+entry:
+ %vector = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> %a, <2 x i32> %b, i64 0)
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_vector_insert_const(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+; CHECK-LABEL: extract_v4i32_vector_insert_const:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: mov w0, v1.s[1]
+; CHECK-NEXT: ret
+entry:
+ %vector = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> %a, <2 x i32> %b, i64 0)
+ %d = extractelement <4 x i32> %vector, i32 1
+ ret i32 %d
+}
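+; Inserting the <2 x i32> at offset 0 overlays %b below the high half of %a
+; (the ext plus d-lane move in the variable-index case above); with a
+; constant index the insert folds away and lane 1 is read directly from %b.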
+
+define i32 @extract_v4i32_vector_extract(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+; CHECK-LABEL: extract_v4i32_vector_extract:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: mov x8, sp
+; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT: str q0, [sp]
+; CHECK-NEXT: bfi x8, x0, #2, #2
+; CHECK-NEXT: ldr w0, [x8]
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: ret
+entry:
+ %vector = call <4 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %a, i64 0)
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_vector_extract_const(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+; CHECK-LABEL: extract_v4i32_vector_extract_const:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+entry:
+ %vector = call <4 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %a, i64 0)
+ %d = extractelement <4 x i32> %vector, i32 0
+ ret i32 %d
+}
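+; The offset-0 subvector extract is a no-op, so only the element extract
+; remains: a stack round-trip for the variable index, a single fmov from s0
+; for lane 0.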
+
+define i32 @extract_v4i32_load(<4 x i32> %a, <2 x i32> %b, i32 %c, ptr %arg) {
+; CHECK-SD-LABEL: extract_v4i32_load:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: and x8, x0, #0x3
+; CHECK-SD-NEXT: ldr w0, [x1, x8, lsl #2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_load:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: ldr w0, [x1, x8, lsl #2]
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = load <4 x i32>, ptr %arg
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_load_const(<4 x i32> %a, <2 x i32> %b, i32 %c, ptr %arg) {
+; CHECK-LABEL: extract_v4i32_load_const:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr w0, [x1]
+; CHECK-NEXT: ret
+entry:
+ %vector = load <4 x i32>, ptr %arg
+ %d = extractelement <4 x i32> %vector, i32 0
+ ret i32 %d
+}
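+; Extract-of-load folds into one scalar load: the masked index becomes a
+; register-offset ldr, and the constant-index case is a plain ldr from the
+; pointer, with no vector load at all.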
+
+define double @extract_v4i32_bitcast(<4 x i32> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_bitcast:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: bfi x8, x0, #3, #1
+; CHECK-SD-NEXT: ldr d0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_bitcast:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: mov w9, w0
+; CHECK-GI-NEXT: mov x8, sp
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: and x9, x9, #0x1
+; CHECK-GI-NEXT: ldr d0, [x8, x9, lsl #3]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = bitcast <4 x i32> %a to <2 x double>
+ %d = extractelement <2 x double> %vector, i32 %c
+ ret double %d
+}
+
+define double @extract_v4i32_bitcast_const(<4 x i32> %a, i32 %c) {
+; CHECK-LABEL: extract_v4i32_bitcast_const:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+entry:
+ %vector = bitcast <4 x i32> %a to <2 x double>
+ %d = extractelement <2 x double> %vector, i32 0
+ ret double %d
+}
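+; The bitcast is free: the variable case spills q0 and indexes with a single
+; bit, and lane 0 of the <2 x double> view is just d0 itself.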
+
+define i32 @extract_v4i32_shuffle(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_shuffle:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: uzp1 v1.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: mov v1.s[3], v0.s[3]
+; CHECK-SD-NEXT: str q1, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_shuffle:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: adrp x8, .LCPI35_0
+; CHECK-GI-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI35_0]
+; CHECK-GI-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 3>
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_shuffle_const(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_shuffle_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov w0, s1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_shuffle_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, .LCPI36_0
+; CHECK-GI-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI36_0]
+; CHECK-GI-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
+; CHECK-GI-NEXT: mov s0, v0.s[2]
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 3>
+ %d = extractelement <4 x i32> %vector, i32 2
+ ret i32 %d
+}
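+; SDAG resolves lane 2 of the <0, 2, 4, 3> mask to element 0 of %b and reads
+; s1 directly; GlobalISel still emits the generic two-register tbl shuffle
+; before extracting.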
+
+define i32 @extract_v4i32_splat(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_splat:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: movi v0.4s, #11
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_splat:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: movi v0.4s, #11
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %d = extractelement <4 x i32> splat (i32 11), i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_splat_const(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+; CHECK-LABEL: extract_v4i32_splat_const:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, #11 // =0xb
+; CHECK-NEXT: ret
+entry:
+ %d = extractelement <4 x i32> splat (i32 11), i32 0
+ ret i32 %d
+}
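+; Even for a splat, the variable-index extract spills the movi #11 vector and
+; reloads; only the constant index folds to an immediate move.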
+
+define i32 @extract_v4i32_vp_add(<4 x i32> %a, <4 x i32> %b, i32 %c, <4 x i1> %mask, i32 %evl) {
+; CHECK-SD-LABEL: extract_v4i32_vp_add:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_vp_add:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %mask, i32 %evl)
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_vp_add_const(<4 x i32> %a, <4 x i32> %b, i32 %c, <4 x i1> %mask, i32 %evl) {
+; CHECK-SD-LABEL: extract_v4i32_vp_add_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: mov w0, v0.s[3]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_vp_add_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: mov s0, v0.s[3]
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %mask, i32 %evl)
+ %d = extractelement <4 x i32> %vector, i32 3
+ ret i32 %d
+}
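+; llvm.vp.add lowers to an ordinary NEON add here; the %mask and %evl
+; operands impose no extra work, and the extract follows the usual
+; variable/constant-index patterns.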
+
+define i32 @extract_v4i32_phi(i64 %val, i32 %limit, ptr %ptr) {
+; CHECK-SD-LABEL: extract_v4i32_phi:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: dup v1.2s, w0
+; CHECK-SD-NEXT: adrp x8, .LCPI41_0
+; CHECK-SD-NEXT: movi v0.2s, #16
+; CHECK-SD-NEXT: ldr d2, [x8, :lo12:.LCPI41_0]
+; CHECK-SD-NEXT: add v1.2s, v1.2s, v2.2s
+; CHECK-SD-NEXT: .LBB41_1: // %loop
+; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-SD-NEXT: fmov w8, s1
+; CHECK-SD-NEXT: add v1.2s, v1.2s, v0.2s
+; CHECK-SD-NEXT: cmp w8, w1
+; CHECK-SD-NEXT: add w0, w8, #10
+; CHECK-SD-NEXT: str w0, [x2, w8, sxtw #2]
+; CHECK-SD-NEXT: b.lo .LBB41_1
+; CHECK-SD-NEXT: // %bb.2: // %ret
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_phi:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, .LCPI41_0
+; CHECK-GI-NEXT: dup v0.2d, x0
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI41_0]
+; CHECK-GI-NEXT: add v1.2d, v0.2d, v1.2d
+; CHECK-GI-NEXT: movi v0.2s, #16
+; CHECK-GI-NEXT: xtn v1.2s, v1.2d
+; CHECK-GI-NEXT: .LBB41_1: // %loop
+; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: fmov w9, s1
+; CHECK-GI-NEXT: add v1.2s, v1.2s, v0.2s
+; CHECK-GI-NEXT: cmp w8, w1
+; CHECK-GI-NEXT: add w0, w9, #10
+; CHECK-GI-NEXT: str w0, [x2, w8, sxtw #2]
+; CHECK-GI-NEXT: b.lo .LBB41_1
+; CHECK-GI-NEXT: // %bb.2: // %ret
+; CHECK-GI-NEXT: ret
+entry:
+ %tempvector = insertelement <2 x i64> undef, i64 %val, i32 0
+ %vector = shufflevector <2 x i64> %tempvector, <2 x i64> undef, <2 x i32> zeroinitializer
+ %0 = add <2 x i64> %vector, <i64 1, i64 2>
+ %1 = trunc <2 x i64> %0 to <2 x i32>
+ br label %loop
+
+loop:
+ %2 = phi <2 x i32> [ %1, %entry ], [ %inc, %loop ]
+ %elt = extractelement <2 x i32> %2, i32 0
+ %end = icmp ult i32 %elt, %limit
+ %3 = add i32 10, %elt
+ %4 = sext i32 %elt to i64
+ %5 = getelementptr i32, ptr %ptr, i64 %4
+ store i32 %3, ptr %5
+ %inc = add <2 x i32> %2, <i32 16, i32 16>
+ br i1 %end, label %loop, label %ret
+
+ret:
+ ret i32 %3
+}
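+; For the loop-carried phi, SDAG narrows the IV setup to a 32-bit add against
+; a pre-truncated constant; GlobalISel adds in 64 bits and truncates with
+; xtn, and also reads the phi lane twice (two fmov's from s1).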
+
diff --git a/llvm/test/CodeGen/AArch64/extractvector-oob-load.mir b/llvm/test/CodeGen/AArch64/extractvector-oob-load.mir
index e8c5819..e7e8c93 100644
--- a/llvm/test/CodeGen/AArch64/extractvector-oob-load.mir
+++ b/llvm/test/CodeGen/AArch64/extractvector-oob-load.mir
@@ -22,11 +22,8 @@ body: |
; CHECK-LABEL: name: f
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64))
- ; CHECK-NEXT: $x0 = COPY [[LOAD]](s64)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $x0 = COPY [[DEF]](s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%0:_(p0) = COPY $x0
%3:_(s64) = G_CONSTANT i64 224567957
diff --git a/llvm/test/CodeGen/AArch64/fcmp.ll b/llvm/test/CodeGen/AArch64/fcmp.ll
index 2d0b557..9916aee 100644
--- a/llvm/test/CodeGen/AArch64/fcmp.ll
+++ b/llvm/test/CodeGen/AArch64/fcmp.ll
@@ -1108,61 +1108,54 @@ define <7 x i32> @v7f16_i32(<7 x half> %a, <7 x half> %b, <7 x i32> %d, <7 x i32
;
; CHECK-GI-FP16-LABEL: v7f16_i32:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: fcmgt v1.8h, v1.8h, v0.8h
-; CHECK-GI-FP16-NEXT: mov w12, #31 // =0x1f
-; CHECK-GI-FP16-NEXT: ldr s4, [sp]
-; CHECK-GI-FP16-NEXT: fmov s2, w12
+; CHECK-GI-FP16-NEXT: fcmgt v0.8h, v1.8h, v0.8h
+; CHECK-GI-FP16-NEXT: mov w10, #31 // =0x1f
+; CHECK-GI-FP16-NEXT: ldr s3, [sp]
+; CHECK-GI-FP16-NEXT: fmov s1, w10
; CHECK-GI-FP16-NEXT: fmov s6, w0
-; CHECK-GI-FP16-NEXT: ldr s5, [sp, #8]
+; CHECK-GI-FP16-NEXT: ldr s4, [sp, #8]
; CHECK-GI-FP16-NEXT: ldr s7, [sp, #24]
; CHECK-GI-FP16-NEXT: ldr s16, [sp, #32]
-; CHECK-GI-FP16-NEXT: umov w9, v1.h[4]
-; CHECK-GI-FP16-NEXT: umov w8, v1.h[0]
-; CHECK-GI-FP16-NEXT: umov w11, v1.h[5]
-; CHECK-GI-FP16-NEXT: umov w10, v1.h[1]
-; CHECK-GI-FP16-NEXT: mov v2.s[1], w12
-; CHECK-GI-FP16-NEXT: umov w13, v1.h[2]
+; CHECK-GI-FP16-NEXT: umov w8, v0.h[4]
+; CHECK-GI-FP16-NEXT: umov w9, v0.h[5]
+; CHECK-GI-FP16-NEXT: mov v1.s[1], w10
; CHECK-GI-FP16-NEXT: mov v6.s[1], w1
; CHECK-GI-FP16-NEXT: mov v7.s[1], v16.s[0]
; CHECK-GI-FP16-NEXT: ldr s16, [sp, #40]
-; CHECK-GI-FP16-NEXT: fmov s3, w9
-; CHECK-GI-FP16-NEXT: fmov s0, w8
-; CHECK-GI-FP16-NEXT: umov w8, v1.h[6]
-; CHECK-GI-FP16-NEXT: mov v2.s[2], w12
-; CHECK-GI-FP16-NEXT: umov w9, v1.h[3]
+; CHECK-GI-FP16-NEXT: fmov s2, w8
+; CHECK-GI-FP16-NEXT: umov w8, v0.h[6]
+; CHECK-GI-FP16-NEXT: mov v1.s[2], w10
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: mov v6.s[2], w2
; CHECK-GI-FP16-NEXT: mov v7.s[2], v16.s[0]
-; CHECK-GI-FP16-NEXT: mov v3.s[1], w11
-; CHECK-GI-FP16-NEXT: mov v0.s[1], w10
-; CHECK-GI-FP16-NEXT: mov w10, #-1 // =0xffffffff
-; CHECK-GI-FP16-NEXT: fmov s1, w10
-; CHECK-GI-FP16-NEXT: neg v17.4s, v2.4s
+; CHECK-GI-FP16-NEXT: mov v2.s[1], w9
+; CHECK-GI-FP16-NEXT: mov w9, #-1 // =0xffffffff
+; CHECK-GI-FP16-NEXT: fmov s5, w9
+; CHECK-GI-FP16-NEXT: neg v17.4s, v1.4s
+; CHECK-GI-FP16-NEXT: shl v0.4s, v0.4s, #31
; CHECK-GI-FP16-NEXT: mov v6.s[3], w3
+; CHECK-GI-FP16-NEXT: mov v2.s[2], w8
+; CHECK-GI-FP16-NEXT: fmov w8, s3
+; CHECK-GI-FP16-NEXT: fmov s3, w7
+; CHECK-GI-FP16-NEXT: mov v5.s[1], w9
+; CHECK-GI-FP16-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-FP16-NEXT: mov v3.s[1], w8
+; CHECK-GI-FP16-NEXT: fmov w8, s4
+; CHECK-GI-FP16-NEXT: ldr s4, [sp, #16]
+; CHECK-GI-FP16-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-GI-FP16-NEXT: fmov s2, w4
+; CHECK-GI-FP16-NEXT: mov v5.s[2], w9
+; CHECK-GI-FP16-NEXT: mov v2.s[1], w5
; CHECK-GI-FP16-NEXT: mov v3.s[2], w8
+; CHECK-GI-FP16-NEXT: sshl v1.4s, v1.4s, v17.4s
; CHECK-GI-FP16-NEXT: fmov w8, s4
-; CHECK-GI-FP16-NEXT: fmov s4, w7
-; CHECK-GI-FP16-NEXT: mov v0.s[2], w13
-; CHECK-GI-FP16-NEXT: mov v1.s[1], w10
-; CHECK-GI-FP16-NEXT: mov v4.s[1], w8
-; CHECK-GI-FP16-NEXT: fmov w8, s5
-; CHECK-GI-FP16-NEXT: ldr s5, [sp, #16]
-; CHECK-GI-FP16-NEXT: ushl v2.4s, v3.4s, v2.4s
-; CHECK-GI-FP16-NEXT: fmov s3, w4
-; CHECK-GI-FP16-NEXT: mov v0.s[3], w9
-; CHECK-GI-FP16-NEXT: mov v1.s[2], w10
-; CHECK-GI-FP16-NEXT: mov v3.s[1], w5
-; CHECK-GI-FP16-NEXT: mov v4.s[2], w8
-; CHECK-GI-FP16-NEXT: sshl v2.4s, v2.4s, v17.4s
-; CHECK-GI-FP16-NEXT: fmov w8, s5
-; CHECK-GI-FP16-NEXT: shl v0.4s, v0.4s, #31
-; CHECK-GI-FP16-NEXT: eor v1.16b, v2.16b, v1.16b
-; CHECK-GI-FP16-NEXT: mov v3.s[2], w6
-; CHECK-GI-FP16-NEXT: mov v4.s[3], w8
-; CHECK-GI-FP16-NEXT: sshr v0.4s, v0.4s, #31
-; CHECK-GI-FP16-NEXT: and v1.16b, v7.16b, v1.16b
-; CHECK-GI-FP16-NEXT: and v2.16b, v3.16b, v2.16b
-; CHECK-GI-FP16-NEXT: bsl v0.16b, v6.16b, v4.16b
-; CHECK-GI-FP16-NEXT: orr v1.16b, v2.16b, v1.16b
+; CHECK-GI-FP16-NEXT: eor v4.16b, v1.16b, v5.16b
+; CHECK-GI-FP16-NEXT: mov v2.s[2], w6
+; CHECK-GI-FP16-NEXT: mov v3.s[3], w8
+; CHECK-GI-FP16-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-GI-FP16-NEXT: and v2.16b, v7.16b, v4.16b
+; CHECK-GI-FP16-NEXT: bsl v0.16b, v6.16b, v3.16b
+; CHECK-GI-FP16-NEXT: orr v1.16b, v1.16b, v2.16b
; CHECK-GI-FP16-NEXT: mov s2, v0.s[1]
; CHECK-GI-FP16-NEXT: mov s3, v0.s[2]
; CHECK-GI-FP16-NEXT: mov s4, v0.s[3]
diff --git a/llvm/test/CodeGen/AArch64/fexplog.ll b/llvm/test/CodeGen/AArch64/fexplog.ll
index 519a297..93d3d96 100644
--- a/llvm/test/CodeGen/AArch64/fexplog.ll
+++ b/llvm/test/CodeGen/AArch64/fexplog.ll
@@ -36,6 +36,19 @@ entry:
ret half %c
}
+define <1 x double> @exp_v1f64(<1 x double> %x) {
+; CHECK-LABEL: exp_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl exp
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.exp.v1f64(<1 x double> %x)
+ ret <1 x double> %c
+}
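+; <1 x double> lives in d0, so the vector intrinsic lowers to a straight call
+; to the scalar libm routine; the only overhead is the x30 spill/reload. The
+; exp2/log/log2/log10 variants below follow the same pattern.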
+
define <2 x double> @exp_v2f64(<2 x double> %a) {
; CHECK-SD-LABEL: exp_v2f64:
; CHECK-SD: // %bb.0: // %entry
@@ -1293,6 +1306,19 @@ entry:
ret half %c
}
+define <1 x double> @exp2_v1f64(<1 x double> %x) {
+; CHECK-LABEL: exp2_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl exp2
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.exp2.v1f64(<1 x double> %x)
+ ret <1 x double> %c
+}
+
define <2 x double> @exp2_v2f64(<2 x double> %a) {
; CHECK-SD-LABEL: exp2_v2f64:
; CHECK-SD: // %bb.0: // %entry
@@ -2550,6 +2576,19 @@ entry:
ret half %c
}
+define <1 x double> @log_v1f64(<1 x double> %x) {
+; CHECK-LABEL: log_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl log
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.log.v1f64(<1 x double> %x)
+ ret <1 x double> %c
+}
+
define <2 x double> @log_v2f64(<2 x double> %a) {
; CHECK-SD-LABEL: log_v2f64:
; CHECK-SD: // %bb.0: // %entry
@@ -3807,6 +3846,19 @@ entry:
ret half %c
}
+define <1 x double> @log2_v1f64(<1 x double> %x) {
+; CHECK-LABEL: log2_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl log2
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.log2.v1f64(<1 x double> %x)
+ ret <1 x double> %c
+}
+
define <2 x double> @log2_v2f64(<2 x double> %a) {
; CHECK-SD-LABEL: log2_v2f64:
; CHECK-SD: // %bb.0: // %entry
@@ -5064,6 +5116,19 @@ entry:
ret half %c
}
+define <1 x double> @log10_v1f64(<1 x double> %x) {
+; CHECK-LABEL: log10_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl log10
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.log10.v1f64(<1 x double> %x)
+ ret <1 x double> %c
+}
+
define <2 x double> @log10_v2f64(<2 x double> %a) {
; CHECK-SD-LABEL: log10_v2f64:
; CHECK-SD: // %bb.0: // %entry
diff --git a/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll
index 1b1cfea..2ad5623 100644
--- a/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll
+++ b/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll
@@ -1,29 +1,50 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define {<2 x half>, <2 x half>} @vector_deinterleave_v2f16_v4f16(<4 x half> %vec) {
-; CHECK-LABEL: vector_deinterleave_v2f16_v4f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: dup v2.2s, v0.s[1]
-; CHECK-NEXT: mov v1.16b, v2.16b
-; CHECK-NEXT: mov v1.h[0], v0.h[1]
-; CHECK-NEXT: mov v0.h[1], v2.h[0]
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vector_deinterleave_v2f16_v4f16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: dup v2.2s, v0.s[1]
+; CHECK-SD-NEXT: mov v1.16b, v2.16b
+; CHECK-SD-NEXT: mov v1.h[0], v0.h[1]
+; CHECK-SD-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vector_deinterleave_v2f16_v4f16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: uzp1 v2.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: uzp2 v1.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: mov h0, v2.h[1]
+; CHECK-GI-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NEXT: mov v2.h[1], v0.h[0]
+; CHECK-GI-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-GI-NEXT: fmov d0, d2
+; CHECK-GI-NEXT: ret
%retval = call {<2 x half>, <2 x half>} @llvm.experimental.vector.deinterleave2.v4f16(<4 x half> %vec)
ret {<2 x half>, <2 x half>} %retval
}
define {<4 x half>, <4 x half>} @vector_deinterleave_v4f16_v8f16(<8 x half> %vec) {
-; CHECK-LABEL: vector_deinterleave_v4f16_v8f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: uzp1 v2.4h, v0.4h, v1.4h
-; CHECK-NEXT: uzp2 v1.4h, v0.4h, v1.4h
-; CHECK-NEXT: fmov d0, d2
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vector_deinterleave_v4f16_v8f16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT: uzp1 v2.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp2 v1.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: fmov d0, d2
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vector_deinterleave_v4f16_v8f16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: uzp1 v2.8h, v0.8h, v0.8h
+; CHECK-GI-NEXT: uzp2 v1.8h, v0.8h, v0.8h
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-GI-NEXT: fmov d0, d2
+; CHECK-GI-NEXT: ret
%retval = call {<4 x half>, <4 x half>} @llvm.experimental.vector.deinterleave2.v8f16(<8 x half> %vec)
ret {<4 x half>, <4 x half>} %retval
}
@@ -40,13 +61,21 @@ define {<8 x half>, <8 x half>} @vector_deinterleave_v8f16_v16f16(<16 x half> %v
}
define {<2 x float>, <2 x float>} @vector_deinterleave_v2f32_v4f32(<4 x float> %vec) {
-; CHECK-LABEL: vector_deinterleave_v2f32_v4f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: zip1 v2.2s, v0.2s, v1.2s
-; CHECK-NEXT: zip2 v1.2s, v0.2s, v1.2s
-; CHECK-NEXT: fmov d0, d2
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vector_deinterleave_v2f32_v4f32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT: zip1 v2.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: zip2 v1.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: fmov d0, d2
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vector_deinterleave_v2f32_v4f32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: uzp1 v2.4s, v0.4s, v0.4s
+; CHECK-GI-NEXT: uzp2 v1.4s, v0.4s, v0.4s
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-GI-NEXT: fmov d0, d2
+; CHECK-GI-NEXT: ret
%retval = call {<2 x float>, <2 x float>} @llvm.experimental.vector.deinterleave2.v4f32(<4 x float> %vec)
ret {<2 x float>, <2 x float>} %retval
}
diff --git a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
index 071c1ff..eb81aff 100644
--- a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define <4 x half> @interleave2_v4f16(<2 x half> %vec0, <2 x half> %vec1) {
; CHECK-LABEL: interleave2_v4f16:
@@ -11,15 +12,22 @@ define <4 x half> @interleave2_v4f16(<2 x half> %vec0, <2 x half> %vec1) {
}
define <8 x half> @interleave2_v8f16(<4 x half> %vec0, <4 x half> %vec1) {
-; CHECK-LABEL: interleave2_v8f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: adrp x8, .LCPI1_0
-; CHECK-NEXT: mov v0.d[1], v1.d[0]
-; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI1_0]
-; CHECK-NEXT: tbl v0.16b, { v0.16b }, v1.16b
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: interleave2_v8f16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT: adrp x8, .LCPI1_0
+; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI1_0]
+; CHECK-SD-NEXT: tbl v0.16b, { v0.16b }, v1.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_v8f16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: zip1 v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: ret
%retval = call <8 x half> @llvm.experimental.vector.interleave2.v8f16(<4 x half> %vec0, <4 x half> %vec1)
ret <8 x half> %retval
}
@@ -36,14 +44,21 @@ define <16 x half> @interleave2_v16f16(<8 x half> %vec0, <8 x half> %vec1) {
}
define <4 x float> @interleave2_v4f32(<2 x float> %vec0, <2 x float> %vec1) {
-; CHECK-LABEL: interleave2_v4f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: mov v0.d[1], v1.d[0]
-; CHECK-NEXT: rev64 v1.4s, v0.4s
-; CHECK-NEXT: uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: interleave2_v4f32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: rev64 v1.4s, v0.4s
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_v4f32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: zip1 v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: ret
%retval = call <4 x float> @llvm.experimental.vector.interleave2.v4f32(<2 x float> %vec0, <2 x float> %vec1)
ret <4 x float> %retval
}
diff --git a/llvm/test/CodeGen/AArch64/fold-global-offsets.ll b/llvm/test/CodeGen/AArch64/fold-global-offsets.ll
index 897d35a..8de0f0d 100644
--- a/llvm/test/CodeGen/AArch64/fold-global-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/fold-global-offsets.ll
@@ -131,7 +131,7 @@ define i32 @f7() {
; GISEL-NEXT: ret
entry:
- %lshr = lshr i128 bitcast (<2 x i64> <i64 undef, i64 ptrtoint (ptr getelementptr inbounds ({ [9 x ptr], [8 x ptr] }, ptr @x3, i64 0, inrange i32 1, i64 2) to i64)> to i128), 64
+ %lshr = lshr i128 bitcast (<2 x i64> <i64 undef, i64 ptrtoint (ptr getelementptr inbounds ({ [9 x ptr], [8 x ptr] }, ptr @x3, i64 0, i32 1, i64 2) to i64)> to i128), 64
%trunc = trunc i128 %lshr to i64
%inttoptr = inttoptr i64 %trunc to ptr
%gep = getelementptr i32, ptr %inttoptr, i64 5
diff --git a/llvm/test/CodeGen/AArch64/fp-conversion-to-tbl.ll b/llvm/test/CodeGen/AArch64/fp-conversion-to-tbl.ll
index 1ea87bb..0a3b9a0 100644
--- a/llvm/test/CodeGen/AArch64/fp-conversion-to-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/fp-conversion-to-tbl.ll
@@ -73,9 +73,8 @@ define void @fptoui_v8f32_to_v8i8_no_loop(ptr %A, ptr %dst) {
; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: fcvtzs.4s v1, v1
; CHECK-NEXT: fcvtzs.4s v0, v0
-; CHECK-NEXT: xtn.4h v1, v1
-; CHECK-NEXT: xtn.4h v0, v0
-; CHECK-NEXT: uzp1.8b v0, v0, v1
+; CHECK-NEXT: uzp1.8h v0, v0, v1
+; CHECK-NEXT: xtn.8b v0, v0
; CHECK-NEXT: str d0, [x1]
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
index f80a8df..685efbb 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
@@ -1477,6 +1477,61 @@ define fp128 @fpext_f128_f64(double %x) #0 {
ret fp128 %val
}
+; CHECK-LABEL: sin_v1f64:
+; CHECK: bl sin
+define <1 x double> @sin_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.sin.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
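+; These v1f64 tests are hand-written rather than autogenerated and assert
+; only that each constrained intrinsic reaches the matching libm call under
+; strictfp.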
+
+; CHECK-LABEL: cos_v1f64:
+; CHECK: bl cos
+define <1 x double> @cos_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.cos.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+; CHECK-LABEL: pow_v1f64:
+; CHECK: bl pow
+define <1 x double> @pow_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.pow.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+; CHECK-LABEL: log_v1f64:
+; CHECK: bl log
+define <1 x double> @log_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.log.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+; CHECK-LABEL: log2_v1f64:
+; CHECK: bl log2
+define <1 x double> @log2_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.log2.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+; CHECK-LABEL: log10_v1f64:
+; CHECK: bl log10
+define <1 x double> @log10_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.log10.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+; CHECK-LABEL: exp_v1f64:
+; CHECK: bl exp
+define <1 x double> @exp_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.exp.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+; CHECK-LABEL: exp2_v1f64:
+; CHECK: bl exp2
+define <1 x double> @exp2_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.exp2.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/AArch64/fpow.ll b/llvm/test/CodeGen/AArch64/fpow.ll
index c2ad1aa..8d40121 100644
--- a/llvm/test/CodeGen/AArch64/fpow.ll
+++ b/llvm/test/CodeGen/AArch64/fpow.ll
@@ -37,6 +37,21 @@ entry:
ret half %c
}
+define <1 x double> @pow_v1f64(<1 x double> %x) {
+; CHECK-LABEL: pow_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: adrp x8, .LCPI3_0
+; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT: bl pow
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.pow.v1f64(<1 x double> %x, <1 x double> <double 3.140000e+00>)
+ ret <1 x double> %c
+}
+
define <2 x double> @pow_v2f64(<2 x double> %a, <2 x double> %b) {
; CHECK-SD-LABEL: pow_v2f64:
; CHECK-SD: // %bb.0: // %entry
diff --git a/llvm/test/CodeGen/AArch64/fptoi.ll b/llvm/test/CodeGen/AArch64/fptoi.ll
index 67190e8..01585d0 100644
--- a/llvm/test/CodeGen/AArch64/fptoi.ll
+++ b/llvm/test/CodeGen/AArch64/fptoi.ll
@@ -1,13 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
-
-; CHECK-GI-FP16: warning: Instruction selection used fallback path for fptos_v2f16_v2i16
-; CHECK-GI-FP16-NEXT: warning: Instruction selection used fallback path for fptou_v2f16_v2i16
-; CHECK-GI-FP16-NEXT: warning: Instruction selection used fallback path for fptos_v2f16_v2i8
-; CHECK-GI-FP16-NEXT: warning: Instruction selection used fallback path for fptou_v2f16_v2i8
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define i64 @fptos_f64_i64(double %a) {
; CHECK-LABEL: fptos_f64_i64:
@@ -1096,30 +1091,17 @@ entry:
}
define <3 x i16> @fptos_v3f64_v3i16(<3 x double> %a) {
-; CHECK-SD-LABEL: fptos_v3f64_v3i16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
-; CHECK-SD-NEXT: fcvtzs v1.2d, v2.2d
-; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: fptos_v3f64_v3i16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
-; CHECK-GI-NEXT: fcvtzs v1.2d, v2.2d
-; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-GI-NEXT: uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: xtn v0.4h, v0.4s
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: fptos_v3f64_v3i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: fcvtzs v1.2d, v2.2d
+; CHECK-NEXT: fcvtzs v0.2d, v0.2d
+; CHECK-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: xtn v0.4h, v0.4s
+; CHECK-NEXT: ret
entry:
%c = fptosi <3 x double> %a to <3 x i16>
ret <3 x i16> %c
@@ -1134,9 +1116,8 @@ define <3 x i16> @fptou_v3f64_v3i16(<3 x double> %a) {
; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
; CHECK-SD-NEXT: fcvtzs v1.2d, v2.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptou_v3f64_v3i16:
@@ -1160,9 +1141,8 @@ define <4 x i16> @fptos_v4f64_v4i16(<4 x double> %a) {
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptos_v4f64_v4i16:
@@ -1182,9 +1162,8 @@ define <4 x i16> @fptou_v4f64_v4i16(<4 x double> %a) {
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptou_v4f64_v4i16:
@@ -1600,9 +1579,8 @@ define <3 x i8> @fptos_v3f64_v3i8(<3 x double> %a) {
; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
; CHECK-SD-NEXT: fcvtzs v1.2d, v2.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
; CHECK-SD-NEXT: umov w0, v0.h[0]
; CHECK-SD-NEXT: umov w1, v0.h[1]
; CHECK-SD-NEXT: umov w2, v0.h[2]
@@ -1638,9 +1616,8 @@ define <3 x i8> @fptou_v3f64_v3i8(<3 x double> %a) {
; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
; CHECK-SD-NEXT: fcvtzs v1.2d, v2.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
; CHECK-SD-NEXT: umov w0, v0.h[0]
; CHECK-SD-NEXT: umov w1, v0.h[1]
; CHECK-SD-NEXT: umov w2, v0.h[2]
@@ -1672,9 +1649,8 @@ define <4 x i8> @fptos_v4f64_v4i8(<4 x double> %a) {
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptos_v4f64_v4i8:
@@ -1694,9 +1670,8 @@ define <4 x i8> @fptou_v4f64_v4i8(<4 x double> %a) {
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptou_v4f64_v4i8:
@@ -1718,13 +1693,10 @@ define <8 x i8> @fptos_v8f64_v8i8(<8 x double> %a) {
; CHECK-SD-NEXT: fcvtzs v2.2d, v2.2d
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v3.2s, v3.2d
-; CHECK-SD-NEXT: xtn v2.2s, v2.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v2.4h, v2.4h, v3.4h
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v2.8b
+; CHECK-SD-NEXT: uzp1 v2.4s, v2.4s, v3.4s
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-NEXT: xtn v0.8b, v0.8h
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptos_v8f64_v8i8:
@@ -1750,13 +1722,10 @@ define <8 x i8> @fptou_v8f64_v8i8(<8 x double> %a) {
; CHECK-SD-NEXT: fcvtzs v2.2d, v2.2d
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v3.2s, v3.2d
-; CHECK-SD-NEXT: xtn v2.2s, v2.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v2.4h, v2.4h, v3.4h
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v2.8b
+; CHECK-SD-NEXT: uzp1 v2.4s, v2.4s, v3.4s
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-NEXT: xtn v0.8b, v0.8h
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptou_v8f64_v8i8:
@@ -1786,21 +1755,13 @@ define <16 x i8> @fptos_v16f64_v16i8(<16 x double> %a) {
; CHECK-SD-NEXT: fcvtzs v2.2d, v2.2d
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v7.2s, v7.2d
-; CHECK-SD-NEXT: xtn v6.2s, v6.2d
-; CHECK-SD-NEXT: xtn v5.2s, v5.2d
-; CHECK-SD-NEXT: xtn v4.2s, v4.2d
-; CHECK-SD-NEXT: xtn v3.2s, v3.2d
-; CHECK-SD-NEXT: xtn v2.2s, v2.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v6.4h, v6.4h, v7.4h
-; CHECK-SD-NEXT: uzp1 v4.4h, v4.4h, v5.4h
-; CHECK-SD-NEXT: uzp1 v2.4h, v2.4h, v3.4h
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: mov v4.d[1], v6.d[0]
-; CHECK-SD-NEXT: mov v0.d[1], v2.d[0]
-; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v4.16b
+; CHECK-SD-NEXT: uzp1 v6.4s, v6.4s, v7.4s
+; CHECK-SD-NEXT: uzp1 v4.4s, v4.4s, v5.4s
+; CHECK-SD-NEXT: uzp1 v2.4s, v2.4s, v3.4s
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: uzp1 v1.8h, v4.8h, v6.8h
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v1.16b
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptos_v16f64_v16i8:
@@ -1837,21 +1798,13 @@ define <16 x i8> @fptou_v16f64_v16i8(<16 x double> %a) {
; CHECK-SD-NEXT: fcvtzs v2.2d, v2.2d
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v7.2s, v7.2d
-; CHECK-SD-NEXT: xtn v6.2s, v6.2d
-; CHECK-SD-NEXT: xtn v5.2s, v5.2d
-; CHECK-SD-NEXT: xtn v4.2s, v4.2d
-; CHECK-SD-NEXT: xtn v3.2s, v3.2d
-; CHECK-SD-NEXT: xtn v2.2s, v2.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v6.4h, v6.4h, v7.4h
-; CHECK-SD-NEXT: uzp1 v4.4h, v4.4h, v5.4h
-; CHECK-SD-NEXT: uzp1 v2.4h, v2.4h, v3.4h
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: mov v4.d[1], v6.d[0]
-; CHECK-SD-NEXT: mov v0.d[1], v2.d[0]
-; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v4.16b
+; CHECK-SD-NEXT: uzp1 v6.4s, v6.4s, v7.4s
+; CHECK-SD-NEXT: uzp1 v4.4s, v4.4s, v5.4s
+; CHECK-SD-NEXT: uzp1 v2.4s, v2.4s, v3.4s
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: uzp1 v1.8h, v4.8h, v6.8h
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v1.16b
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptou_v16f64_v16i8:
@@ -1900,36 +1853,20 @@ define <32 x i8> @fptos_v32f64_v32i8(<32 x double> %a) {
; CHECK-SD-NEXT: fcvtzs v18.2d, v18.2d
; CHECK-SD-NEXT: fcvtzs v17.2d, v17.2d
; CHECK-SD-NEXT: fcvtzs v16.2d, v16.2d
-; CHECK-SD-NEXT: xtn v7.2s, v7.2d
-; CHECK-SD-NEXT: xtn v6.2s, v6.2d
-; CHECK-SD-NEXT: xtn v5.2s, v5.2d
-; CHECK-SD-NEXT: xtn v4.2s, v4.2d
-; CHECK-SD-NEXT: xtn v3.2s, v3.2d
-; CHECK-SD-NEXT: xtn v2.2s, v2.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: xtn v23.2s, v23.2d
-; CHECK-SD-NEXT: xtn v22.2s, v22.2d
-; CHECK-SD-NEXT: xtn v21.2s, v21.2d
-; CHECK-SD-NEXT: xtn v20.2s, v20.2d
-; CHECK-SD-NEXT: xtn v19.2s, v19.2d
-; CHECK-SD-NEXT: xtn v18.2s, v18.2d
-; CHECK-SD-NEXT: xtn v17.2s, v17.2d
-; CHECK-SD-NEXT: xtn v16.2s, v16.2d
-; CHECK-SD-NEXT: uzp1 v6.4h, v6.4h, v7.4h
-; CHECK-SD-NEXT: uzp1 v4.4h, v4.4h, v5.4h
-; CHECK-SD-NEXT: uzp1 v2.4h, v2.4h, v3.4h
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: uzp1 v1.4h, v22.4h, v23.4h
-; CHECK-SD-NEXT: uzp1 v3.4h, v20.4h, v21.4h
-; CHECK-SD-NEXT: uzp1 v5.4h, v18.4h, v19.4h
-; CHECK-SD-NEXT: uzp1 v7.4h, v16.4h, v17.4h
-; CHECK-SD-NEXT: mov v4.d[1], v6.d[0]
-; CHECK-SD-NEXT: mov v0.d[1], v2.d[0]
-; CHECK-SD-NEXT: mov v3.d[1], v1.d[0]
-; CHECK-SD-NEXT: mov v7.d[1], v5.d[0]
+; CHECK-SD-NEXT: uzp1 v6.4s, v6.4s, v7.4s
+; CHECK-SD-NEXT: uzp1 v4.4s, v4.4s, v5.4s
+; CHECK-SD-NEXT: uzp1 v2.4s, v2.4s, v3.4s
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: uzp1 v3.4s, v20.4s, v21.4s
+; CHECK-SD-NEXT: uzp1 v1.4s, v22.4s, v23.4s
+; CHECK-SD-NEXT: uzp1 v5.4s, v18.4s, v19.4s
+; CHECK-SD-NEXT: uzp1 v7.4s, v16.4s, v17.4s
+; CHECK-SD-NEXT: uzp1 v4.8h, v4.8h, v6.8h
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-NEXT: uzp1 v1.8h, v3.8h, v1.8h
+; CHECK-SD-NEXT: uzp1 v2.8h, v7.8h, v5.8h
; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v4.16b
-; CHECK-SD-NEXT: uzp1 v1.16b, v7.16b, v3.16b
+; CHECK-SD-NEXT: uzp1 v1.16b, v2.16b, v1.16b
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptos_v32f64_v32i8:
@@ -1997,36 +1934,20 @@ define <32 x i8> @fptou_v32f64_v32i8(<32 x double> %a) {
; CHECK-SD-NEXT: fcvtzs v18.2d, v18.2d
; CHECK-SD-NEXT: fcvtzs v17.2d, v17.2d
; CHECK-SD-NEXT: fcvtzs v16.2d, v16.2d
-; CHECK-SD-NEXT: xtn v7.2s, v7.2d
-; CHECK-SD-NEXT: xtn v6.2s, v6.2d
-; CHECK-SD-NEXT: xtn v5.2s, v5.2d
-; CHECK-SD-NEXT: xtn v4.2s, v4.2d
-; CHECK-SD-NEXT: xtn v3.2s, v3.2d
-; CHECK-SD-NEXT: xtn v2.2s, v2.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: xtn v23.2s, v23.2d
-; CHECK-SD-NEXT: xtn v22.2s, v22.2d
-; CHECK-SD-NEXT: xtn v21.2s, v21.2d
-; CHECK-SD-NEXT: xtn v20.2s, v20.2d
-; CHECK-SD-NEXT: xtn v19.2s, v19.2d
-; CHECK-SD-NEXT: xtn v18.2s, v18.2d
-; CHECK-SD-NEXT: xtn v17.2s, v17.2d
-; CHECK-SD-NEXT: xtn v16.2s, v16.2d
-; CHECK-SD-NEXT: uzp1 v6.4h, v6.4h, v7.4h
-; CHECK-SD-NEXT: uzp1 v4.4h, v4.4h, v5.4h
-; CHECK-SD-NEXT: uzp1 v2.4h, v2.4h, v3.4h
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: uzp1 v1.4h, v22.4h, v23.4h
-; CHECK-SD-NEXT: uzp1 v3.4h, v20.4h, v21.4h
-; CHECK-SD-NEXT: uzp1 v5.4h, v18.4h, v19.4h
-; CHECK-SD-NEXT: uzp1 v7.4h, v16.4h, v17.4h
-; CHECK-SD-NEXT: mov v4.d[1], v6.d[0]
-; CHECK-SD-NEXT: mov v0.d[1], v2.d[0]
-; CHECK-SD-NEXT: mov v3.d[1], v1.d[0]
-; CHECK-SD-NEXT: mov v7.d[1], v5.d[0]
+; CHECK-SD-NEXT: uzp1 v6.4s, v6.4s, v7.4s
+; CHECK-SD-NEXT: uzp1 v4.4s, v4.4s, v5.4s
+; CHECK-SD-NEXT: uzp1 v2.4s, v2.4s, v3.4s
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: uzp1 v3.4s, v20.4s, v21.4s
+; CHECK-SD-NEXT: uzp1 v1.4s, v22.4s, v23.4s
+; CHECK-SD-NEXT: uzp1 v5.4s, v18.4s, v19.4s
+; CHECK-SD-NEXT: uzp1 v7.4s, v16.4s, v17.4s
+; CHECK-SD-NEXT: uzp1 v4.8h, v4.8h, v6.8h
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-NEXT: uzp1 v1.8h, v3.8h, v1.8h
+; CHECK-SD-NEXT: uzp1 v2.8h, v7.8h, v5.8h
; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v4.16b
-; CHECK-SD-NEXT: uzp1 v1.16b, v7.16b, v3.16b
+; CHECK-SD-NEXT: uzp1 v1.16b, v2.16b, v1.16b
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptou_v32f64_v32i8:
@@ -3026,9 +2947,8 @@ define <8 x i8> @fptos_v8f32_v8i8(<8 x float> %a) {
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: fcvtzs v1.4s, v1.4s
; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-SD-NEXT: xtn v1.4h, v1.4s
-; CHECK-SD-NEXT: xtn v0.4h, v0.4s
-; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-SD-NEXT: xtn v0.8b, v0.8h
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptos_v8f32_v8i8:
@@ -3048,9 +2968,8 @@ define <8 x i8> @fptou_v8f32_v8i8(<8 x float> %a) {
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: fcvtzs v1.4s, v1.4s
; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-SD-NEXT: xtn v1.4h, v1.4s
-; CHECK-SD-NEXT: xtn v0.4h, v0.4s
-; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-SD-NEXT: xtn v0.8b, v0.8h
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptou_v8f32_v8i8:
@@ -3072,12 +2991,8 @@ define <16 x i8> @fptos_v16f32_v16i8(<16 x float> %a) {
; CHECK-SD-NEXT: fcvtzs v2.4s, v2.4s
; CHECK-SD-NEXT: fcvtzs v1.4s, v1.4s
; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-SD-NEXT: xtn v3.4h, v3.4s
-; CHECK-SD-NEXT: xtn v2.4h, v2.4s
-; CHECK-SD-NEXT: xtn v1.4h, v1.4s
-; CHECK-SD-NEXT: xtn v0.4h, v0.4s
-; CHECK-SD-NEXT: mov v2.d[1], v3.d[0]
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: uzp1 v2.8h, v2.8h, v3.8h
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v2.16b
; CHECK-SD-NEXT: ret
;
@@ -3134,20 +3049,12 @@ define <32 x i8> @fptos_v32f32_v32i8(<32 x float> %a) {
; CHECK-SD-NEXT: fcvtzs v6.4s, v6.4s
; CHECK-SD-NEXT: fcvtzs v5.4s, v5.4s
; CHECK-SD-NEXT: fcvtzs v4.4s, v4.4s
-; CHECK-SD-NEXT: xtn v3.4h, v3.4s
-; CHECK-SD-NEXT: xtn v2.4h, v2.4s
-; CHECK-SD-NEXT: xtn v1.4h, v1.4s
-; CHECK-SD-NEXT: xtn v0.4h, v0.4s
-; CHECK-SD-NEXT: xtn v7.4h, v7.4s
-; CHECK-SD-NEXT: xtn v6.4h, v6.4s
-; CHECK-SD-NEXT: xtn v5.4h, v5.4s
-; CHECK-SD-NEXT: xtn v4.4h, v4.4s
-; CHECK-SD-NEXT: mov v2.d[1], v3.d[0]
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
-; CHECK-SD-NEXT: mov v6.d[1], v7.d[0]
-; CHECK-SD-NEXT: mov v4.d[1], v5.d[0]
+; CHECK-SD-NEXT: uzp1 v2.8h, v2.8h, v3.8h
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-SD-NEXT: uzp1 v1.8h, v6.8h, v7.8h
+; CHECK-SD-NEXT: uzp1 v3.8h, v4.8h, v5.8h
; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v2.16b
-; CHECK-SD-NEXT: uzp1 v1.16b, v4.16b, v6.16b
+; CHECK-SD-NEXT: uzp1 v1.16b, v3.16b, v1.16b
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptos_v32f32_v32i8:
@@ -5272,8 +5179,13 @@ define <2 x i16> @fptos_v2f16_v2i16(<2 x half> %a) {
;
; CHECK-GI-FP16-LABEL: fptos_v2f16_v2i16:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-GI-FP16-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: fcvtzs v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-FP16-NEXT: ret
entry:
@@ -5300,8 +5212,13 @@ define <2 x i16> @fptou_v2f16_v2i16(<2 x half> %a) {
;
; CHECK-GI-FP16-LABEL: fptou_v2f16_v2i16:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-GI-FP16-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-FP16-NEXT: ret
entry:
@@ -5746,8 +5663,13 @@ define <2 x i8> @fptos_v2f16_v2i8(<2 x half> %a) {
;
; CHECK-GI-FP16-LABEL: fptos_v2f16_v2i8:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-GI-FP16-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: fcvtzs v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-FP16-NEXT: ret
entry:
@@ -5774,8 +5696,13 @@ define <2 x i8> @fptou_v2f16_v2i8(<2 x half> %a) {
;
; CHECK-GI-FP16-LABEL: fptou_v2f16_v2i8:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-GI-FP16-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-FP16-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/fsincos.ll b/llvm/test/CodeGen/AArch64/fsincos.ll
index 2ab1610..0b34f95 100644
--- a/llvm/test/CodeGen/AArch64/fsincos.ll
+++ b/llvm/test/CodeGen/AArch64/fsincos.ll
@@ -36,6 +36,19 @@ entry:
ret half %c
}
+define <1 x double> @sin_v1f64(<1 x double> %x) {
+; CHECK-LABEL: sin_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl sin
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.sin.v1f64(<1 x double> %x)
+ ret <1 x double> %c
+}
+
define <2 x double> @sin_v2f64(<2 x double> %a) {
; CHECK-SD-LABEL: sin_v2f64:
; CHECK-SD: // %bb.0: // %entry
@@ -1293,6 +1306,19 @@ entry:
ret half %c
}
+define <1 x double> @cos_v1f64(<1 x double> %x) {
+; CHECK-LABEL: cos_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl cos
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.cos.v1f64(<1 x double> %x)
+ ret <1 x double> %c
+}
+
define <2 x double> @cos_v2f64(<2 x double> %a) {
; CHECK-SD-LABEL: cos_v2f64:
; CHECK-SD: // %bb.0: // %entry
diff --git a/llvm/test/CodeGen/AArch64/hadd-combine.ll b/llvm/test/CodeGen/AArch64/hadd-combine.ll
index 2269d75..491bf40 100644
--- a/llvm/test/CodeGen/AArch64/hadd-combine.ll
+++ b/llvm/test/CodeGen/AArch64/hadd-combine.ll
@@ -329,9 +329,29 @@ define <8 x i16> @hadds_i_undef(<8 x i16> %t, <8 x i16> %src1) {
ret <8 x i16> %result
}
+define <8 x i16> @sub_fixedwidth_v4i32(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: sub_fixedwidth_v4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: urhadd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %or = or <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a0, %a1
+ %srl = lshr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <8 x i16> %or, %srl
+ ret <8 x i16> %res
+}
-
-
+define <8 x i16> @srhadd_fixedwidth_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: srhadd_fixedwidth_v8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: srhadd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %or = or <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a0, %a1
+ %srl = ashr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <8 x i16> %or, %srl
+ ret <8 x i16> %res
+}
define <8 x i16> @rhaddu_base(<8 x i16> %src1, <8 x i16> %src2) {
; CHECK-LABEL: rhaddu_base:
@@ -859,6 +879,30 @@ define <4 x i32> @urhadd_v4i32(<4 x i32> %x) {
ret <4 x i32> %r
}
+define <8 x i16> @uhadd_fixedwidth_v4i32(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: uhadd_fixedwidth_v4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uhadd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %and = and <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a0, %a1
+ %srl = lshr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <8 x i16> %and, %srl
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @shadd_fixedwidth_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: shadd_fixedwidth_v8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shadd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %and = and <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a0, %a1
+ %srl = ashr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <8 x i16> %and, %srl
+ ret <8 x i16> %res
+}
+
declare <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8>, <8 x i8>)
declare <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16>, <4 x i16>)
declare <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32>, <2 x i32>)
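
For reference, the four tests added above encode the standard overflow-free averaging identities that the backend recognizes as NEON halving/rounding adds (uhadd/shadd and urhadd/srhadd; the signed forms use an arithmetic shift, as in the ashr variants). A minimal scalar sketch in C — illustrative only, not code from this patch:

    #include <assert.h>
    #include <stdint.h>

    /* Halving add: (a + b) >> 1 with no intermediate overflow (uhadd). */
    static uint16_t hadd_u16(uint16_t a, uint16_t b) {
      return (a & b) + ((a ^ b) >> 1);
    }

    /* Rounding halving add: (a + b + 1) >> 1 with no overflow (urhadd). */
    static uint16_t rhadd_u16(uint16_t a, uint16_t b) {
      return (a | b) - ((a ^ b) >> 1);
    }

    int main(void) {
      assert(hadd_u16(0xFFFF, 0xFFFF) == 0xFFFF); /* no wrap at the top  */
      assert(hadd_u16(3, 4) == 3);                /* truncating average  */
      assert(rhadd_u16(3, 4) == 4);               /* rounding average    */
      return 0;
    }
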
diff --git a/llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir b/llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir
index aa94a037..47aa34e 100644
--- a/llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir
+++ b/llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir
@@ -22,6 +22,7 @@
name: inst_stores_to_dead_spill_implicit_def_impdef
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0:
@@ -59,6 +60,7 @@ body: |
name: inst_stores_to_dead_spill_movimm_impdef
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0:
diff --git a/llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir b/llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir
index e5395b2..a5d74ef 100644
--- a/llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir
+++ b/llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir
@@ -4,6 +4,8 @@
---
name: widget
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
jumpTable:
kind: label-difference32
entries:
diff --git a/llvm/test/CodeGen/AArch64/insert-subvector.ll b/llvm/test/CodeGen/AArch64/insert-subvector.ll
index d7656e1..6828fa9 100644
--- a/llvm/test/CodeGen/AArch64/insert-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/insert-subvector.ll
@@ -374,18 +374,115 @@ define <16 x i8> @load_v16i8_8_2(float %tmp, <16 x i8> %b, ptr %a) {
ret <16 x i8> %s2
}
+define <8 x i8> @load_v8i8_2_1(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr h2, [x0]
+; CHECK-NEXT: mov v0.h[0], v2.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %s2
+}
+
+define <8 x i8> @load_v8i8_2_15(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_15:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr h0, [x0]
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: adrp x8, .LCPI33_0
+; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI33_0]
+; CHECK-NEXT: tbl v0.8b, { v0.16b }, v1.8b
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 8, i32 0, i32 1, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %s2
+}
+
+define <8 x i8> @load_v8i8_2_2(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr h2, [x0]
+; CHECK-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 8, i32 9, i32 0, i32 1, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %s2
+}
+
+define <8 x i8> @load_v8i8_2_3(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr h2, [x0]
+; CHECK-NEXT: mov v0.h[2], v2.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 0, i32 1, i32 14, i32 15>
+ ret <8 x i8> %s2
+}
+
+define <8 x i8> @load_v8i8_2_4(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_4:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr h2, [x0]
+; CHECK-NEXT: mov v0.h[3], v2.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 0, i32 1>
+ ret <8 x i8> %s2
+}
+
+define <4 x i8> @load_v4i8_2_1(float %tmp, <4 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v4i8_2_1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr h0, [x0]
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: zip1 v0.8b, v0.8b, v0.8b
+; CHECK-NEXT: mov v0.s[1], v1.s[1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %s2 = shufflevector <4 x i8> %s1, <4 x i8> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ ret <4 x i8> %s2
+}
+
+define <4 x i8> @load_v4i8_2_2(float %tmp, <4 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v4i8_2_2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr h0, [x0]
+; CHECK-NEXT: zip1 v2.8b, v0.8b, v0.8b
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: mov v0.s[1], v2.s[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %s2 = shufflevector <4 x i8> %s1, <4 x i8> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
+ ret <4 x i8> %s2
+}
+
; i16
define <8 x i16> @load_v8i16_2_1(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ld1 { v0.h }[2], [x9]
-; CHECK-NEXT: xtn v2.4h, v0.4s
; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.s[0], v2.s[0]
+; CHECK-NEXT: ld1 { v0.s }[0], [x0]
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
%s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -396,14 +493,10 @@ define <8 x i16> @load_v8i16_2_1(float %tmp, <8 x i16> %b, ptr %a) {
define <8 x i16> @load_v8i16_2_15(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_15:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
; CHECK-NEXT: // kill: def $q1 killed $q1 def $q0_q1
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: adrp x8, .LCPI33_0
-; CHECK-NEXT: ld1 { v2.h }[2], [x9]
-; CHECK-NEXT: xtn v0.4h, v2.4s
-; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI33_0]
+; CHECK-NEXT: adrp x8, .LCPI40_0
+; CHECK-NEXT: ldr s0, [x0]
+; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI40_0]
; CHECK-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
@@ -415,13 +508,8 @@ define <8 x i16> @load_v8i16_2_15(float %tmp, <8 x i16> %b, ptr %a) {
define <8 x i16> @load_v8i16_2_2(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_2:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ld1 { v0.h }[2], [x9]
-; CHECK-NEXT: xtn v2.4h, v0.4s
; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.s[1], v2.s[0]
+; CHECK-NEXT: ld1 { v0.s }[1], [x0]
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
%s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -432,13 +520,8 @@ define <8 x i16> @load_v8i16_2_2(float %tmp, <8 x i16> %b, ptr %a) {
define <8 x i16> @load_v8i16_2_3(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_3:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ld1 { v0.h }[2], [x9]
-; CHECK-NEXT: xtn v2.4h, v0.4s
; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.s[2], v2.s[0]
+; CHECK-NEXT: ld1 { v0.s }[2], [x0]
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
%s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -449,13 +532,8 @@ define <8 x i16> @load_v8i16_2_3(float %tmp, <8 x i16> %b, ptr %a) {
define <8 x i16> @load_v8i16_2_4(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_4:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ld1 { v0.h }[2], [x9]
-; CHECK-NEXT: xtn v2.4h, v0.4s
; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.s[3], v2.s[0]
+; CHECK-NEXT: ld1 { v0.s }[3], [x0]
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
%s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -466,11 +544,8 @@ define <8 x i16> @load_v8i16_2_4(float %tmp, <8 x i16> %b, ptr %a) {
define <4 x i16> @load_v4i16_2_1(float %tmp, <4 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v4i16_2_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1 { v0.h }[0], [x0]
-; CHECK-NEXT: add x8, x0, #2
+; CHECK-NEXT: ldr s0, [x0]
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: ld1 { v0.h }[2], [x8]
-; CHECK-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-NEXT: mov v0.s[1], v1.s[1]
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: ret
@@ -483,11 +558,8 @@ define <4 x i16> @load_v4i16_2_1(float %tmp, <4 x i16> %b, ptr %a) {
define <4 x i16> @load_v4i16_2_2(float %tmp, <4 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v4i16_2_2:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1 { v0.h }[0], [x0]
-; CHECK-NEXT: add x8, x0, #2
-; CHECK-NEXT: ld1 { v0.h }[2], [x8]
-; CHECK-NEXT: uzp1 v2.4h, v0.4h, v0.4h
; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr s2, [x0]
; CHECK-NEXT: mov v0.s[1], v2.s[0]
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/isinf.ll b/llvm/test/CodeGen/AArch64/isinf.ll
index 458bd7e..834417b 100644
--- a/llvm/test/CodeGen/AArch64/isinf.ll
+++ b/llvm/test/CodeGen/AArch64/isinf.ll
@@ -58,22 +58,14 @@ define i32 @replace_isinf_call_f64(double %x) {
define i32 @replace_isinf_call_f128(fp128 %x) {
; CHECK-LABEL: replace_isinf_call_f128:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #32
-; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: str q0, [sp]
-; CHECK-NEXT: ldrb w8, [sp, #15]
-; CHECK-NEXT: and w8, w8, #0x7f
-; CHECK-NEXT: strb w8, [sp, #15]
-; CHECK-NEXT: adrp x8, .LCPI3_0
-; CHECK-NEXT: ldr q0, [sp]
-; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI3_0]
-; CHECK-NEXT: bl __eqtf2
-; CHECK-NEXT: cmp w0, #0
-; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: str q0, [sp, #-16]!
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: ldp x9, x8, [sp], #16
+; CHECK-NEXT: and x8, x8, #0x7fffffffffffffff
+; CHECK-NEXT: eor x8, x8, #0x7fff000000000000
+; CHECK-NEXT: orr x8, x9, x8
+; CHECK-NEXT: cmp x8, #0
; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%abs = tail call fp128 @llvm.fabs.f128(fp128 %x)
%cmpinf = fcmp oeq fp128 %abs, 0xL00000000000000007FFF000000000000
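
For context, the improved sequence above replaces the __eqtf2 libcall with a pure integer test of |x| against the IEEE binary128 infinity pattern. A rough C equivalent, assuming the fp128 value has been split into two 64-bit halves as the ldp does (little-endian, low half first):

    #include <stdint.h>

    /* fp128 is +/-inf iff, ignoring the sign bit, the high half is
       0x7fff000000000000 (exponent all ones, mantissa high bits zero)
       and the low half is zero. */
    static int is_inf_f128(uint64_t lo, uint64_t hi) {
      hi &= 0x7fffffffffffffffULL; /* fabs: clear the sign bit       */
      hi ^= 0x7fff000000000000ULL; /* zero iff high half matches inf */
      return (lo | hi) == 0;       /* one flag-setting compare       */
    }
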
diff --git a/llvm/test/CodeGen/AArch64/itofp.ll b/llvm/test/CodeGen/AArch64/itofp.ll
index 2164c2a..f5a7b5d 100644
--- a/llvm/test/CodeGen/AArch64/itofp.ll
+++ b/llvm/test/CodeGen/AArch64/itofp.ll
@@ -4,13 +4,6 @@
; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
-; CHECK-GI: warning: Instruction selection used fallback path for stofp_v3i8_v3f64
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i8_v3f64
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v3i8_v3f32
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i8_v3f32
-; CHECK-GI-NOFP16-NEXT: warning: Instruction selection used fallback path for stofp_v3i8_v3f16
-; CHECK-GI-NOFP16-NEXT: warning: Instruction selection used fallback path for utofp_v3i8_v3f16
-
define double @stofp_i64_f64(i64 %a) {
; CHECK-LABEL: stofp_i64_f64:
; CHECK: // %bb.0: // %entry
@@ -1754,47 +1747,109 @@ entry:
}
define <3 x double> @stofp_v3i8_v3f64(<3 x i8> %a) {
-; CHECK-LABEL: stofp_v3i8_v3f64:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: fmov s1, w2
-; CHECK-NEXT: mov v0.s[1], w1
-; CHECK-NEXT: shl v1.2s, v1.2s, #24
-; CHECK-NEXT: sshr v1.2s, v1.2s, #24
-; CHECK-NEXT: shl v0.2s, v0.2s, #24
-; CHECK-NEXT: sshll v1.2d, v1.2s, #0
-; CHECK-NEXT: sshr v0.2s, v0.2s, #24
-; CHECK-NEXT: scvtf v2.2d, v1.2d
-; CHECK-NEXT: sshll v0.2d, v0.2s, #0
-; CHECK-NEXT: // kill: def $d2 killed $d2 killed $q2
-; CHECK-NEXT: scvtf v0.2d, v0.2d
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: stofp_v3i8_v3f64:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: fmov s1, w2
+; CHECK-SD-NEXT: mov v0.s[1], w1
+; CHECK-SD-NEXT: shl v1.2s, v1.2s, #24
+; CHECK-SD-NEXT: sshr v1.2s, v1.2s, #24
+; CHECK-SD-NEXT: shl v0.2s, v0.2s, #24
+; CHECK-SD-NEXT: sshll v1.2d, v1.2s, #0
+; CHECK-SD-NEXT: sshr v0.2s, v0.2s, #24
+; CHECK-SD-NEXT: scvtf v2.2d, v1.2d
+; CHECK-SD-NEXT: sshll v0.2d, v0.2s, #0
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 killed $q2
+; CHECK-SD-NEXT: scvtf v0.2d, v0.2d
+; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: stofp_v3i8_v3f64:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov s0, w0
+; CHECK-GI-NEXT: fmov s1, w1
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: fmov s1, w2
+; CHECK-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov h2, v1.h[1]
+; CHECK-GI-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
+; CHECK-GI-NEXT: smov x8, v0.s[0]
+; CHECK-GI-NEXT: smov x9, v0.s[1]
+; CHECK-GI-NEXT: sshll v0.4s, v1.4h, #0
+; CHECK-GI-NEXT: fmov d1, x8
+; CHECK-GI-NEXT: smov x8, v0.s[0]
+; CHECK-GI-NEXT: mov v1.d[1], x9
+; CHECK-GI-NEXT: smov x9, v0.s[1]
+; CHECK-GI-NEXT: fmov d2, x8
+; CHECK-GI-NEXT: scvtf v0.2d, v1.2d
+; CHECK-GI-NEXT: mov v2.d[1], x9
+; CHECK-GI-NEXT: mov d1, v0.d[1]
+; CHECK-GI-NEXT: scvtf v2.2d, v2.2d
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 killed $q2
+; CHECK-GI-NEXT: ret
entry:
%c = sitofp <3 x i8> %a to <3 x double>
ret <3 x double> %c
}
define <3 x double> @utofp_v3i8_v3f64(<3 x i8> %a) {
-; CHECK-LABEL: utofp_v3i8_v3f64:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: movi d1, #0x0000ff000000ff
-; CHECK-NEXT: fmov s2, w2
-; CHECK-NEXT: mov v0.s[1], w1
-; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
-; CHECK-NEXT: and v1.8b, v2.8b, v1.8b
-; CHECK-NEXT: ushll v0.2d, v0.2s, #0
-; CHECK-NEXT: ushll v1.2d, v1.2s, #0
-; CHECK-NEXT: ucvtf v0.2d, v0.2d
-; CHECK-NEXT: ucvtf v2.2d, v1.2d
-; CHECK-NEXT: // kill: def $d2 killed $d2 killed $q2
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: utofp_v3i8_v3f64:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: movi d1, #0x0000ff000000ff
+; CHECK-SD-NEXT: fmov s2, w2
+; CHECK-SD-NEXT: mov v0.s[1], w1
+; CHECK-SD-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT: and v1.8b, v2.8b, v1.8b
+; CHECK-SD-NEXT: ushll v0.2d, v0.2s, #0
+; CHECK-SD-NEXT: ushll v1.2d, v1.2s, #0
+; CHECK-SD-NEXT: ucvtf v0.2d, v0.2d
+; CHECK-SD-NEXT: ucvtf v2.2d, v1.2d
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 killed $q2
+; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: utofp_v3i8_v3f64:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov s0, w0
+; CHECK-GI-NEXT: fmov s1, w1
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: fmov s1, w2
+; CHECK-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NEXT: movi d1, #0xff00ff00ff00ff
+; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov h2, v1.h[1]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov w8, v0.s[0]
+; CHECK-GI-NEXT: mov w9, v0.s[1]
+; CHECK-GI-NEXT: ushll v0.4s, v1.4h, #0
+; CHECK-GI-NEXT: fmov d1, x8
+; CHECK-GI-NEXT: mov w8, v0.s[0]
+; CHECK-GI-NEXT: mov v1.d[1], x9
+; CHECK-GI-NEXT: mov w9, v0.s[1]
+; CHECK-GI-NEXT: fmov d2, x8
+; CHECK-GI-NEXT: ucvtf v0.2d, v1.2d
+; CHECK-GI-NEXT: mov v2.d[1], x9
+; CHECK-GI-NEXT: mov d1, v0.d[1]
+; CHECK-GI-NEXT: ucvtf v2.2d, v2.2d
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 killed $q2
+; CHECK-GI-NEXT: ret
entry:
%c = uitofp <3 x i8> %a to <3 x double>
ret <3 x double> %c
@@ -3372,31 +3427,71 @@ entry:
}
define <3 x float> @stofp_v3i8_v3f32(<3 x i8> %a) {
-; CHECK-LABEL: stofp_v3i8_v3f32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: mov v0.h[1], w1
-; CHECK-NEXT: mov v0.h[2], w2
-; CHECK-NEXT: shl v0.4h, v0.4h, #8
-; CHECK-NEXT: sshr v0.4h, v0.4h, #8
-; CHECK-NEXT: sshll v0.4s, v0.4h, #0
-; CHECK-NEXT: scvtf v0.4s, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: stofp_v3i8_v3f32:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: mov v0.h[1], w1
+; CHECK-SD-NEXT: mov v0.h[2], w2
+; CHECK-SD-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-SD-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-SD-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-SD-NEXT: scvtf v0.4s, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: stofp_v3i8_v3f32:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov s0, w0
+; CHECK-GI-NEXT: fmov s1, w1
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: fmov s1, w2
+; CHECK-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-GI-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: sshll v1.4s, v1.4h, #0
+; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT: scvtf v0.4s, v0.4s
+; CHECK-GI-NEXT: ret
entry:
%c = sitofp <3 x i8> %a to <3 x float>
ret <3 x float> %c
}
define <3 x float> @utofp_v3i8_v3f32(<3 x i8> %a) {
-; CHECK-LABEL: utofp_v3i8_v3f32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: mov v0.h[1], w1
-; CHECK-NEXT: mov v0.h[2], w2
-; CHECK-NEXT: bic v0.4h, #255, lsl #8
-; CHECK-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: utofp_v3i8_v3f32:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: mov v0.h[1], w1
+; CHECK-SD-NEXT: mov v0.h[2], w2
+; CHECK-SD-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-SD-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: utofp_v3i8_v3f32:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov s0, w0
+; CHECK-GI-NEXT: fmov s1, w1
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: fmov s1, w2
+; CHECK-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NEXT: movi d1, #0xff00ff00ff00ff
+; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
+; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-GI-NEXT: ret
entry:
%c = uitofp <3 x i8> %a to <3 x float>
ret <3 x float> %c
@@ -5521,7 +5616,8 @@ define <2 x half> @stofp_v2i8_v2f16(<2 x i8> %a) {
; CHECK-GI-FP16: // %bb.0: // %entry
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-GI-FP16-NEXT: mov s1, v0.s[1]
-; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-FP16-NEXT: xtn v0.4h, v0.4s
; CHECK-GI-FP16-NEXT: shl v0.4h, v0.4h, #8
; CHECK-GI-FP16-NEXT: sshr v0.4h, v0.4h, #8
; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
@@ -5580,7 +5676,14 @@ define <2 x half> @utofp_v2i8_v2f16(<2 x i8> %a) {
;
; CHECK-GI-FP16-LABEL: utofp_v2i8_v2f16:
; CHECK-GI-FP16: // %bb.0: // %entry
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov s1, v0.s[1]
+; CHECK-GI-FP16-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-FP16-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
; CHECK-GI-FP16-NEXT: movi d1, #0x0000ff000000ff
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: and v0.8b, v0.8b, v1.8b
; CHECK-GI-FP16-NEXT: mov s1, v0.s[1]
; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
@@ -5620,11 +5723,20 @@ define <3 x half> @stofp_v3i8_v3f16(<3 x i8> %a) {
; CHECK-GI-NOFP16-LABEL: stofp_v3i8_v3f16:
; CHECK-GI-NOFP16: // %bb.0: // %entry
; CHECK-GI-NOFP16-NEXT: fmov s0, w0
-; CHECK-GI-NOFP16-NEXT: mov v0.h[1], w1
-; CHECK-GI-NOFP16-NEXT: mov v0.h[2], w2
+; CHECK-GI-NOFP16-NEXT: fmov s1, w1
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: fmov s1, w2
+; CHECK-GI-NOFP16-NEXT: mov v0.h[2], v1.h[0]
; CHECK-GI-NOFP16-NEXT: shl v0.4h, v0.4h, #8
; CHECK-GI-NOFP16-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-NOFP16-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NOFP16-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NOFP16-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NOFP16-NEXT: mov v1.h[1], v3.h[0]
; CHECK-GI-NOFP16-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-GI-NOFP16-NEXT: sshll v1.4s, v1.4h, #0
+; CHECK-GI-NOFP16-NEXT: mov v0.d[1], v1.d[0]
; CHECK-GI-NOFP16-NEXT: scvtf v0.4s, v0.4s
; CHECK-GI-NOFP16-NEXT: fcvtn v0.4h, v0.4s
; CHECK-GI-NOFP16-NEXT: ret
@@ -5633,11 +5745,10 @@ define <3 x half> @stofp_v3i8_v3f16(<3 x i8> %a) {
; CHECK-GI-FP16: // %bb.0: // %entry
; CHECK-GI-FP16-NEXT: fmov s0, w0
; CHECK-GI-FP16-NEXT: fmov s1, w1
-; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: mov v0.b[1], v1.b[0]
; CHECK-GI-FP16-NEXT: fmov s1, w2
-; CHECK-GI-FP16-NEXT: mov v0.h[2], v1.h[0]
-; CHECK-GI-FP16-NEXT: shl v0.4h, v0.4h, #8
-; CHECK-GI-FP16-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-FP16-NEXT: mov v0.b[2], v1.b[0]
+; CHECK-GI-FP16-NEXT: sshll v0.8h, v0.8b, #0
; CHECK-GI-FP16-NEXT: scvtf v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: ret
entry:
@@ -5669,10 +5780,20 @@ define <3 x half> @utofp_v3i8_v3f16(<3 x i8> %a) {
; CHECK-GI-NOFP16-LABEL: utofp_v3i8_v3f16:
; CHECK-GI-NOFP16: // %bb.0: // %entry
; CHECK-GI-NOFP16-NEXT: fmov s0, w0
-; CHECK-GI-NOFP16-NEXT: mov v0.h[1], w1
-; CHECK-GI-NOFP16-NEXT: mov v0.h[2], w2
-; CHECK-GI-NOFP16-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-GI-NOFP16-NEXT: fmov s1, w1
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: fmov s1, w2
+; CHECK-GI-NOFP16-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: movi d1, #0xff00ff00ff00ff
+; CHECK-GI-NOFP16-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-GI-NOFP16-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NOFP16-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NOFP16-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NOFP16-NEXT: mov v1.h[1], v3.h[0]
; CHECK-GI-NOFP16-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NOFP16-NEXT: ushll v1.4s, v1.4h, #0
+; CHECK-GI-NOFP16-NEXT: mov v0.d[1], v1.d[0]
; CHECK-GI-NOFP16-NEXT: ucvtf v0.4s, v0.4s
; CHECK-GI-NOFP16-NEXT: fcvtn v0.4h, v0.4s
; CHECK-GI-NOFP16-NEXT: ret
@@ -5681,11 +5802,10 @@ define <3 x half> @utofp_v3i8_v3f16(<3 x i8> %a) {
; CHECK-GI-FP16: // %bb.0: // %entry
; CHECK-GI-FP16-NEXT: fmov s0, w0
; CHECK-GI-FP16-NEXT: fmov s1, w1
-; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: mov v0.b[1], v1.b[0]
; CHECK-GI-FP16-NEXT: fmov s1, w2
-; CHECK-GI-FP16-NEXT: mov v0.h[2], v1.h[0]
-; CHECK-GI-FP16-NEXT: movi d1, #0xff00ff00ff00ff
-; CHECK-GI-FP16-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-GI-FP16-NEXT: mov v0.b[2], v1.b[0]
+; CHECK-GI-FP16-NEXT: ushll v0.8h, v0.8b, #0
; CHECK-GI-FP16-NEXT: ucvtf v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/llvm.exp10.ll b/llvm/test/CodeGen/AArch64/llvm.exp10.ll
index 56f4272..51d17ad 100644
--- a/llvm/test/CodeGen/AArch64/llvm.exp10.ll
+++ b/llvm/test/CodeGen/AArch64/llvm.exp10.ll
@@ -532,11 +532,18 @@ define double @exp10_f64(double %x) {
ret double %r
}
-; FIXME: Broken
-; define <1 x double> @exp10_v1f64(<1 x double> %x) {
-; %r = call <1 x double> @llvm.exp10.v1f64(<1 x double> %x)
-; ret <1 x double> %r
-; }
+define <1 x double> @exp10_v1f64(<1 x double> %x) {
+; CHECK-LABEL: exp10_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl exp10
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %r = call <1 x double> @llvm.exp10.v1f64(<1 x double> %x)
+ ret <1 x double> %r
+}
define <2 x double> @exp10_v2f64(<2 x double> %x) {
; SDAG-LABEL: exp10_v2f64:
diff --git a/llvm/test/CodeGen/AArch64/load.ll b/llvm/test/CodeGen/AArch64/load.ll
index 39143e5..c3c0ec5 100644
--- a/llvm/test/CodeGen/AArch64/load.ll
+++ b/llvm/test/CodeGen/AArch64/load.ll
@@ -159,7 +159,8 @@ define <2 x i16> @load_v2i16(ptr %ptr){
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldr h0, [x0]
; CHECK-GI-NEXT: ldr h1, [x0, #2]
-; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-NEXT: ret
%a = load <2 x i16>, ptr %ptr
diff --git a/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir b/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir
index 23cf1dc..5b379c2 100644
--- a/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir
+++ b/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir
@@ -10,7 +10,6 @@ body: |
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: liveins: $w0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $x8 = ORRXrs $xzr, $x0, 0, implicit $w0
; CHECK-NEXT: $w8 = ORRWrs $wzr, $w0, 0, implicit-def $x8
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
diff --git a/llvm/test/CodeGen/AArch64/misched-bundle.mir b/llvm/test/CodeGen/AArch64/misched-bundle.mir
new file mode 100644
index 0000000..a947c04
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/misched-bundle.mir
@@ -0,0 +1,195 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a510 -run-pass=machine-scheduler -debug-only=machine-scheduler %s -o - 2>&1 | FileCheck %s
+# REQUIRES: asserts
+
+# CHECK: SU(0): renamable $z0 = LD1H renamable $p0, renamable $x1, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 4
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 7
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(6): Out Latency=1
+# CHECK-NEXT: SU(6): Data Latency=3 Reg=$z0
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(1): renamable $z1 = LD1H renamable $p0, renamable $x2, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 4
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 7
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(7): Out Latency=1
+# CHECK-NEXT: SU(6): Data Latency=3 Reg=$z1
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(2): renamable $z2 = LD1H renamable $p0, renamable $x0, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 3
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 7
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(6): Data Latency=3 Reg=$z2
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(3): renamable $z3 = LD1H renamable $p0, renamable $x11, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 3
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(7): Data Latency=0 Reg=$z3
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(4): renamable $z4 = LD1H renamable $p0, renamable $x12, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 3
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(7): Data Latency=0 Reg=$z4
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(5): renamable $z5 = LD1H renamable $p0, renamable $x13, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 3
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(7): Data Latency=0 Reg=$z5
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(6): $z0 = FMAD_ZPmZZ_H renamable $p0, killed $z0(tied-def 0), killed renamable $z1, killed renamable $z2
+# CHECK-NEXT: # preds left : 4
+# CHECK-NEXT: # succs left : 2
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 4
+# CHECK-NEXT: Depth : 3
+# CHECK-NEXT: Height : 4
+# CHECK-NEXT: Predecessors:
+# CHECK-NEXT: SU(2): Data Latency=3 Reg=$z2
+# CHECK-NEXT: SU(1): Data Latency=3 Reg=$z1
+# CHECK-NEXT: SU(0): Out Latency=1
+# CHECK-NEXT: SU(0): Data Latency=3 Reg=$z0
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(8): Data Latency=4 Reg=$z0
+# CHECK-NEXT: SU(7): Anti Latency=0
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(7): BUNDLE implicit-def $z1, implicit-def $q1, implicit-def $d1, implicit-def $s1, implicit-def $h1, implicit-def $b1, implicit $z5, implicit $p0, implicit killed $z4, implicit killed $z3
+# CHECK-NEXT: # preds left : 5
+# CHECK-NEXT: # succs left : 1
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 1
+# CHECK-NEXT: Depth : 3
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Predecessors:
+# CHECK-NEXT: SU(6): Anti Latency=0
+# CHECK-NEXT: SU(5): Data Latency=0 Reg=$z5
+# CHECK-NEXT: SU(4): Data Latency=0 Reg=$z4
+# CHECK-NEXT: SU(3): Data Latency=0 Reg=$z3
+# CHECK-NEXT: SU(1): Out Latency=1
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(9): Data Latency=0 Reg=$z1
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(8): ST1H killed renamable $z0, renamable $p0, renamable $x0, renamable $x10 :: (store unknown-size, align 1)
+# CHECK-NEXT: # preds left : 7
+# CHECK-NEXT: # succs left : 1
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 1
+# CHECK-NEXT: Depth : 7
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Predecessors:
+# CHECK-NEXT: SU(6): Data Latency=4 Reg=$z0
+# CHECK-NEXT: SU(5): Ord Latency=0 Memory
+# CHECK-NEXT: SU(4): Ord Latency=0 Memory
+# CHECK-NEXT: SU(3): Ord Latency=0 Memory
+# CHECK-NEXT: SU(2): Ord Latency=0 Memory
+# CHECK-NEXT: SU(1): Ord Latency=0 Memory
+# CHECK-NEXT: SU(0): Ord Latency=0 Memory
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(9): ST1H killed renamable $z1, renamable $p0, renamable $x13, renamable $x10 :: (store unknown-size, align 1)
+# CHECK-NEXT: # preds left : 8
+# CHECK-NEXT: # succs left : 0
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 1
+# CHECK-NEXT: Depth : 7
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Predecessors:
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: SU(7): Data Latency=0 Reg=$z1
+# CHECK-NEXT: SU(5): Ord Latency=0 Memory
+# CHECK-NEXT: SU(4): Ord Latency=0 Memory
+# CHECK-NEXT: SU(3): Ord Latency=0 Memory
+# CHECK-NEXT: SU(2): Ord Latency=0 Memory
+# CHECK-NEXT: SU(1): Ord Latency=0 Memory
+# CHECK-NEXT: SU(0): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: ExitSU: RET_ReallyLR
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 0
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 0
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 0
+
+---
+name: test
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $p0, $x0, $x1, $x2, $x10, $x11, $x12, $x13
+
+ ; CHECK-LABEL: name: test
+ ; CHECK: liveins: $p0, $x0, $x1, $x2, $x10, $x11, $x12, $x13
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $z0 = LD1H renamable $p0, renamable $x1, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: renamable $z1 = LD1H renamable $p0, renamable $x2, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: renamable $z2 = LD1H renamable $p0, renamable $x0, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: $z0 = FMAD_ZPmZZ_H renamable $p0, killed $z0, renamable $z1, killed renamable $z2
+ ; CHECK-NEXT: renamable $z3 = LD1H renamable $p0, renamable $x11, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: renamable $z4 = LD1H renamable $p0, renamable $x12, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: renamable $z5 = LD1H renamable $p0, renamable $x13, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: ST1H killed renamable $z0, renamable $p0, renamable $x0, renamable $x10 :: (store unknown-size, align 1)
+ ; CHECK-NEXT: BUNDLE implicit-def $z1, implicit-def $q1, implicit-def $d1, implicit-def $s1, implicit-def $h1, implicit-def $b1, implicit $z5, implicit $p0, implicit $z4, implicit $z3 {
+ ; CHECK-NEXT: $z1 = MOVPRFX_ZZ $z5
+ ; CHECK-NEXT: $z1 = FMLA_ZPmZZ_H renamable $p0, internal $z1, renamable $z4, renamable $z3
+ ; CHECK-NEXT: }
+ ; CHECK-NEXT: ST1H renamable $z1, renamable $p0, renamable $x13, renamable $x10 :: (store unknown-size, align 1)
+ ; CHECK-NEXT: RET_ReallyLR
+
+ renamable $z0 = LD1H renamable $p0, renamable $x1, renamable $x10 :: (load unknown-size)
+ renamable $z1 = LD1H renamable $p0, renamable $x2, renamable $x10 :: (load unknown-size)
+ renamable $z2 = LD1H renamable $p0, renamable $x0, renamable $x10 :: (load unknown-size)
+ renamable $z3 = LD1H renamable $p0, renamable $x11, renamable $x10 :: (load unknown-size)
+ renamable $z4 = LD1H renamable $p0, renamable $x12, renamable $x10 :: (load unknown-size)
+ renamable $z5 = LD1H renamable $p0, renamable $x13, renamable $x10 :: (load unknown-size)
+ $z0 = FMAD_ZPmZZ_H renamable $p0, killed $z0, killed renamable $z1, killed renamable $z2
+ BUNDLE implicit-def $z1, implicit-def $q1, implicit-def $d1, implicit-def $s1, implicit-def $h1, implicit-def $b1, implicit $z5, implicit $p0, implicit killed $z4, implicit killed $z3 {
+ $z1 = MOVPRFX_ZZ $z5
+ $z1 = FMLA_ZPmZZ_H renamable $p0, internal killed $z1, killed renamable $z4, killed renamable $z3
+ }
+ ST1H killed renamable $z0, renamable $p0, renamable $x0, renamable $x10 :: (store unknown-size)
+ ST1H killed renamable $z1, renamable $p0, renamable $x13, renamable $x10 :: (store unknown-size)
+ RET_ReallyLR
+
+...
diff --git a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
index 0162065..57f220f 100644
--- a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
@@ -1117,8 +1117,15 @@ define <4 x i16> @vselect_constant_cond_zero_v4i16(<4 x i16> %a) {
;
; CHECK-GI-LABEL: vselect_constant_cond_zero_v4i16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI84_0
-; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI84_0]
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: mov w9, #0 // =0x0
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: mov v3.16b, v1.16b
+; CHECK-GI-NEXT: mov v3.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov v3.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v3.b[3], v1.b[0]
+; CHECK-GI-NEXT: ushll v1.8h, v3.8b, #0
; CHECK-GI-NEXT: shl v1.4h, v1.4h, #15
; CHECK-GI-NEXT: sshr v1.4h, v1.4h, #15
; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b
@@ -1137,8 +1144,16 @@ define <4 x i32> @vselect_constant_cond_zero_v4i32(<4 x i32> %a) {
;
; CHECK-GI-LABEL: vselect_constant_cond_zero_v4i32:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI85_0
-; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI85_0]
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: mov w9, #0 // =0x0
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: mov v3.16b, v1.16b
+; CHECK-GI-NEXT: mov v3.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v2.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v1.4s, v3.4h, #0
+; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0
+; CHECK-GI-NEXT: mov v1.d[1], v2.d[0]
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
@@ -1181,8 +1196,15 @@ define <4 x i16> @vselect_constant_cond_v4i16(<4 x i16> %a, <4 x i16> %b) {
;
; CHECK-GI-LABEL: vselect_constant_cond_v4i16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI87_0
-; CHECK-GI-NEXT: ldr d2, [x8, :lo12:.LCPI87_0]
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: mov w9, #0 // =0x0
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: mov v4.16b, v2.16b
+; CHECK-GI-NEXT: mov v4.b[1], v3.b[0]
+; CHECK-GI-NEXT: mov v4.b[2], v3.b[0]
+; CHECK-GI-NEXT: mov v4.b[3], v2.b[0]
+; CHECK-GI-NEXT: ushll v2.8h, v4.8b, #0
; CHECK-GI-NEXT: shl v2.4h, v2.4h, #15
; CHECK-GI-NEXT: sshr v2.4h, v2.4h, #15
; CHECK-GI-NEXT: bif v0.8b, v1.8b, v2.8b
@@ -1201,8 +1223,16 @@ define <4 x i32> @vselect_constant_cond_v4i32(<4 x i32> %a, <4 x i32> %b) {
;
; CHECK-GI-LABEL: vselect_constant_cond_v4i32:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI88_0
-; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI88_0]
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: mov w9, #0 // =0x0
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: mov v4.16b, v2.16b
+; CHECK-GI-NEXT: mov v4.h[1], v3.h[0]
+; CHECK-GI-NEXT: mov v3.h[1], v2.h[0]
+; CHECK-GI-NEXT: ushll v2.4s, v4.4h, #0
+; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0
+; CHECK-GI-NEXT: mov v2.d[1], v3.d[0]
; CHECK-GI-NEXT: shl v2.4s, v2.4s, #31
; CHECK-GI-NEXT: sshr v2.4s, v2.4s, #31
; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b
diff --git a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
index 632b6b3..dbb5dfeb 100644
--- a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
@@ -2870,6 +2870,107 @@ define <2 x i64> @fcmune2xdouble(<2 x double> %A, <2 x double> %B) {
ret <2 x i64> %tmp4
}
+define <2 x i32> @fcmal2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-SD-LABEL: fcmal2xfloat:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.2d, #0xffffffffffffffff
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: fcmal2xfloat:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: movi v0.2s, #1
+; CHECK-GI-NEXT: shl v0.2s, v0.2s, #31
+; CHECK-GI-NEXT: sshr v0.2s, v0.2s, #31
+; CHECK-GI-NEXT: ret
+ %tmp3 = fcmp true <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmal4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-SD-LABEL: fcmal4xfloat:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.2d, #0xffffffffffffffff
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: fcmal4xfloat:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov v1.16b, v0.16b
+; CHECK-GI-NEXT: mov v1.h[1], v0.h[0]
+; CHECK-GI-NEXT: mov v0.h[1], v0.h[0]
+; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: mov v1.d[1], v0.d[0]
+; CHECK-GI-NEXT: shl v0.4s, v1.4s, #31
+; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: ret
+ %tmp3 = fcmp true <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmal2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-SD-LABEL: fcmal2xdouble:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.2d, #0xffffffffffffffff
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: fcmal2xdouble:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: adrp x8, .LCPI221_0
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI221_0]
+; CHECK-GI-NEXT: shl v0.2d, v0.2d, #63
+; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #63
+; CHECK-GI-NEXT: ret
+ %tmp3 = fcmp true <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmnv2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: fcmnv2xfloat:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: ret
+ %tmp3 = fcmp false <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmnv4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-SD-LABEL: fcmnv4xfloat:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.2d, #0000000000000000
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: fcmnv4xfloat:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #0 // =0x0
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov v1.16b, v0.16b
+; CHECK-GI-NEXT: mov v1.h[1], v0.h[0]
+; CHECK-GI-NEXT: mov v0.h[1], v0.h[0]
+; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: mov v1.d[1], v0.d[0]
+; CHECK-GI-NEXT: shl v0.4s, v1.4s, #31
+; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: ret
+ %tmp3 = fcmp false <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmnv2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: fcmnv2xdouble:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: ret
+ %tmp3 = fcmp false <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
define <2 x i32> @fcmoeqz2xfloat(<2 x float> %A) {
; CHECK-LABEL: fcmoeqz2xfloat:
; CHECK: // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/neon-truncstore.ll b/llvm/test/CodeGen/AArch64/neon-truncstore.ll
index b677d077..5d78ad2 100644
--- a/llvm/test/CodeGen/AArch64/neon-truncstore.ll
+++ b/llvm/test/CodeGen/AArch64/neon-truncstore.ll
@@ -104,7 +104,7 @@ define void @v4i32_v4i8(<4 x i32> %a, ptr %result) {
; CHECK-LABEL: v4i32_v4i8:
; CHECK: // %bb.0:
; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: xtn v0.8b, v0.8h
+; CHECK-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-NEXT: str s0, [x0]
; CHECK-NEXT: ret
%b = trunc <4 x i32> %a to <4 x i8>
@@ -170,8 +170,7 @@ define void @v2i16_v2i8(<2 x i16> %a, ptr %result) {
define void @v4i16_v4i8(<4 x i16> %a, ptr %result) {
; CHECK-LABEL: v4i16_v4i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: xtn v0.8b, v0.8h
+; CHECK-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-NEXT: str s0, [x0]
; CHECK-NEXT: ret
%b = trunc <4 x i16> %a to <4 x i8>
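
The xtn → uzp1 rewrites above are behavior-preserving here: on little-endian AArch64, the even-numbered bytes of a vector are exactly the low bytes of its halfwords, so uzp1 performs the same narrowing while reading only a 64-bit source (hence the dropped implicit-def kill comment). A hedged intrinsics sketch, illustrative only:

    #include <arm_neon.h>

    /* Truncate 4 x u16 -> 4 x u8; the results land in the low four
       lanes, matching the 32-bit str of s0 in the tests above. */
    uint8x8_t trunc_4xu16_to_u8(uint16x4_t v) {
      uint8x8_t b = vreinterpret_u8_u16(v);
      return vuzp1_u8(b, b); /* even bytes == low byte of each u16 */
    }
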
diff --git a/llvm/test/CodeGen/AArch64/overflow.ll b/llvm/test/CodeGen/AArch64/overflow.ll
index 444aaeb..977141f 100644
--- a/llvm/test/CodeGen/AArch64/overflow.ll
+++ b/llvm/test/CodeGen/AArch64/overflow.ll
@@ -19,20 +19,12 @@ entry:
}
define zeroext i1 @saddo1.i32.fold(i32 %v1, i32 %v2, ptr %res) {
-; SDAG-LABEL: saddo1.i32.fold:
-; SDAG: // %bb.0: // %entry
-; SDAG-NEXT: mov w8, #20 // =0x14
-; SDAG-NEXT: mov w0, wzr
-; SDAG-NEXT: str w8, [x2]
-; SDAG-NEXT: ret
-;
-; GISEL-LABEL: saddo1.i32.fold:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: mov w8, #9 // =0x9
-; GISEL-NEXT: adds w8, w8, #11
-; GISEL-NEXT: cset w0, vs
-; GISEL-NEXT: str w8, [x2]
-; GISEL-NEXT: ret
+; CHECK-LABEL: saddo1.i32.fold:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #20 // =0x14
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: str w8, [x2]
+; CHECK-NEXT: ret
entry:
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 9, i32 11)
%val = extractvalue {i32, i1} %t, 0
@@ -72,21 +64,10 @@ entry:
}
define i32 @saddo.select.i64(i32 %v1, i32 %v2, i1 %v3, i64 %v4, i64 %v5) {
-; SDAG-LABEL: saddo.select.i64:
-; SDAG: // %bb.0: // %entry
-; SDAG-NEXT: mov w0, w1
-; SDAG-NEXT: ret
-;
-; GISEL-LABEL: saddo.select.i64:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: mov w8, #13 // =0xd
-; GISEL-NEXT: and x9, x3, #0xc
-; GISEL-NEXT: and x8, x4, x8
-; GISEL-NEXT: cmn x9, x8
-; GISEL-NEXT: cset w8, vs
-; GISEL-NEXT: tst w8, #0x1
-; GISEL-NEXT: csel w0, w0, w1, ne
-; GISEL-NEXT: ret
+; CHECK-LABEL: saddo.select.i64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: ret
entry:
%lhs = and i64 %v4, 12
%rhs = and i64 %v5, 13
@@ -97,22 +78,10 @@ entry:
}
define i32 @uaddo.select.i64(i32 %v1, i32 %v2, i1 %v3, i64 %v4, i64 %v5) {
-; SDAG-LABEL: uaddo.select.i64:
-; SDAG: // %bb.0: // %entry
-; SDAG-NEXT: mov w0, w1
-; SDAG-NEXT: ret
-;
-; GISEL-LABEL: uaddo.select.i64:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: mov w8, #9 // =0x9
-; GISEL-NEXT: mov w9, #10 // =0xa
-; GISEL-NEXT: and x8, x3, x8
-; GISEL-NEXT: and x9, x4, x9
-; GISEL-NEXT: cmn x8, x9
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: tst w8, #0x1
-; GISEL-NEXT: csel w0, w0, w1, ne
-; GISEL-NEXT: ret
+; CHECK-LABEL: uaddo.select.i64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: ret
entry:
%lhs = and i64 %v4, 9
%rhs = and i64 %v5, 10
@@ -123,18 +92,11 @@ entry:
}
define zeroext i1 @saddo.canon.i32(i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, ptr %res) {
-; SDAG-LABEL: saddo.canon.i32:
-; SDAG: // %bb.0: // %entry
-; SDAG-NEXT: mov w0, wzr
-; SDAG-NEXT: str w4, [x5]
-; SDAG-NEXT: ret
-;
-; GISEL-LABEL: saddo.canon.i32:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: adds w8, wzr, w4
-; GISEL-NEXT: cset w0, vs
-; GISEL-NEXT: str w8, [x5]
-; GISEL-NEXT: ret
+; CHECK-LABEL: saddo.canon.i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: str w4, [x5]
+; CHECK-NEXT: ret
entry:
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 0, i32 %v5)
%val = extractvalue {i32, i1} %t, 0
@@ -143,13 +105,19 @@ entry:
ret i1 %obit
}
define zeroext i1 @saddo.add.i32(i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, ptr %res) {
-; CHECK-LABEL: saddo.add.i32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: add w8, w4, #100
-; CHECK-NEXT: subs w8, w8, #100
-; CHECK-NEXT: cset w0, vs
-; CHECK-NEXT: str w8, [x5]
-; CHECK-NEXT: ret
+; SDAG-LABEL: saddo.add.i32:
+; SDAG: // %bb.0: // %entry
+; SDAG-NEXT: add w8, w4, #100
+; SDAG-NEXT: subs w8, w8, #100
+; SDAG-NEXT: cset w0, vs
+; SDAG-NEXT: str w8, [x5]
+; SDAG-NEXT: ret
+;
+; GISEL-LABEL: saddo.add.i32:
+; GISEL: // %bb.0: // %entry
+; GISEL-NEXT: mov w0, wzr
+; GISEL-NEXT: str w4, [x5]
+; GISEL-NEXT: ret
entry:
%lhs = add nsw i32 %v5, 100
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %lhs, i32 -100)
@@ -160,13 +128,20 @@ entry:
}
define zeroext i1 @uaddo.add.i32(i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, ptr %res) {
-; CHECK-LABEL: uaddo.add.i32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: add w8, w4, #5
-; CHECK-NEXT: adds w8, w8, #5
-; CHECK-NEXT: cset w0, hs
-; CHECK-NEXT: str w8, [x5]
-; CHECK-NEXT: ret
+; SDAG-LABEL: uaddo.add.i32:
+; SDAG: // %bb.0: // %entry
+; SDAG-NEXT: add w8, w4, #5
+; SDAG-NEXT: adds w8, w8, #5
+; SDAG-NEXT: cset w0, hs
+; SDAG-NEXT: str w8, [x5]
+; SDAG-NEXT: ret
+;
+; GISEL-LABEL: uaddo.add.i32:
+; GISEL: // %bb.0: // %entry
+; GISEL-NEXT: adds w8, w4, #10
+; GISEL-NEXT: cset w0, hs
+; GISEL-NEXT: str w8, [x5]
+; GISEL-NEXT: ret
entry:
%lhs = add nuw i32 %v5, 5
%t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %lhs, i32 5)
diff --git a/llvm/test/CodeGen/AArch64/peephole-movd.mir b/llvm/test/CodeGen/AArch64/peephole-movd.mir
new file mode 100644
index 0000000..bd7f0ab
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/peephole-movd.mir
@@ -0,0 +1,60 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass=aarch64-mi-peephole-opt -o - -mtriple=aarch64-unknown-linux -verify-machineinstrs %s | FileCheck %s
+
+---
+name: remove_kill_flags
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $w0
+ ; CHECK-LABEL: name: remove_kill_flags
+ ; CHECK: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOVIv2d_ns:%[0-9]+]]:fpr128 = MOVIv2d_ns 0
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY [[MOVIv2d_ns]].dsub
+ ; CHECK-NEXT: [[UQSHLv8i8_shift:%[0-9]+]]:fpr64 = UQSHLv8i8_shift killed [[COPY]], 1
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:fpr128 = SUBREG_TO_REG 0, [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: [[TBLv8i8One:%[0-9]+]]:fpr64 = TBLv8i8One killed [[SUBREG_TO_REG]], [[UQSHLv8i8_shift]]
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:fpr128 = MOVIv2d_ns 0
+ %1:fpr64 = COPY %0.dsub:fpr128
+ %2:fpr64 = UQSHLv8i8_shift killed %1:fpr64, 1
+ %3:fpr64 = FMOVDr %2:fpr64
+ %4:fpr128 = SUBREG_TO_REG 0, killed %3:fpr64, %subreg.dsub
+ %5:fpr64 = TBLv8i8One killed %4:fpr128, %2:fpr64
+ %7:fpr128 = IMPLICIT_DEF
+ %6:fpr128 = INSERT_SUBREG %7:fpr128, killed %2:fpr64, %subreg.dsub
+ RET_ReallyLR implicit $w0
+...
+---
+name: remove_kill_flags2
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $w0
+ ; CHECK-LABEL: name: remove_kill_flags2
+ ; CHECK: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOVIv2d_ns:%[0-9]+]]:fpr128 = MOVIv2d_ns 0
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY [[MOVIv2d_ns]].dsub
+ ; CHECK-NEXT: [[UQSHLv8i8_shift:%[0-9]+]]:fpr64 = UQSHLv8i8_shift killed [[COPY]], 1
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:fpr128 = SUBREG_TO_REG 0, [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:fpr128 = MOVIv2d_ns 0
+ %1:fpr64 = COPY %0.dsub:fpr128
+ %2:fpr64 = UQSHLv8i8_shift killed %1:fpr64, 1
+ %3:fpr64 = FMOVDr %2:fpr64
+ %4:fpr128 = SUBREG_TO_REG 0, %3:fpr64, %subreg.dsub
+ %7:fpr128 = IMPLICIT_DEF
+ %6:fpr128 = INSERT_SUBREG %7:fpr128, killed %2:fpr64, %subreg.dsub
+ %9:fpr128 = IMPLICIT_DEF
+ %8:fpr128 = INSERT_SUBREG %9:fpr128, killed %3:fpr64, %subreg.dsub
+ RET_ReallyLR implicit $w0
+...
+
diff --git a/llvm/test/CodeGen/AArch64/pr86717.ll b/llvm/test/CodeGen/AArch64/pr86717.ll
new file mode 100644
index 0000000..aa8be95
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/pr86717.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+
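+; insertelement with a variable index is lowered through a stack temporary:
+; the vector is spilled, the index is masked into the 16-byte slot (the
+; bfxil of the low 4 bits) so an out-of-range index cannot write past the
+; slot, the byte is stored, and the vector is reloaded.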
+define <16 x i8> @f(i32 %0) {
+; CHECK-LABEL: f:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: mov x9, sp
+; CHECK-NEXT: sub w8, w8, w0
+; CHECK-NEXT: bfxil x9, x8, #0, #4
+; CHECK-NEXT: mov w8, #3 // =0x3
+; CHECK-NEXT: str q0, [sp]
+; CHECK-NEXT: strb w8, [x9]
+; CHECK-NEXT: ldr q0, [sp], #16
+; CHECK-NEXT: ret
+ %2 = sub nuw i32 1, %0
+ %3 = insertelement <16 x i8> zeroinitializer, i8 3, i32 %2
+ ret <16 x i8> %3
+}
diff --git a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
index 932b230..934ff44 100644
--- a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
+++ b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
@@ -147,10 +147,10 @@ define dso_local void @run_test() local_unnamed_addr uwtable {
; CHECK-NEXT: mov v19.16b, v23.16b
; CHECK-NEXT: mov v3.d[1], x20
; CHECK-NEXT: mov v23.16b, v27.16b
-; CHECK-NEXT: mov v27.16b, v9.16b
-; CHECK-NEXT: mul x15, x4, x5
; CHECK-NEXT: add v27.2d, v9.2d, v1.2d
+; CHECK-NEXT: mul x15, x4, x5
; CHECK-NEXT: str q11, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT: mov v11.16b, v15.16b
; CHECK-NEXT: mov v4.d[1], x22
; CHECK-NEXT: add v19.2d, v19.2d, v1.2d
; CHECK-NEXT: add v7.2d, v7.2d, v1.2d
@@ -171,9 +171,7 @@ define dso_local void @run_test() local_unnamed_addr uwtable {
; CHECK-NEXT: mov v10.16b, v26.16b
; CHECK-NEXT: mov v14.d[1], x13
; CHECK-NEXT: mov v22.16b, v31.16b
-; CHECK-NEXT: mov v20.16b, v8.16b
; CHECK-NEXT: ldp q26, q31, [sp] // 32-byte Folded Reload
-; CHECK-NEXT: mov v11.16b, v15.16b
; CHECK-NEXT: mov v0.d[1], x12
; CHECK-NEXT: add v13.2d, v13.2d, v14.2d
; CHECK-NEXT: add v31.2d, v31.2d, v14.2d
diff --git a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
index 5f905d9..6f1ae02 100644
--- a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
@@ -145,7 +145,7 @@ define void @v4i8(ptr %px, ptr %py, ptr %pz) nounwind {
; CHECK-NEXT: shl v0.4h, v0.4h, #8
; CHECK-NEXT: sqadd v0.4h, v0.4h, v1.4h
; CHECK-NEXT: sshr v0.4h, v0.4h, #8
-; CHECK-NEXT: xtn v0.8b, v0.8h
+; CHECK-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-NEXT: str s0, [x2]
; CHECK-NEXT: ret
%x = load <4 x i8>, ptr %px
diff --git a/llvm/test/CodeGen/AArch64/setcc_knownbits.ll b/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
index bb9546a..8be63b0 100644
--- a/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
+++ b/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define i1 @load_bv_v4i8(i1 zeroext %a) {
; CHECK-LABEL: load_bv_v4i8:
@@ -11,18 +12,31 @@ define i1 @load_bv_v4i8(i1 zeroext %a) {
}
define noundef i1 @logger(i32 noundef %logLevel, ptr %ea, ptr %pll) {
-; CHECK-LABEL: logger:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr w8, [x2]
-; CHECK-NEXT: cmp w8, w0
-; CHECK-NEXT: b.ls .LBB1_2
-; CHECK-NEXT: // %bb.1:
-; CHECK-NEXT: mov w0, wzr
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB1_2: // %land.rhs
-; CHECK-NEXT: ldr x8, [x1]
-; CHECK-NEXT: ldrb w0, [x8]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: logger:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: ldr w8, [x2]
+; CHECK-SD-NEXT: cmp w8, w0
+; CHECK-SD-NEXT: b.ls .LBB1_2
+; CHECK-SD-NEXT: // %bb.1:
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: .LBB1_2: // %land.rhs
+; CHECK-SD-NEXT: ldr x8, [x1]
+; CHECK-SD-NEXT: ldrb w0, [x8]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: logger:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: ldr w8, [x2]
+; CHECK-GI-NEXT: cmp w8, w0
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: b.hi .LBB1_2
+; CHECK-GI-NEXT: // %bb.1: // %land.rhs
+; CHECK-GI-NEXT: ldr x8, [x1]
+; CHECK-GI-NEXT: ldrb w8, [x8]
+; CHECK-GI-NEXT: and w0, w8, #0x1
+; CHECK-GI-NEXT: .LBB1_2: // %land.end
+; CHECK-GI-NEXT: ret
entry:
%0 = load i32, ptr %pll, align 4
%cmp.not = icmp ugt i32 %0, %logLevel
@@ -44,12 +58,18 @@ land.end: ; preds = %land.rhs, %entry
declare i64 @llvm.ctlz.i64(i64 %in, i1)
define i1 @lshr_ctlz_undef_cmpeq_one_i64(i64 %in) {
-; CHECK-LABEL: lshr_ctlz_undef_cmpeq_one_i64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: clz x8, x0
-; CHECK-NEXT: lsr x0, x8, #6
-; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: lshr_ctlz_undef_cmpeq_one_i64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: clz x8, x0
+; CHECK-SD-NEXT: lsr x0, x8, #6
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: lshr_ctlz_undef_cmpeq_one_i64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: clz x8, x0
+; CHECK-GI-NEXT: lsr w0, w8, #6
+; CHECK-GI-NEXT: ret
%ctlz = call i64 @llvm.ctlz.i64(i64 %in, i1 -1)
%lshr = lshr i64 %ctlz, 6
%icmp = icmp eq i64 %lshr, 1
@@ -57,17 +77,30 @@ define i1 @lshr_ctlz_undef_cmpeq_one_i64(i64 %in) {
}
define i32 @PR17487(i1 %tobool) {
-; CHECK-LABEL: PR17487:
-; CHECK: // %bb.0:
-; CHECK-NEXT: dup v0.2s, w0
-; CHECK-NEXT: mov w8, #1 // =0x1
-; CHECK-NEXT: dup v1.2d, x8
-; CHECK-NEXT: ushll v0.2d, v0.2s, #0
-; CHECK-NEXT: bic v0.16b, v1.16b, v0.16b
-; CHECK-NEXT: mov x8, v0.d[1]
-; CHECK-NEXT: cmp x8, #1
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: PR17487:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: dup v0.2s, w0
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: dup v1.2d, x8
+; CHECK-SD-NEXT: ushll v0.2d, v0.2s, #0
+; CHECK-SD-NEXT: bic v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: mov x8, v0.d[1]
+; CHECK-SD-NEXT: cmp x8, #1
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: PR17487:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT: mov v0.d[1], x0
+; CHECK-GI-NEXT: adrp x8, .LCPI3_0
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI3_0]
+; CHECK-GI-NEXT: bic v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: mov d0, v0.d[1]
+; CHECK-GI-NEXT: fmov x8, d0
+; CHECK-GI-NEXT: cmp x8, #1
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
%tmp = insertelement <2 x i1> undef, i1 %tobool, i32 1
%tmp1 = zext <2 x i1> %tmp to <2 x i64>
%tmp2 = xor <2 x i64> %tmp1, <i64 1, i64 1>
diff --git a/llvm/test/CodeGen/AArch64/sext.ll b/llvm/test/CodeGen/AArch64/sext.ll
index 61f04fb..3e0d5dd8 100644
--- a/llvm/test/CodeGen/AArch64/sext.ll
+++ b/llvm/test/CodeGen/AArch64/sext.ll
@@ -280,13 +280,12 @@ define <3 x i64> @sext_v3i8_v3i64(<3 x i8> %a) {
;
; CHECK-GI-LABEL: sext_v3i8_v3i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
-; CHECK-GI-NEXT: fmov d0, x0
-; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-GI-NEXT: fmov s0, w0
; CHECK-GI-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-GI-NEXT: sxtb x8, w2
; CHECK-GI-NEXT: fmov d2, x8
-; CHECK-GI-NEXT: mov v0.d[1], x1
+; CHECK-GI-NEXT: mov v0.s[1], w1
+; CHECK-GI-NEXT: ushll v0.2d, v0.2s, #0
; CHECK-GI-NEXT: shl v0.2d, v0.2d, #56
; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #56
; CHECK-GI-NEXT: mov d1, v0.d[1]
@@ -444,13 +443,12 @@ define <3 x i64> @sext_v3i10_v3i64(<3 x i10> %a) {
;
; CHECK-GI-LABEL: sext_v3i10_v3i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
-; CHECK-GI-NEXT: fmov d0, x0
-; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-GI-NEXT: fmov s0, w0
; CHECK-GI-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-GI-NEXT: sbfx x8, x2, #0, #10
; CHECK-GI-NEXT: fmov d2, x8
-; CHECK-GI-NEXT: mov v0.d[1], x1
+; CHECK-GI-NEXT: mov v0.s[1], w1
+; CHECK-GI-NEXT: ushll v0.2d, v0.2s, #0
; CHECK-GI-NEXT: shl v0.2d, v0.2d, #54
; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #54
; CHECK-GI-NEXT: mov d1, v0.d[1]
diff --git a/llvm/test/CodeGen/AArch64/shift.ll b/llvm/test/CodeGen/AArch64/shift.ll
index 5287839..9c8d3e0 100644
--- a/llvm/test/CodeGen/AArch64/shift.ll
+++ b/llvm/test/CodeGen/AArch64/shift.ll
@@ -1,13 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
-
-; CHECK-GI: warning: Instruction selection used fallback path for shl_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for shl_v2i16
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for ashr_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for ashr_v2i16
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for lshr_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for lshr_v2i16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define i1 @shl_i1(i1 %0, i1 %1){
; CHECK-SD-LABEL: shl_i1:
@@ -530,11 +523,38 @@ define <2 x i64> @lshr_v2i64(<2 x i64> %0, <2 x i64> %1){
; ===== Vector Larger/Smaller than Legal =====
define <4 x i8> @shl_v4i8(<4 x i8> %0, <4 x i8> %1){
-; CHECK-LABEL: shl_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: bic v1.4h, #255, lsl #8
-; CHECK-NEXT: ushl v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: shl_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: bic v1.4h, #255, lsl #8
+; CHECK-SD-NEXT: ushl v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shl_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NEXT: mov h4, v0.h[2]
+; CHECK-GI-NEXT: mov h5, v0.h[3]
+; CHECK-GI-NEXT: mov h6, v1.h[3]
+; CHECK-GI-NEXT: mov v0.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov h2, v1.h[2]
+; CHECK-GI-NEXT: mov v1.b[1], v3.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v1.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v5.b[0]
+; CHECK-GI-NEXT: mov v1.b[3], v6.b[0]
+; CHECK-GI-NEXT: ushl v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = shl <4 x i8> %0, %1
ret <4 x i8> %3
}
@@ -556,12 +576,27 @@ define <32 x i8> @shl_v32i8(<32 x i8> %0, <32 x i8> %1){
}
define <2 x i16> @shl_v2i16(<2 x i16> %0, <2 x i16> %1){
-; CHECK-LABEL: shl_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi d2, #0x00ffff0000ffff
-; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
-; CHECK-NEXT: ushl v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: shl_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi d2, #0x00ffff0000ffff
+; CHECK-SD-NEXT: and v1.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT: ushl v0.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shl_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov s2, v0.s[1]
+; CHECK-GI-NEXT: mov s3, v1.s[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-GI-NEXT: ushl v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = shl <2 x i16> %0, %1
ret <2 x i16> %3
}
@@ -633,14 +668,42 @@ define <4 x i64> @shl_v4i64(<4 x i64> %0, <4 x i64> %1){
}
define <4 x i8> @ashr_v4i8(<4 x i8> %0, <4 x i8> %1){
-; CHECK-LABEL: ashr_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: shl v0.4h, v0.4h, #8
-; CHECK-NEXT: bic v1.4h, #255, lsl #8
-; CHECK-NEXT: sshr v0.4h, v0.4h, #8
-; CHECK-NEXT: neg v1.4h, v1.4h
-; CHECK-NEXT: sshl v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: ashr_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-SD-NEXT: bic v1.4h, #255, lsl #8
+; CHECK-SD-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-SD-NEXT: neg v1.4h, v1.4h
+; CHECK-SD-NEXT: sshl v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: ashr_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov h2, v1.h[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov h3, v0.h[1]
+; CHECK-GI-NEXT: mov h4, v1.h[2]
+; CHECK-GI-NEXT: mov h5, v1.h[3]
+; CHECK-GI-NEXT: mov h6, v0.h[3]
+; CHECK-GI-NEXT: mov v1.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov h2, v0.h[2]
+; CHECK-GI-NEXT: mov v0.b[1], v3.b[0]
+; CHECK-GI-NEXT: mov v1.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v1.b[3], v5.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v6.b[0]
+; CHECK-GI-NEXT: neg v1.8b, v1.8b
+; CHECK-GI-NEXT: sshl v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = ashr <4 x i8> %0, %1
ret <4 x i8> %3
}
@@ -658,15 +721,31 @@ define <32 x i8> @ashr_v32i8(<32 x i8> %0, <32 x i8> %1){
}
define <2 x i16> @ashr_v2i16(<2 x i16> %0, <2 x i16> %1){
-; CHECK-LABEL: ashr_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi d2, #0x00ffff0000ffff
-; CHECK-NEXT: shl v0.2s, v0.2s, #16
-; CHECK-NEXT: sshr v0.2s, v0.2s, #16
-; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
-; CHECK-NEXT: neg v1.2s, v1.2s
-; CHECK-NEXT: sshl v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: ashr_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi d2, #0x00ffff0000ffff
+; CHECK-SD-NEXT: shl v0.2s, v0.2s, #16
+; CHECK-SD-NEXT: sshr v0.2s, v0.2s, #16
+; CHECK-SD-NEXT: and v1.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT: neg v1.2s, v1.2s
+; CHECK-SD-NEXT: sshl v0.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: ashr_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov s2, v1.s[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov s3, v0.s[1]
+; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v0.h[1], v3.h[0]
+; CHECK-GI-NEXT: neg v1.4h, v1.4h
+; CHECK-GI-NEXT: sshl v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = ashr <2 x i16> %0, %1
ret <2 x i16> %3
}
@@ -727,13 +806,41 @@ define <4 x i64> @ashr_v4i64(<4 x i64> %0, <4 x i64> %1){
}
define <4 x i8> @lshr_v4i8(<4 x i8> %0, <4 x i8> %1){
-; CHECK-LABEL: lshr_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: bic v1.4h, #255, lsl #8
-; CHECK-NEXT: bic v0.4h, #255, lsl #8
-; CHECK-NEXT: neg v1.4h, v1.4h
-; CHECK-NEXT: ushl v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: lshr_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: bic v1.4h, #255, lsl #8
+; CHECK-SD-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT: neg v1.4h, v1.4h
+; CHECK-SD-NEXT: ushl v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: lshr_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov h2, v1.h[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov h3, v0.h[1]
+; CHECK-GI-NEXT: mov h4, v1.h[2]
+; CHECK-GI-NEXT: mov h5, v1.h[3]
+; CHECK-GI-NEXT: mov h6, v0.h[3]
+; CHECK-GI-NEXT: mov v1.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov h2, v0.h[2]
+; CHECK-GI-NEXT: mov v0.b[1], v3.b[0]
+; CHECK-GI-NEXT: mov v1.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v1.b[3], v5.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v6.b[0]
+; CHECK-GI-NEXT: neg v1.8b, v1.8b
+; CHECK-GI-NEXT: ushl v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = lshr <4 x i8> %0, %1
ret <4 x i8> %3
}
@@ -751,14 +858,30 @@ define <32 x i8> @lshr_v32i8(<32 x i8> %0, <32 x i8> %1){
}
define <2 x i16> @lshr_v2i16(<2 x i16> %0, <2 x i16> %1){
-; CHECK-LABEL: lshr_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi d2, #0x00ffff0000ffff
-; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
-; CHECK-NEXT: and v0.8b, v0.8b, v2.8b
-; CHECK-NEXT: neg v1.2s, v1.2s
-; CHECK-NEXT: ushl v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: lshr_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi d2, #0x00ffff0000ffff
+; CHECK-SD-NEXT: and v1.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT: and v0.8b, v0.8b, v2.8b
+; CHECK-SD-NEXT: neg v1.2s, v1.2s
+; CHECK-SD-NEXT: ushl v0.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: lshr_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov s2, v1.s[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov s3, v0.s[1]
+; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v0.h[1], v3.h[0]
+; CHECK-GI-NEXT: neg v1.4h, v1.4h
+; CHECK-GI-NEXT: ushl v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = lshr <2 x i16> %0, %1
ret <2 x i16> %3
}
diff --git a/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll b/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
index 0ef6478..fb571ef 100644
--- a/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
+++ b/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
@@ -353,13 +353,17 @@ define <8 x i8> @shuffle4_v8i8_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x
define <8 x i16> @shuffle4_v4i8_zext(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
; CHECK-LABEL: shuffle4_v4i8_zext:
; CHECK: // %bb.0:
-; CHECK-NEXT: uzp1 v0.8b, v0.8b, v1.8b
-; CHECK-NEXT: uzp1 v1.8b, v2.8b, v3.8b
+; CHECK-NEXT: fmov d5, d2
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: // kill: def $d3 killed $d3 def $q3
; CHECK-NEXT: adrp x8, .LCPI8_0
-; CHECK-NEXT: ushll v2.8h, v0.8b, #0
+; CHECK-NEXT: fmov d4, d0
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI8_0]
-; CHECK-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-NEXT: tbl v0.16b, { v2.16b, v3.16b }, v0.16b
+; CHECK-NEXT: mov v4.d[1], v1.d[0]
+; CHECK-NEXT: mov v5.d[1], v3.d[0]
+; CHECK-NEXT: bic v4.8h, #255, lsl #8
+; CHECK-NEXT: bic v5.8h, #255, lsl #8
+; CHECK-NEXT: tbl v0.16b, { v4.16b, v5.16b }, v0.16b
; CHECK-NEXT: ret
%x = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%y = shufflevector <4 x i8> %c, <4 x i8> %d, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
diff --git a/llvm/test/CodeGen/AArch64/shufflevector.ll b/llvm/test/CodeGen/AArch64/shufflevector.ll
index d79f3ae..b1131f2 100644
--- a/llvm/test/CodeGen/AArch64/shufflevector.ll
+++ b/llvm/test/CodeGen/AArch64/shufflevector.ll
@@ -202,7 +202,7 @@ define i32 @shufflevector_v4i8(<4 x i8> %a, <4 x i8> %b){
; CHECK-SD-NEXT: ext v0.8b, v1.8b, v0.8b, #6
; CHECK-SD-NEXT: zip1 v1.4h, v1.4h, v0.4h
; CHECK-SD-NEXT: ext v0.8b, v0.8b, v1.8b, #4
-; CHECK-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: add sp, sp, #16
; CHECK-SD-NEXT: ret
@@ -390,7 +390,7 @@ define i32 @shufflevector_v4i8_zeroes(<4 x i8> %a, <4 x i8> %b){
; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-SD-NEXT: dup v0.4h, v0.h[0]
-; CHECK-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: add sp, sp, #16
; CHECK-SD-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sme-avoid-coalescing-locally-streaming.ll b/llvm/test/CodeGen/AArch64/sme-avoid-coalescing-locally-streaming.ll
new file mode 100644
index 0000000..cd5046a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme-avoid-coalescing-locally-streaming.ll
@@ -0,0 +1,120 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mattr=+sme -stop-after=finalize-isel < %s | FileCheck %s --check-prefix=CHECK-COALESCER-BARRIER
+; RUN: llc -mattr=+sme -stop-after=virtregrewriter < %s | FileCheck %s --check-prefix=CHECK-REGALLOC
+
+target triple = "aarch64"
+
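+; COALESCER_BARRIER pseudos keep the register coalescer from folding away the
+; copies around streaming-mode changes: FPR/Z-register contents are not
+; preserved across smstart/smstop, so the values must instead survive via
+; spills (visible in the CHECK-REGALLOC output below).
+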
+define void @dont_coalesce_args(<2 x i64> %a) "aarch64_pstate_sm_body" nounwind {
+ ; CHECK-COALESCER-BARRIER-LABEL: name: dont_coalesce_args
+ ; CHECK-COALESCER-BARRIER: bb.0 (%ir-block.0):
+ ; CHECK-COALESCER-BARRIER-NEXT: liveins: $q0
+ ; CHECK-COALESCER-BARRIER-NEXT: {{ $}}
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COALESCER_BARRIER_FPR128_:%[0-9]+]]:fpr128 = COALESCER_BARRIER_FPR128 [[COPY]]
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: [[DEF:%[0-9]+]]:zpr = IMPLICIT_DEF
+ ; CHECK-COALESCER-BARRIER-NEXT: [[INSERT_SUBREG:%[0-9]+]]:zpr = INSERT_SUBREG [[DEF]], [[COALESCER_BARRIER_FPR128_]], %subreg.zsub
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: $z0 = COPY [[INSERT_SUBREG]]
+ ; CHECK-COALESCER-BARRIER-NEXT: BL @scalable_args, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $z0, implicit-def $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: RET_ReallyLR
+ ;
+ ; CHECK-REGALLOC-LABEL: name: dont_coalesce_args
+ ; CHECK-REGALLOC: bb.0 (%ir-block.0):
+ ; CHECK-REGALLOC-NEXT: liveins: $q0
+ ; CHECK-REGALLOC-NEXT: {{ $}}
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = COALESCER_BARRIER_FPR128 killed renamable $q0
+ ; CHECK-REGALLOC-NEXT: STRQui killed renamable $q0, %stack.0, 0 :: (store (s128) into %stack.0)
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = KILL killed renamable $q0, implicit-def $z0
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: BL @scalable_args, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $z0, implicit-def $sp
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: RET_ReallyLR
+ %sa = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> %a, i64 0)
+ call void @scalable_args(<vscale x 2 x i64> %sa)
+ ret void
+}
+
+define <2 x i64> @dont_coalesce_res() "aarch64_pstate_sm_body" nounwind {
+ ; CHECK-COALESCER-BARRIER-LABEL: name: dont_coalesce_res
+ ; CHECK-COALESCER-BARRIER: bb.0 (%ir-block.0):
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: BL @scalable_res, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $z0
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COPY:%[0-9]+]]:zpr = COPY $z0
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY [[COPY]].zsub
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COALESCER_BARRIER_FPR128_:%[0-9]+]]:fpr128 = COALESCER_BARRIER_FPR128 [[COPY1]]
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit-def $q0, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: $q0 = COPY [[COALESCER_BARRIER_FPR128_]]
+ ; CHECK-COALESCER-BARRIER-NEXT: RET_ReallyLR implicit $q0
+ ;
+ ; CHECK-REGALLOC-LABEL: name: dont_coalesce_res
+ ; CHECK-REGALLOC: bb.0 (%ir-block.0):
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: BL @scalable_res, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $z0
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = KILL renamable $q0, implicit killed $z0
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = COALESCER_BARRIER_FPR128 killed renamable $q0
+ ; CHECK-REGALLOC-NEXT: STRQui killed renamable $q0, %stack.0, 0 :: (store (s128) into %stack.0)
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit-def dead $q0, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: RET_ReallyLR implicit $q0
+ %sa = call <vscale x 2 x i64> @scalable_res()
+ %res = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %sa, i64 0)
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @dont_coalesce_arg_that_is_also_res(<2 x i64> %a) "aarch64_pstate_sm_body" nounwind {
+ ; CHECK-COALESCER-BARRIER-LABEL: name: dont_coalesce_arg_that_is_also_res
+ ; CHECK-COALESCER-BARRIER: bb.0 (%ir-block.0):
+ ; CHECK-COALESCER-BARRIER-NEXT: liveins: $q0
+ ; CHECK-COALESCER-BARRIER-NEXT: {{ $}}
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COALESCER_BARRIER_FPR128_:%[0-9]+]]:fpr128 = COALESCER_BARRIER_FPR128 [[COPY]]
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: [[DEF:%[0-9]+]]:zpr = IMPLICIT_DEF
+ ; CHECK-COALESCER-BARRIER-NEXT: [[INSERT_SUBREG:%[0-9]+]]:zpr = INSERT_SUBREG [[DEF]], [[COALESCER_BARRIER_FPR128_]], %subreg.zsub
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: $z0 = COPY [[INSERT_SUBREG]]
+ ; CHECK-COALESCER-BARRIER-NEXT: BL @scalable_args, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $z0, implicit-def $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COALESCER_BARRIER_FPR128_1:%[0-9]+]]:fpr128 = COALESCER_BARRIER_FPR128 [[COALESCER_BARRIER_FPR128_]]
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit-def $q0, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: $q0 = COPY [[COALESCER_BARRIER_FPR128_1]]
+ ; CHECK-COALESCER-BARRIER-NEXT: RET_ReallyLR implicit $q0
+ ;
+ ; CHECK-REGALLOC-LABEL: name: dont_coalesce_arg_that_is_also_res
+ ; CHECK-REGALLOC: bb.0 (%ir-block.0):
+ ; CHECK-REGALLOC-NEXT: liveins: $q0
+ ; CHECK-REGALLOC-NEXT: {{ $}}
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = COALESCER_BARRIER_FPR128 killed renamable $q0
+ ; CHECK-REGALLOC-NEXT: STRQui killed renamable $q0, %stack.0, 0 :: (store (s128) into %stack.0)
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = KILL killed renamable $q0, implicit-def $z0
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: BL @scalable_args, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $z0, implicit-def $sp
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = COALESCER_BARRIER_FPR128 killed renamable $q0
+ ; CHECK-REGALLOC-NEXT: STRQui killed renamable $q0, %stack.0, 0 :: (store (s128) into %stack.0)
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit-def dead $q0, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: RET_ReallyLR implicit $q0
+ %sa = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> %a, i64 0)
+ call void @scalable_args(<vscale x 2 x i64> %sa)
+ ret <2 x i64> %a
+}
+
+declare void @scalable_args(<vscale x 2 x i64>) "aarch64_pstate_sm_enabled"
+declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
+
+declare <vscale x 2 x i64> @scalable_res() "aarch64_pstate_sm_enabled"
+declare <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64>, i64)
diff --git a/llvm/test/CodeGen/AArch64/sme-call-streaming-compatible-to-normal-fn-wihout-sme-attr.ll b/llvm/test/CodeGen/AArch64/sme-call-streaming-compatible-to-normal-fn-wihout-sme-attr.ll
index 3fa1ee5..dba3227 100644
--- a/llvm/test/CodeGen/AArch64/sme-call-streaming-compatible-to-normal-fn-wihout-sme-attr.ll
+++ b/llvm/test/CodeGen/AArch64/sme-call-streaming-compatible-to-normal-fn-wihout-sme-attr.ll
@@ -38,4 +38,43 @@ define void @streaming_compatible() #0 {
declare void @non_streaming()
+
+; Verify that COALESCER_BARRIER is also supported without +sme.
+
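+; The float argument must survive the conditional smstop/smstart around the
+; call, so it is kept in a stack slot rather than an FPR (the str/ldr of s0
+; in the CHECK lines below).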
+define void @streaming_compatible_arg(float %f) #0 {
+; CHECK-LABEL: streaming_compatible_arg:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #96
+; CHECK-NEXT: stp d15, d14, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT: str s0, [sp, #12] // 4-byte Folded Spill
+; CHECK-NEXT: bl __arm_sme_state
+; CHECK-NEXT: ldr s0, [sp, #12] // 4-byte Folded Reload
+; CHECK-NEXT: and x19, x0, #0x1
+; CHECK-NEXT: str s0, [sp, #12] // 4-byte Folded Spill
+; CHECK-NEXT: tbz w19, #0, .LBB1_2
+; CHECK-NEXT: // %bb.1:
+; CHECK-NEXT: smstop sm
+; CHECK-NEXT: .LBB1_2:
+; CHECK-NEXT: ldr s0, [sp, #12] // 4-byte Folded Reload
+; CHECK-NEXT: bl non_streaming
+; CHECK-NEXT: tbz w19, #0, .LBB1_4
+; CHECK-NEXT: // %bb.3:
+; CHECK-NEXT: smstart sm
+; CHECK-NEXT: .LBB1_4:
+; CHECK-NEXT: ldp x30, x19, [sp, #80] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #96
+; CHECK-NEXT: ret
+ call void @non_streaming(float %f)
+ ret void
+}
+
+
attributes #0 = { nounwind "aarch64_pstate_sm_compatible" }
diff --git a/llvm/test/CodeGen/AArch64/sme-machine-licm-vg.mir b/llvm/test/CodeGen/AArch64/sme-machine-licm-vg.mir
new file mode 100644
index 0000000..e6cce9a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme-machine-licm-vg.mir
@@ -0,0 +1,64 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=aarch64--linux-gnu -run-pass=early-machinelicm %s -verify-machineinstrs -o - | FileCheck %s
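+# PFALSE carries an implicit use of $vg; since $vg does not change between
+# the smstart/smstop markers, early-machinelicm should still be able to
+# hoist the PFALSE out of the loop, as the CHECK lines verify.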
+---
+name: test_should_hoist_pfalse
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: test_should_hoist_pfalse
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64all = COPY [[COPY1]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr64all = COPY [[COPY]]
+ ; CHECK-NEXT: [[PFALSE:%[0-9]+]]:ppr = PFALSE implicit $vg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:gpr64common = PHI [[COPY2]], %bb.0, %5, %bb.1
+ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:gpr64sp = PHI [[COPY3]], %bb.0, %7, %bb.1
+ ; CHECK-NEXT: STR_PXI [[PFALSE]], [[PHI]], 0
+ ; CHECK-NEXT: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[PHI1]], 1, 0, implicit-def $nzcv
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr64all = COPY [[SUBSXri]]
+ ; CHECK-NEXT: [[INCD_XPiI:%[0-9]+]]:gpr64 = INCD_XPiI [[PHI]], 31, 1
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr64all = COPY [[INCD_XPiI]]
+ ; CHECK-NEXT: Bcc 1, %bb.1, implicit $nzcv
+ ; CHECK-NEXT: B %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-NEXT: RET_ReallyLR
+ bb.0:
+ successors: %bb.1
+ liveins: $x0, $x1
+
+ %5:gpr64 = COPY $x1
+ %4:gpr64 = COPY $x0
+ MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ %6:gpr64all = COPY %4
+ %7:gpr64all = COPY %5
+
+ bb.1:
+ successors: %bb.2, %bb.1
+
+ %0:gpr64common = PHI %6, %bb.0, %3, %bb.1
+ %1:gpr64sp = PHI %7, %bb.0, %2, %bb.1
+ %8:ppr = PFALSE implicit $vg
+ STR_PXI killed %8, %0, 0
+ %9:gpr64 = SUBSXri %1, 1, 0, implicit-def $nzcv
+ %2:gpr64all = COPY %9
+ %10:gpr64 = INCD_XPiI %0, 31, 1
+ %3:gpr64all = COPY %10
+
+
+ Bcc 1, %bb.1, implicit $nzcv
+ B %bb.2
+
+ bb.2:
+ MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ RET_ReallyLR
+...
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll b/llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll
index d675733..6e262cc 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll
@@ -8,27 +8,31 @@ declare void @streaming_compatible_callee() "aarch64_pstate_sm_compatible";
define float @sm_body_sm_compatible_simple() "aarch64_pstate_sm_compatible" "aarch64_pstate_sm_body" nounwind {
; CHECK-LABEL: sm_body_sm_compatible_simple:
; CHECK: // %bb.0:
-; CHECK-NEXT: stp d15, d14, [sp, #-80]! // 16-byte Folded Spill
-; CHECK-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #96
+; CHECK-NEXT: stp d15, d14, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill
; CHECK-NEXT: bl __arm_sme_state
; CHECK-NEXT: and x8, x0, #0x1
; CHECK-NEXT: tbnz w8, #0, .LBB0_2
; CHECK-NEXT: // %bb.1:
; CHECK-NEXT: smstart sm
; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: fmov s0, wzr
+; CHECK-NEXT: str s0, [sp, #12] // 4-byte Folded Spill
; CHECK-NEXT: tbnz w8, #0, .LBB0_4
; CHECK-NEXT: // %bb.3:
; CHECK-NEXT: smstop sm
; CHECK-NEXT: .LBB0_4:
-; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: fmov s0, wzr
-; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload
-; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldp d15, d14, [sp], #80 // 16-byte Folded Reload
+; CHECK-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: ldr s0, [sp, #12] // 4-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #96
; CHECK-NEXT: ret
ret float zeroinitializer
}
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-body.ll b/llvm/test/CodeGen/AArch64/sme-streaming-body.ll
index 9387554..08dec22 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-body.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-body.ll
@@ -87,29 +87,27 @@ if.end:
define <2 x i64> @locally_streaming_caller_no_callee(<2 x i64> %a) "aarch64_pstate_sm_body" nounwind {
; CHECK-LABEL: locally_streaming_caller_no_callee:
; CHECK: // %bb.0:
-; CHECK-NEXT: stp d15, d14, [sp, #-80]! // 16-byte Folded Spill
-; CHECK-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: str x29, [sp, #64] // 8-byte Folded Spill
-; CHECK-NEXT: addsvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: stp d15, d14, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
; CHECK-NEXT: smstart sm
; CHECK-NEXT: index z0.d, #0, #1
-; CHECK-NEXT: ldr z1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT: add z0.d, z0.d, z1.d
; CHECK-NEXT: add z0.d, z0.d, #41 // =0x29
-; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: smstop sm
-; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
-; CHECK-NEXT: addsvl sp, sp, #1
-; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x29, [sp, #64] // 8-byte Folded Reload
-; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldp d15, d14, [sp], #80 // 16-byte Folded Reload
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: smstop sm
+; CHECK-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #80
; CHECK-NEXT: ret
%add = add <2 x i64> %a, <i64 41, i64 42>;
diff --git a/llvm/test/CodeGen/AArch64/sme-write-vg.ll b/llvm/test/CodeGen/AArch64/sme-write-vg.ll
new file mode 100644
index 0000000..577606d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme-write-vg.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mattr=+sme -stop-after=finalize-isel < %s | FileCheck %s
+
+target triple = "aarch64"
+
+; Check that we don't define VG for 'smstart za' and 'smstop za'
+define void @smstart_za() "aarch64_new_za" nounwind {
+ ; CHECK-LABEL: name: smstart_za
+ ; CHECK-NOT: implicit-def {{[^,]*}}$vg
+ ret void
+}
+
+; Check that we do define VG for 'smstart sm' and 'smstop sm'
+define void @smstart_sm() nounwind {
+ ; CHECK-LABEL: name: smstart_sm
+ ; CHECK: MSRpstatesvcrImm1 1, 1,
+ ; CHECK-SAME: implicit-def {{[^,]*}}$vg
+ ; CHECK: MSRpstatesvcrImm1 1, 0,
+ ; CHECK-SAME: implicit-def {{[^,]*}}$vg
+ call void @require_sm()
+ ret void
+}
+
+declare void @require_sm() "aarch64_pstate_sm_enabled"
+declare void @require_za() "aarch64_inout_za"
diff --git a/llvm/test/CodeGen/AArch64/soft-float-abi.ll b/llvm/test/CodeGen/AArch64/soft-float-abi.ll
new file mode 100644
index 0000000..291c387
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/soft-float-abi.ll
@@ -0,0 +1,161 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple aarch64-none-eabi < %s -mattr=-fp-armv8 | FileCheck %s
+
+; See also clang/test/CodeGen/aarch64-soft-float-abi.c, which tests the clang
+; parts of the soft-float ABI.
+
+; FP types up to 64 bits wide are passed in a general-purpose register.
+define half @test0(half %a, half %b) {
+; CHECK-LABEL: test0:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: ret
+entry:
+ ret half %b
+}
+
+define bfloat @test1(i32 %a, bfloat %b) {
+; CHECK-LABEL: test1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: ret
+entry:
+ ret bfloat %b
+}
+
+define float @test2(i64 %a, float %b) {
+; CHECK-LABEL: test2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: ret
+entry:
+ ret float %b
+}
+
+define double @test3(half %a, double %b) {
+; CHECK-LABEL: test3:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov x0, x1
+; CHECK-NEXT: ret
+entry:
+ ret double %b
+}
+
+; fp128 is passed in a pair of GPRs.
+define fp128 @test4(fp128 %a, fp128 %b) {
+; CHECK-LABEL: test4:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov x1, x3
+; CHECK-NEXT: mov x0, x2
+; CHECK-NEXT: ret
+entry:
+ ret fp128 %b
+}
+
+; fp128 is passed in an aligned pair of GPRs, leaving one register unused if
+; necessary.
+define fp128 @test5(float %a, fp128 %b) {
+; CHECK-LABEL: test5:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov x1, x3
+; CHECK-NEXT: mov x0, x2
+; CHECK-NEXT: ret
+entry:
+ ret fp128 %b
+}
+
+; If the alignment of an fp128 leaves a register unused, it remains unused even
+; if a later argument could fit in it.
+define i64 @test6(i64 %a, fp128 %b, i64 %c) {
+; CHECK-LABEL: test6:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov x0, x4
+; CHECK-NEXT: ret
+entry:
+ ret i64 %c
+}
+
+; HFAs are all bitcast to integer types in the frontend when using the
+; soft-float ABI, so they get passed in the same way as non-homogeneous
+; aggregates. The IR is identical to the equivalent integer types, so nothing
+; to test here.
+
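+; As an illustrative sketch (not a test): clang coerces an HFA such as
+; struct { float a, b; } to an integer type (e.g. i64) under this ABI, so the
+; backend only ever sees plain integer IR along the lines of:
+;   define i64 @hfa_coerced(i64 %h.coerce) {
+;     ret i64 %h.coerce
+;   }
+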
+; The PCS for vector and HVA types is not defined by the soft-float ABI because
+; these types are only defined by the ACLE when vector hardware is available,
+; so nothing to test here.
+
+; The front-end generates IR for va_arg which always reads from the integer
+; register save area, and never the floating-point register save area. The
+; layout of the va_list type remains the same; the floating-point-related
+; fields are unused. The only change needed in the backend is in va_start, to
+; not attempt to save the floating-point registers or set the FP fields in the
+; va_list.
+%struct.__va_list = type { ptr, ptr, ptr, i32, i32 }
+declare void @llvm.va_start(ptr)
+define double @test20(i32 %a, ...) {
+; CHECK-LABEL: test20:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #96
+; CHECK-NEXT: .cfi_def_cfa_offset 96
+; CHECK-NEXT: mov w8, #-56 // =0xffffffc8
+; CHECK-NEXT: add x10, sp, #8
+; CHECK-NEXT: add x9, sp, #96
+; CHECK-NEXT: str x8, [sp, #88]
+; CHECK-NEXT: add x10, x10, #56
+; CHECK-NEXT: ldrsw x8, [sp, #88]
+; CHECK-NEXT: stp x1, x2, [sp, #8]
+; CHECK-NEXT: stp x3, x4, [sp, #24]
+; CHECK-NEXT: stp x5, x6, [sp, #40]
+; CHECK-NEXT: stp x7, x9, [sp, #56]
+; CHECK-NEXT: str x10, [sp, #72]
+; CHECK-NEXT: tbz w8, #31, .LBB7_3
+; CHECK-NEXT: // %bb.1: // %vaarg.maybe_reg
+; CHECK-NEXT: add w9, w8, #8
+; CHECK-NEXT: cmn w8, #8
+; CHECK-NEXT: str w9, [sp, #88]
+; CHECK-NEXT: b.gt .LBB7_3
+; CHECK-NEXT: // %bb.2: // %vaarg.in_reg
+; CHECK-NEXT: ldr x9, [sp, #72]
+; CHECK-NEXT: add x8, x9, x8
+; CHECK-NEXT: b .LBB7_4
+; CHECK-NEXT: .LBB7_3: // %vaarg.on_stack
+; CHECK-NEXT: ldr x8, [sp, #64]
+; CHECK-NEXT: add x9, x8, #8
+; CHECK-NEXT: str x9, [sp, #64]
+; CHECK-NEXT: .LBB7_4: // %vaarg.end
+; CHECK-NEXT: ldr x0, [x8]
+; CHECK-NEXT: add sp, sp, #96
+; CHECK-NEXT: ret
+entry:
+ %vl = alloca %struct.__va_list, align 8
+ call void @llvm.va_start(ptr nonnull %vl)
+ %gr_offs_p = getelementptr inbounds %struct.__va_list, ptr %vl, i64 0, i32 3
+ %gr_offs = load i32, ptr %gr_offs_p, align 8
+ %0 = icmp sgt i32 %gr_offs, -1
+ br i1 %0, label %vaarg.on_stack, label %vaarg.maybe_reg
+
+vaarg.maybe_reg: ; preds = %entry
+ %new_reg_offs = add nsw i32 %gr_offs, 8
+ store i32 %new_reg_offs, ptr %gr_offs_p, align 8
+ %inreg = icmp slt i32 %gr_offs, -7
+ br i1 %inreg, label %vaarg.in_reg, label %vaarg.on_stack
+
+vaarg.in_reg: ; preds = %vaarg.maybe_reg
+ %reg_top_p = getelementptr inbounds %struct.__va_list, ptr %vl, i64 0, i32 1
+ %reg_top = load ptr, ptr %reg_top_p, align 8
+ %1 = sext i32 %gr_offs to i64
+ %2 = getelementptr inbounds i8, ptr %reg_top, i64 %1
+ br label %vaarg.end
+
+vaarg.on_stack: ; preds = %vaarg.maybe_reg, %entry
+ %stack = load ptr, ptr %vl, align 8
+ %new_stack = getelementptr inbounds i8, ptr %stack, i64 8
+ store ptr %new_stack, ptr %vl, align 8
+ br label %vaarg.end
+
+vaarg.end: ; preds = %vaarg.on_stack, %vaarg.in_reg
+ %vaargs.addr = phi ptr [ %2, %vaarg.in_reg ], [ %stack, %vaarg.on_stack ]
+ %3 = load double, ptr %vaargs.addr, align 8
+ ret double %3
+}
+
diff --git a/llvm/test/CodeGen/AArch64/srem-vec-crash.ll b/llvm/test/CodeGen/AArch64/srem-vec-crash.ll
new file mode 100644
index 0000000..0fce8de
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/srem-vec-crash.ll
@@ -0,0 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64-unknown-unknown < %s | FileCheck %s
+
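+; srem by true (i1 -1) is always 0, so %new0 folds to 0 and the whole
+; function reduces to returning the constant 1.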
+define i32 @pr84830(i1 %arg) {
+; CHECK-LABEL: pr84830:
+; CHECK: // %bb.0: // %bb
+; CHECK-NEXT: mov w0, #1 // =0x1
+; CHECK-NEXT: ret
+bb:
+ %new0 = srem i1 %arg, true
+ %last = zext i1 %new0 to i32
+ %i = icmp ne i32 %last, 0
+ %i1 = select i1 %i, i32 0, i32 1
+ ret i32 %i1
+}
diff --git a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
index acec3e7..d1f843a 100644
--- a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
@@ -146,7 +146,7 @@ define void @v4i8(ptr %px, ptr %py, ptr %pz) nounwind {
; CHECK-NEXT: shl v0.4h, v0.4h, #8
; CHECK-NEXT: sqsub v0.4h, v0.4h, v1.4h
; CHECK-NEXT: sshr v0.4h, v0.4h, #8
-; CHECK-NEXT: xtn v0.8b, v0.8h
+; CHECK-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-NEXT: str s0, [x2]
; CHECK-NEXT: ret
%x = load <4 x i8>, ptr %px
diff --git a/llvm/test/CodeGen/AArch64/stack-probing-no-scratch-reg.mir b/llvm/test/CodeGen/AArch64/stack-probing-no-scratch-reg.mir
index f2d79bd..a9c9b5f 100644
--- a/llvm/test/CodeGen/AArch64/stack-probing-no-scratch-reg.mir
+++ b/llvm/test/CodeGen/AArch64/stack-probing-no-scratch-reg.mir
@@ -29,6 +29,7 @@ tracksRegLiveness: true
liveins:
- { reg: '$w0', virtual-reg: '' }
frameInfo:
+ adjustsStack: true
localFrameSize: 150000
stack:
- { id: 0, name: a, type: default, offset: 0, size: 150000, alignment: 8,
diff --git a/llvm/test/CodeGen/AArch64/stack-probing-shrink-wrap.mir b/llvm/test/CodeGen/AArch64/stack-probing-shrink-wrap.mir
index 83aa90d..985ec35 100644
--- a/llvm/test/CodeGen/AArch64/stack-probing-shrink-wrap.mir
+++ b/llvm/test/CodeGen/AArch64/stack-probing-shrink-wrap.mir
@@ -31,6 +31,7 @@ tracksRegLiveness: true
liveins:
- { reg: '$w0', virtual-reg: '' }
frameInfo:
+ adjustsStack: true
localFrameSize: 150000
stack:
- { id: 0, name: a, type: default, offset: 0, size: 150000, alignment: 8,
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll b/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll
index d8969fc..22d177c 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll
@@ -20,10 +20,10 @@ entry:
; CHECK-LABEL: define void @OneVarNoInit(
; CHECK-DAG: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16
; CHECK-DAG: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.{{.*}}(ptr [[X]], {{.*}}, i64 0)
-; CHECK-DAG: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[TX]])
+; CHECK-DAG: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]])
; CHECK-DAG: call void @llvm.aarch64.settag(ptr [[TX]], i64 16)
; CHECK-DAG: call void @use(ptr nonnull [[TX]])
-; CHECK-DAG: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[TX]])
+; CHECK-DAG: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]])
define void @OneVarInitConst() sanitize_memtag {
entry:
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll b/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll
index 6eb7201..5d1c91e 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll
@@ -1,20 +1,18 @@
; Test that storage for allocas with disjoint lifetimes is reused with stack
; tagging.
-; RUN: opt -S -aarch64-stack-tagging %s -o - | \
-; RUN: llc -no-stack-coloring=false -o - | \
+; RUN: llc --mattr=+mte -no-stack-coloring=false -stack-tagging-use-stack-safety=0 -o - %s | \
; RUN: FileCheck %s --check-prefix=COLOR
-; RUN: opt -S -aarch64-stack-tagging %s -o - | \
-; RUN: llc -no-stack-coloring=true -o - | \
+; RUN: llc --mattr=+mte -no-stack-coloring=true -stack-tagging-use-stack-safety=0 -o - %s | \
; RUN: FileCheck %s --check-prefix=NOCOLOR
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64-unknown-linux-android29"
+target triple = "aarch64"
; COLOR: sub sp, sp, #192
-; NOCOLOR: sub sp, sp, #320
+; NOCOLOR: sub sp, sp, #336
-define i32 @myCall_w2(i32 %in) sanitize_hwaddress {
+define i32 @myCall_w2(i32 %in) sanitize_memtag {
entry:
%a = alloca [17 x ptr], align 8
%a2 = alloca [16 x ptr], align 8
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
index 06f8cd5..aa9cccc 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
@@ -27,7 +27,7 @@ S1:
; CHECK: call void @llvm.aarch64.settag(ptr %w, i64 48)
; CHECK-NOT: settag{{.*}}%v
call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w) #1
-; CHECK: call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w.tag)
+; CHECK: call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w)
%b1 = icmp eq i32 %t1, 0
br i1 %b1, label %S2, label %S3
; CHECK-NOT: settag
diff --git a/llvm/test/CodeGen/AArch64/stackmap.ll b/llvm/test/CodeGen/AArch64/stackmap.ll
index ce7dcc4a..995d254 100644
--- a/llvm/test/CodeGen/AArch64/stackmap.ll
+++ b/llvm/test/CodeGen/AArch64/stackmap.ll
@@ -9,11 +9,11 @@
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .hword 0
; Num Functions
-; CHECK-NEXT: .word 14
+; CHECK-NEXT: .word 15
; Num LargeConstants
; CHECK-NEXT: .word 4
; Num Callsites
-; CHECK-NEXT: .word 18
+; CHECK-NEXT: .word 22
; Functions and stack size
; CHECK-NEXT: .xword constantargs
@@ -49,6 +49,9 @@
; CHECK-NEXT: .xword longid
; CHECK-NEXT: .xword 16
; CHECK-NEXT: .xword 4
+; CHECK-NEXT: .xword statepoint_longid
+; CHECK-NEXT: .xword 16
+; CHECK-NEXT: .xword 4
; CHECK-NEXT: .xword clobberLR
; CHECK-NEXT: .xword 112
; CHECK-NEXT: .xword 1
@@ -443,6 +446,26 @@ entry:
ret void
}
+; Test a 64-bit ID for statepoint.
+;
+; CHECK: .xword 4294967295
+; CHECK-LABEL: .word .L{{.*}}-statepoint_longid
+; CHECK: .xword 4294967296
+; CHECK-LABEL: .word .L{{.*}}-statepoint_longid
+; CHECK: .xword 9223372036854775807
+; CHECK-LABEL: .word .L{{.*}}-statepoint_longid
+; CHECK: .xword -1
+; CHECK-LABEL: .word .L{{.*}}-statepoint_longid
+define void @statepoint_longid() gc "statepoint-example" {
+entry:
+ %safepoint_token1 = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 4294967295, i32 0, ptr elementtype(void ()) @return_void, i32 0, i32 0, i32 0, i32 0)
+ %safepoint_token2 = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 4294967296, i32 0, ptr elementtype(void ()) @return_void, i32 0, i32 0, i32 0, i32 0)
+ %safepoint_token3 = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 9223372036854775807, i32 0, ptr elementtype(void ()) @return_void, i32 0, i32 0, i32 0, i32 0)
+ %safepoint_token4 = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 -1, i32 0, ptr elementtype(void ()) @return_void, i32 0, i32 0, i32 0, i32 0)
+ ret void
+}
+declare void @return_void()
+
; Map a value when R11 is the only free register.
; The scratch register should not be used for a live stackmap value.
;
@@ -463,8 +486,8 @@ define void @clobberLR(i32 %a) {
ret void
}
-; A stack frame which needs to be realigned at runtime (to meet alignment
-; criteria for values on the stack) does not have a fixed frame size.
+; A stack frame which needs to be realigned at runtime (to meet alignment
+; criteria for values on the stack) does not have a fixed frame size.
; CHECK-LABEL: .word .L{{.*}}-needsStackRealignment
; CHECK-NEXT: .hword 0
; 0 locations
@@ -537,3 +560,4 @@ define void @floats(float %f, double %g) {
declare void @llvm.experimental.stackmap(i64, i32, ...)
declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll b/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll
index 9fa5208..3db802a 100644
--- a/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll
+++ b/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll
@@ -273,7 +273,7 @@ define void @outgoing_v4f16_return(ptr %ptr) #0 {
; NOFP16-NEXT: strh w0, [x19]
; NOFP16-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
; NOFP16-NEXT: ret
- %val = call <4 x half> @v4f16_result()
+ %val = call <4 x half> @v4f16_result() #0
store <4 x half> %val, ptr %ptr
ret void
}
@@ -297,7 +297,7 @@ define void @outgoing_v8f16_return(ptr %ptr) #0 {
; NOFP16-NEXT: strh w0, [x19]
; NOFP16-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
; NOFP16-NEXT: ret
- %val = call <8 x half> @v8f16_result()
+ %val = call <8 x half> @v8f16_result() #0
store <8 x half> %val, ptr %ptr
ret void
}
@@ -312,7 +312,7 @@ define half @call_split_type_used_outside_block_v8f16() #0 {
; NOFP16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; NOFP16-NEXT: ret
bb0:
- %split.ret.type = call <8 x half> @v8f16_result()
+ %split.ret.type = call <8 x half> @v8f16_result() #0
br label %bb1
bb1:
diff --git a/llvm/test/CodeGen/AArch64/tbl-loops.ll b/llvm/test/CodeGen/AArch64/tbl-loops.ll
index 4f8a4f7..0ad9900 100644
--- a/llvm/test/CodeGen/AArch64/tbl-loops.ll
+++ b/llvm/test/CodeGen/AArch64/tbl-loops.ll
@@ -41,8 +41,8 @@ define void @loop1(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: fcvtzs v2.4s, v2.4s
; CHECK-NEXT: xtn v1.4h, v1.4s
; CHECK-NEXT: xtn v2.4h, v2.4s
-; CHECK-NEXT: xtn v1.8b, v1.8h
-; CHECK-NEXT: xtn v2.8b, v2.8h
+; CHECK-NEXT: uzp1 v1.8b, v1.8b, v0.8b
+; CHECK-NEXT: uzp1 v2.8b, v2.8b, v0.8b
; CHECK-NEXT: mov v1.s[1], v2.s[0]
; CHECK-NEXT: stur d1, [x12, #-4]
; CHECK-NEXT: add x12, x12, #8
diff --git a/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll b/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll
index ba367b0..18cd4cc 100644
--- a/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll
@@ -710,23 +710,23 @@ define void @trunc_v11i64_to_v11i8_in_loop(ptr %A, ptr %dst) {
; CHECK-NEXT: LBB6_1: ; %loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldp q4, q1, [x0, #48]
-; CHECK-NEXT: add x9, x1, #8
-; CHECK-NEXT: ldp q3, q2, [x0]
-; CHECK-NEXT: subs x8, x8, #1
+; CHECK-NEXT: add x9, x1, #10
; CHECK-NEXT: ldr d0, [x0, #80]
+; CHECK-NEXT: ldp q3, q2, [x0]
; CHECK-NEXT: ldr q5, [x0, #32]
+; CHECK-NEXT: subs x8, x8, #1
; CHECK-NEXT: add x0, x0, #128
-; CHECK-NEXT: uzp1.4s v4, v5, v4
-; CHECK-NEXT: uzp1.4s v2, v3, v2
; CHECK-NEXT: uzp1.4s v0, v1, v0
-; CHECK-NEXT: uzp1.8h v1, v2, v4
+; CHECK-NEXT: uzp1.4s v1, v5, v4
+; CHECK-NEXT: uzp1.4s v2, v3, v2
; CHECK-NEXT: xtn.4h v0, v0
-; CHECK-NEXT: uzp1.16b v1, v1, v0
-; CHECK-NEXT: xtn.8b v0, v0
-; CHECK-NEXT: st1.h { v1 }[4], [x9]
-; CHECK-NEXT: add x9, x1, #10
-; CHECK-NEXT: st1.b { v0 }[2], [x9]
-; CHECK-NEXT: str d1, [x1], #16
+; CHECK-NEXT: uzp1.8h v1, v2, v1
+; CHECK-NEXT: uzp1.8b v2, v0, v0
+; CHECK-NEXT: uzp1.16b v0, v1, v0
+; CHECK-NEXT: st1.b { v2 }[2], [x9]
+; CHECK-NEXT: add x9, x1, #8
+; CHECK-NEXT: st1.h { v0 }[4], [x9]
+; CHECK-NEXT: str d0, [x1], #16
; CHECK-NEXT: b.eq LBB6_1
; CHECK-NEXT: ; %bb.2: ; %exit
; CHECK-NEXT: ret
@@ -755,7 +755,7 @@ define void @trunc_v11i64_to_v11i8_in_loop(ptr %A, ptr %dst) {
; CHECK-BE-NEXT: xtn v0.4h, v0.4s
; CHECK-BE-NEXT: uzp1 v1.8h, v1.8h, v2.8h
; CHECK-BE-NEXT: uzp1 v1.16b, v1.16b, v0.16b
-; CHECK-BE-NEXT: xtn v0.8b, v0.8h
+; CHECK-BE-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-BE-NEXT: rev16 v2.16b, v1.16b
; CHECK-BE-NEXT: rev64 v1.16b, v1.16b
; CHECK-BE-NEXT: st1 { v0.b }[2], [x9]
@@ -790,7 +790,7 @@ define void @trunc_v11i64_to_v11i8_in_loop(ptr %A, ptr %dst) {
; CHECK-DISABLE-NEXT: xtn v0.4h, v0.4s
; CHECK-DISABLE-NEXT: uzp1 v1.8h, v1.8h, v2.8h
; CHECK-DISABLE-NEXT: uzp1 v1.16b, v1.16b, v0.16b
-; CHECK-DISABLE-NEXT: xtn v0.8b, v0.8h
+; CHECK-DISABLE-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-DISABLE-NEXT: rev16 v2.16b, v1.16b
; CHECK-DISABLE-NEXT: rev64 v1.16b, v1.16b
; CHECK-DISABLE-NEXT: st1 { v0.b }[2], [x9]
diff --git a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
index e05c65d..f0bbed5 100644
--- a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
@@ -142,7 +142,7 @@ define void @v4i8(ptr %px, ptr %py, ptr %pz) nounwind {
; CHECK-NEXT: movi d0, #0xff00ff00ff00ff
; CHECK-NEXT: uaddl v1.8h, v1.8b, v2.8b
; CHECK-NEXT: umin v0.4h, v1.4h, v0.4h
-; CHECK-NEXT: xtn v0.8b, v0.8h
+; CHECK-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-NEXT: str s0, [x2]
; CHECK-NEXT: ret
%x = load <4 x i8>, ptr %px
diff --git a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
index 05f43e7..82c0327 100644
--- a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
@@ -143,7 +143,7 @@ define void @v4i8(ptr %px, ptr %py, ptr %pz) nounwind {
; CHECK-NEXT: ushll v0.8h, v0.8b, #0
; CHECK-NEXT: ushll v1.8h, v1.8b, #0
; CHECK-NEXT: uqsub v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: xtn v0.8b, v0.8h
+; CHECK-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-NEXT: str s0, [x2]
; CHECK-NEXT: ret
%x = load <4 x i8>, ptr %px
diff --git a/llvm/test/CodeGen/AArch64/vcvt-oversize.ll b/llvm/test/CodeGen/AArch64/vcvt-oversize.ll
index 380bdbc..6119405 100644
--- a/llvm/test/CodeGen/AArch64/vcvt-oversize.ll
+++ b/llvm/test/CodeGen/AArch64/vcvt-oversize.ll
@@ -9,9 +9,8 @@ define <8 x i8> @float_to_i8(ptr %in) {
; CHECK-NEXT: fadd v0.4s, v0.4s, v0.4s
; CHECK-NEXT: fcvtzs v0.4s, v0.4s
; CHECK-NEXT: fcvtzs v1.4s, v1.4s
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: xtn v1.4h, v1.4s
-; CHECK-NEXT: uzp1 v0.8b, v1.8b, v0.8b
+; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: xtn v0.8b, v0.8h
; CHECK-NEXT: ret
%l = load <8 x float>, ptr %in
%scale = fmul <8 x float> %l, <float 2.0, float 2.0, float 2.0, float 2.0, float 2.0, float 2.0, float 2.0, float 2.0>
diff --git a/llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll b/llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll
index 9c6ab8d..dd7a9c6 100644
--- a/llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll
+++ b/llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll
@@ -210,7 +210,7 @@ define void @no_combine_for_non_bool_truncate(<4 x i32> %vec, ptr %out) {
; CHECK-LABEL: no_combine_for_non_bool_truncate:
; CHECK: ; %bb.0:
; CHECK-NEXT: xtn.4h v0, v0
-; CHECK-NEXT: xtn.8b v0, v0
+; CHECK-NEXT: uzp1.8b v0, v0, v0
; CHECK-NEXT: str s0, [x0]
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll b/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
index 90328f7..71d55df 100644
--- a/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
+++ b/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
@@ -410,7 +410,7 @@ define void @store_trunc_from_64bits(ptr %src, ptr %dst) {
; BE-NEXT: ldrh w8, [x0, #4]
; BE-NEXT: rev32 v0.4h, v0.4h
; BE-NEXT: mov v0.h[2], w8
-; BE-NEXT: xtn v0.8b, v0.8h
+; BE-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; BE-NEXT: rev32 v0.16b, v0.16b
; BE-NEXT: str s0, [sp, #12]
; BE-NEXT: ldrh w9, [sp, #12]
@@ -456,7 +456,7 @@ define void @store_trunc_add_from_64bits(ptr %src, ptr %dst) {
; BE-NEXT: add x8, x8, :lo12:.LCPI11_0
; BE-NEXT: ld1 { v1.4h }, [x8]
; BE-NEXT: add v0.4h, v0.4h, v1.4h
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
@@ -638,7 +638,7 @@ define void @shift_trunc_store(ptr %src, ptr %dst) {
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ld1 { v0.4s }, [x0]
; BE-NEXT: shrn v0.4h, v0.4s, #16
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
@@ -672,7 +672,7 @@ define void @shift_trunc_store_default_align(ptr %src, ptr %dst) {
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ld1 { v0.4s }, [x0]
; BE-NEXT: shrn v0.4h, v0.4s, #16
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
@@ -706,7 +706,7 @@ define void @shift_trunc_store_align_4(ptr %src, ptr %dst) {
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ld1 { v0.4s }, [x0]
; BE-NEXT: shrn v0.4h, v0.4s, #16
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
@@ -741,7 +741,7 @@ define void @shift_trunc_store_const_offset_1(ptr %src, ptr %dst) {
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ld1 { v0.4s }, [x0]
; BE-NEXT: shrn v0.4h, v0.4s, #16
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
@@ -777,7 +777,7 @@ define void @shift_trunc_store_const_offset_3(ptr %src, ptr %dst) {
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ld1 { v0.4s }, [x0]
; BE-NEXT: shrn v0.4h, v0.4s, #16
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
@@ -801,7 +801,7 @@ define void @shift_trunc_volatile_store(ptr %src, ptr %dst) {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: shrn.4h v0, v0, #16
-; CHECK-NEXT: xtn.8b v1, v0
+; CHECK-NEXT: uzp1.8b v1, v0, v0
; CHECK-NEXT: umov.h w8, v0[2]
; CHECK-NEXT: str s1, [sp, #12]
; CHECK-NEXT: ldrh w9, [sp, #12]
@@ -816,7 +816,7 @@ define void @shift_trunc_volatile_store(ptr %src, ptr %dst) {
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ld1 { v0.4s }, [x0]
; BE-NEXT: shrn v0.4h, v0.4s, #16
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
@@ -868,7 +868,7 @@ define void @load_v3i8_zext_to_3xi32_add_trunc_store(ptr %src) {
; BE-NEXT: ushll v0.8h, v0.8b, #0
; BE-NEXT: ld1 { v0.b }[4], [x9]
; BE-NEXT: add v0.4h, v0.4h, v1.4h
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #8]
@@ -921,7 +921,7 @@ define void @load_v3i8_sext_to_3xi32_add_trunc_store(ptr %src) {
; BE-NEXT: ushll v0.8h, v0.8b, #0
; BE-NEXT: ld1 { v0.b }[4], [x9]
; BE-NEXT: add v0.4h, v0.4h, v1.4h
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #8]
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add.ll b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
index 66b4946..66ef436 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-add.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
@@ -4,11 +4,6 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-BASE
; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=2 %s -o - -mattr=+dotprod 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-DOT
-; CHECK-GI-BASE: warning: Instruction selection used fallback path for test_udot_v24i8
-; CHECK-GI-BASE-NEXT: warning: Instruction selection used fallback path for test_udot_v48i8
-; CHECK-GI-BASE-NEXT: warning: Instruction selection used fallback path for test_sdot_v24i8
-; CHECK-GI-BASE-NEXT: warning: Instruction selection used fallback path for test_sdot_v48i8
-
define i32 @addv_v2i32(<2 x i32> %a) {
; CHECK-LABEL: addv_v2i32:
; CHECK: // %bb.0: // %entry
@@ -2070,126 +2065,50 @@ define i32 @test_udot_v24i8(ptr %p1, ptr %p2) {
; CHECK-GI-BASE: // %bb.0: // %entry
; CHECK-GI-BASE-NEXT: ldr q0, [x0]
; CHECK-GI-BASE-NEXT: ldr q1, [x1]
-; CHECK-GI-BASE-NEXT: ldr d4, [x0, #16]
-; CHECK-GI-BASE-NEXT: ldr d5, [x1, #16]
-; CHECK-GI-BASE-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v3.8h, v1.8b, #0
+; CHECK-GI-BASE-NEXT: ldr d2, [x0, #16]
+; CHECK-GI-BASE-NEXT: ldr d3, [x1, #16]
+; CHECK-GI-BASE-NEXT: ushll v4.8h, v0.8b, #0
; CHECK-GI-BASE-NEXT: ushll2 v0.8h, v0.16b, #0
+; CHECK-GI-BASE-NEXT: ushll v5.8h, v1.8b, #0
+; CHECK-GI-BASE-NEXT: ushll v2.8h, v2.8b, #0
; CHECK-GI-BASE-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: umull v6.4s, v3.4h, v2.4h
-; CHECK-GI-BASE-NEXT: umull2 v2.4s, v3.8h, v2.8h
-; CHECK-GI-BASE-NEXT: ushll v3.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v4.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: umlal2 v2.4s, v4.8h, v3.8h
-; CHECK-GI-BASE-NEXT: umlal v6.4s, v4.4h, v3.4h
-; CHECK-GI-BASE-NEXT: umlal2 v2.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: umlal v6.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: add v0.4s, v6.4s, v2.4s
+; CHECK-GI-BASE-NEXT: ushll v3.8h, v3.8b, #0
+; CHECK-GI-BASE-NEXT: umull v6.4s, v5.4h, v4.4h
+; CHECK-GI-BASE-NEXT: umull2 v4.4s, v5.8h, v4.8h
+; CHECK-GI-BASE-NEXT: umull2 v5.4s, v1.8h, v0.8h
+; CHECK-GI-BASE-NEXT: umull v7.4s, v3.4h, v2.4h
+; CHECK-GI-BASE-NEXT: umull v0.4s, v1.4h, v0.4h
+; CHECK-GI-BASE-NEXT: umull2 v1.4s, v3.8h, v2.8h
+; CHECK-GI-BASE-NEXT: addv s2, v6.4s
+; CHECK-GI-BASE-NEXT: addv s3, v4.4s
+; CHECK-GI-BASE-NEXT: addv s4, v5.4s
+; CHECK-GI-BASE-NEXT: addv s5, v7.4s
; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: addv s1, v1.4s
+; CHECK-GI-BASE-NEXT: fmov w8, s2
+; CHECK-GI-BASE-NEXT: fmov w9, s3
+; CHECK-GI-BASE-NEXT: fmov w10, s4
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s1
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v24i8:
; CHECK-GI-DOT: // %bb.0: // %entry
-; CHECK-GI-DOT-NEXT: ldr b1, [x0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #1]
; CHECK-GI-DOT-NEXT: movi v0.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: ldr b2, [x1]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #1]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #8]
-; CHECK-GI-DOT-NEXT: mov v1.b[1], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #2]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #8]
-; CHECK-GI-DOT-NEXT: mov v2.b[1], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #2]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #17]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #17]
-; CHECK-GI-DOT-NEXT: mov v1.b[2], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #3]
-; CHECK-GI-DOT-NEXT: mov v2.b[2], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #3]
-; CHECK-GI-DOT-NEXT: mov v1.b[3], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #4]
-; CHECK-GI-DOT-NEXT: mov v2.b[3], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #4]
-; CHECK-GI-DOT-NEXT: mov v1.b[4], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #5]
-; CHECK-GI-DOT-NEXT: mov v2.b[4], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #5]
-; CHECK-GI-DOT-NEXT: mov v1.b[5], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #6]
-; CHECK-GI-DOT-NEXT: mov v2.b[5], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #6]
-; CHECK-GI-DOT-NEXT: mov v1.b[6], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #7]
-; CHECK-GI-DOT-NEXT: mov v2.b[6], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #7]
-; CHECK-GI-DOT-NEXT: mov v1.b[7], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #16]
-; CHECK-GI-DOT-NEXT: mov v2.b[7], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #16]
-; CHECK-GI-DOT-NEXT: mov v3.b[1], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #18]
-; CHECK-GI-DOT-NEXT: mov v4.b[1], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #18]
-; CHECK-GI-DOT-NEXT: mov v1.b[8], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #9]
-; CHECK-GI-DOT-NEXT: mov v2.b[8], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #9]
-; CHECK-GI-DOT-NEXT: mov v3.b[2], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #19]
-; CHECK-GI-DOT-NEXT: mov v4.b[2], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #19]
-; CHECK-GI-DOT-NEXT: mov v1.b[9], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #10]
-; CHECK-GI-DOT-NEXT: mov v2.b[9], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #10]
-; CHECK-GI-DOT-NEXT: mov v3.b[3], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #20]
-; CHECK-GI-DOT-NEXT: mov v4.b[3], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #20]
-; CHECK-GI-DOT-NEXT: mov v1.b[10], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #11]
-; CHECK-GI-DOT-NEXT: mov v2.b[10], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #11]
-; CHECK-GI-DOT-NEXT: mov v3.b[4], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #21]
-; CHECK-GI-DOT-NEXT: mov v4.b[4], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #21]
-; CHECK-GI-DOT-NEXT: mov v1.b[11], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #12]
-; CHECK-GI-DOT-NEXT: mov v2.b[11], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #12]
-; CHECK-GI-DOT-NEXT: mov v3.b[5], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #22]
-; CHECK-GI-DOT-NEXT: mov v4.b[5], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #22]
-; CHECK-GI-DOT-NEXT: mov v1.b[12], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #13]
-; CHECK-GI-DOT-NEXT: mov v2.b[12], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #13]
-; CHECK-GI-DOT-NEXT: mov v3.b[6], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #23]
-; CHECK-GI-DOT-NEXT: mov v4.b[6], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #23]
-; CHECK-GI-DOT-NEXT: mov v1.b[13], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #14]
-; CHECK-GI-DOT-NEXT: mov v2.b[13], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #14]
-; CHECK-GI-DOT-NEXT: mov v3.b[7], v7.b[0]
-; CHECK-GI-DOT-NEXT: mov v4.b[7], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[14], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #15]
-; CHECK-GI-DOT-NEXT: mov v2.b[14], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #15]
-; CHECK-GI-DOT-NEXT: fmov d3, d3
-; CHECK-GI-DOT-NEXT: fmov d4, d4
-; CHECK-GI-DOT-NEXT: mov v1.b[15], v5.b[0]
-; CHECK-GI-DOT-NEXT: movi v5.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: mov v2.b[15], v6.b[0]
-; CHECK-GI-DOT-NEXT: udot v0.4s, v4.16b, v3.16b
-; CHECK-GI-DOT-NEXT: udot v5.4s, v2.16b, v1.16b
-; CHECK-GI-DOT-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-GI-DOT-NEXT: movi v1.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: ldr q2, [x0]
+; CHECK-GI-DOT-NEXT: ldr d3, [x0, #16]
+; CHECK-GI-DOT-NEXT: ldr q4, [x1]
+; CHECK-GI-DOT-NEXT: ldr d5, [x1, #16]
+; CHECK-GI-DOT-NEXT: udot v1.4s, v4.16b, v2.16b
+; CHECK-GI-DOT-NEXT: udot v0.4s, v5.16b, v3.16b
+; CHECK-GI-DOT-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-DOT-NEXT: addv s0, v0.4s
; CHECK-GI-DOT-NEXT: fmov w0, s0
; CHECK-GI-DOT-NEXT: ret
@@ -2257,243 +2176,91 @@ define i32 @test_udot_v48i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_udot_v48i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: ldp q0, q4, [x1]
-; CHECK-GI-BASE-NEXT: ldr q2, [x0, #32]
-; CHECK-GI-BASE-NEXT: ldp q1, q3, [x0]
-; CHECK-GI-BASE-NEXT: ldr q7, [x1, #32]
-; CHECK-GI-BASE-NEXT: ushll2 v16.8h, v2.16b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v6.8h, v0.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v17.8h, v7.16b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v5.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: umull2 v18.4s, v6.8h, v5.8h
-; CHECK-GI-BASE-NEXT: umull v19.4s, v0.4h, v1.4h
-; CHECK-GI-BASE-NEXT: umull v5.4s, v6.4h, v5.4h
-; CHECK-GI-BASE-NEXT: umull2 v0.4s, v0.8h, v1.8h
-; CHECK-GI-BASE-NEXT: ushll v1.8h, v2.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v2.8h, v7.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v6.8h, v3.16b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v7.8h, v4.16b, #0
-; CHECK-GI-BASE-NEXT: umlal2 v18.4s, v17.8h, v16.8h
-; CHECK-GI-BASE-NEXT: umlal v5.4s, v17.4h, v16.4h
-; CHECK-GI-BASE-NEXT: umlal v19.4s, v2.4h, v1.4h
-; CHECK-GI-BASE-NEXT: umlal2 v0.4s, v2.8h, v1.8h
-; CHECK-GI-BASE-NEXT: ushll v1.8h, v3.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v2.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: umlal2 v18.4s, v7.8h, v6.8h
-; CHECK-GI-BASE-NEXT: umlal v5.4s, v7.4h, v6.4h
-; CHECK-GI-BASE-NEXT: umlal v19.4s, v2.4h, v1.4h
-; CHECK-GI-BASE-NEXT: umlal2 v0.4s, v2.8h, v1.8h
-; CHECK-GI-BASE-NEXT: add v1.4s, v19.4s, v5.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v18.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-GI-BASE-NEXT: ldp q0, q3, [x1]
+; CHECK-GI-BASE-NEXT: ldr q6, [x1, #32]
+; CHECK-GI-BASE-NEXT: ldp q1, q2, [x0]
+; CHECK-GI-BASE-NEXT: ldr q17, [x0, #32]
+; CHECK-GI-BASE-NEXT: ushll v4.8h, v0.8b, #0
+; CHECK-GI-BASE-NEXT: ushll2 v0.8h, v0.16b, #0
+; CHECK-GI-BASE-NEXT: ushll v7.8h, v3.8b, #0
+; CHECK-GI-BASE-NEXT: ushll v5.8h, v1.8b, #0
+; CHECK-GI-BASE-NEXT: ushll2 v1.8h, v1.16b, #0
+; CHECK-GI-BASE-NEXT: ushll v16.8h, v2.8b, #0
+; CHECK-GI-BASE-NEXT: ushll2 v3.8h, v3.16b, #0
+; CHECK-GI-BASE-NEXT: ushll2 v2.8h, v2.16b, #0
+; CHECK-GI-BASE-NEXT: umull v18.4s, v4.4h, v5.4h
+; CHECK-GI-BASE-NEXT: umull2 v4.4s, v4.8h, v5.8h
+; CHECK-GI-BASE-NEXT: umull2 v19.4s, v0.8h, v1.8h
+; CHECK-GI-BASE-NEXT: umull v20.4s, v7.4h, v16.4h
+; CHECK-GI-BASE-NEXT: umull v0.4s, v0.4h, v1.4h
+; CHECK-GI-BASE-NEXT: ushll v5.8h, v6.8b, #0
+; CHECK-GI-BASE-NEXT: ushll v1.8h, v17.8b, #0
+; CHECK-GI-BASE-NEXT: umull2 v7.4s, v7.8h, v16.8h
+; CHECK-GI-BASE-NEXT: ushll2 v6.8h, v6.16b, #0
+; CHECK-GI-BASE-NEXT: ushll2 v17.8h, v17.16b, #0
+; CHECK-GI-BASE-NEXT: addv s16, v18.4s
+; CHECK-GI-BASE-NEXT: addv s4, v4.4s
+; CHECK-GI-BASE-NEXT: umull v18.4s, v3.4h, v2.4h
+; CHECK-GI-BASE-NEXT: umull2 v2.4s, v3.8h, v2.8h
+; CHECK-GI-BASE-NEXT: addv s3, v19.4s
+; CHECK-GI-BASE-NEXT: umull v19.4s, v5.4h, v1.4h
+; CHECK-GI-BASE-NEXT: umull2 v1.4s, v5.8h, v1.8h
+; CHECK-GI-BASE-NEXT: addv s5, v20.4s
; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: addv s7, v7.4s
+; CHECK-GI-BASE-NEXT: umull v20.4s, v6.4h, v17.4h
+; CHECK-GI-BASE-NEXT: umull2 v6.4s, v6.8h, v17.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s16
+; CHECK-GI-BASE-NEXT: fmov w9, s4
+; CHECK-GI-BASE-NEXT: fmov w10, s3
+; CHECK-GI-BASE-NEXT: addv s3, v18.4s
+; CHECK-GI-BASE-NEXT: addv s2, v2.4s
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: addv s4, v19.4s
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: addv s0, v1.4s
+; CHECK-GI-BASE-NEXT: addv s1, v20.4s
+; CHECK-GI-BASE-NEXT: addv s5, v6.4s
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s3
+; CHECK-GI-BASE-NEXT: fmov w12, s2
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s7
+; CHECK-GI-BASE-NEXT: add w9, w10, w9
+; CHECK-GI-BASE-NEXT: add w10, w11, w12
+; CHECK-GI-BASE-NEXT: fmov w11, s4
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w10, s0
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: add w9, w9, w10
+; CHECK-GI-BASE-NEXT: fmov w10, s1
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v48i8:
; CHECK-GI-DOT: // %bb.0: // %entry
-; CHECK-GI-DOT-NEXT: ldr b1, [x0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #1]
; CHECK-GI-DOT-NEXT: movi v0.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: ldr b2, [x0, #16]
-; CHECK-GI-DOT-NEXT: ldr b6, [x0, #17]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #1]
-; CHECK-GI-DOT-NEXT: mov v1.b[1], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x1, #16]
-; CHECK-GI-DOT-NEXT: ldr b18, [x1, #17]
-; CHECK-GI-DOT-NEXT: mov v2.b[1], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #32]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #33]
-; CHECK-GI-DOT-NEXT: mov v4.b[1], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #32]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #33]
-; CHECK-GI-DOT-NEXT: mov v5.b[1], v18.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #2]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #18]
-; CHECK-GI-DOT-NEXT: mov v3.b[1], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #2]
-; CHECK-GI-DOT-NEXT: mov v6.b[1], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[2], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #18]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #34]
-; CHECK-GI-DOT-NEXT: mov v2.b[2], v18.b[0]
-; CHECK-GI-DOT-NEXT: ldr b18, [x1, #34]
-; CHECK-GI-DOT-NEXT: mov v4.b[2], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #3]
-; CHECK-GI-DOT-NEXT: mov v5.b[2], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #19]
-; CHECK-GI-DOT-NEXT: mov v3.b[2], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #19]
-; CHECK-GI-DOT-NEXT: mov v6.b[2], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[3], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #3]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #35]
-; CHECK-GI-DOT-NEXT: mov v2.b[3], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #35]
-; CHECK-GI-DOT-NEXT: mov v4.b[3], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #4]
-; CHECK-GI-DOT-NEXT: mov v5.b[3], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #20]
-; CHECK-GI-DOT-NEXT: mov v3.b[3], v18.b[0]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #36]
-; CHECK-GI-DOT-NEXT: mov v6.b[3], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[4], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #4]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #20]
-; CHECK-GI-DOT-NEXT: mov v2.b[4], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #36]
-; CHECK-GI-DOT-NEXT: mov v4.b[4], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #5]
-; CHECK-GI-DOT-NEXT: mov v5.b[4], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #21]
-; CHECK-GI-DOT-NEXT: mov v3.b[4], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[4], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[5], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #5]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #21]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #37]
-; CHECK-GI-DOT-NEXT: mov v2.b[5], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #37]
-; CHECK-GI-DOT-NEXT: mov v4.b[5], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #6]
-; CHECK-GI-DOT-NEXT: mov v5.b[5], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #22]
-; CHECK-GI-DOT-NEXT: mov v3.b[5], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[5], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[6], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #6]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #22]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #38]
-; CHECK-GI-DOT-NEXT: mov v2.b[6], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #38]
-; CHECK-GI-DOT-NEXT: mov v4.b[6], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #7]
-; CHECK-GI-DOT-NEXT: mov v5.b[6], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #23]
-; CHECK-GI-DOT-NEXT: mov v3.b[6], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[6], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[7], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #7]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #23]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #39]
-; CHECK-GI-DOT-NEXT: mov v2.b[7], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #39]
-; CHECK-GI-DOT-NEXT: mov v4.b[7], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #8]
-; CHECK-GI-DOT-NEXT: mov v5.b[7], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #24]
-; CHECK-GI-DOT-NEXT: mov v3.b[7], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[7], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[8], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #8]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #24]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #40]
-; CHECK-GI-DOT-NEXT: mov v2.b[8], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #40]
-; CHECK-GI-DOT-NEXT: mov v4.b[8], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #9]
-; CHECK-GI-DOT-NEXT: mov v5.b[8], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #25]
-; CHECK-GI-DOT-NEXT: mov v3.b[8], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[8], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[9], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #9]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #25]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #41]
-; CHECK-GI-DOT-NEXT: mov v2.b[9], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #41]
-; CHECK-GI-DOT-NEXT: mov v4.b[9], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #10]
-; CHECK-GI-DOT-NEXT: mov v5.b[9], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #26]
-; CHECK-GI-DOT-NEXT: mov v3.b[9], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[9], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[10], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #10]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #26]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #42]
-; CHECK-GI-DOT-NEXT: mov v2.b[10], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #42]
-; CHECK-GI-DOT-NEXT: mov v4.b[10], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #11]
-; CHECK-GI-DOT-NEXT: mov v5.b[10], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #27]
-; CHECK-GI-DOT-NEXT: mov v3.b[10], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[10], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[11], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #11]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #27]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #43]
-; CHECK-GI-DOT-NEXT: mov v2.b[11], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #43]
-; CHECK-GI-DOT-NEXT: mov v4.b[11], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #12]
-; CHECK-GI-DOT-NEXT: mov v5.b[11], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #28]
-; CHECK-GI-DOT-NEXT: mov v3.b[11], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[11], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[12], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #12]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #28]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #44]
-; CHECK-GI-DOT-NEXT: mov v2.b[12], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #44]
-; CHECK-GI-DOT-NEXT: mov v4.b[12], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #13]
-; CHECK-GI-DOT-NEXT: mov v5.b[12], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #29]
-; CHECK-GI-DOT-NEXT: mov v3.b[12], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[12], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[13], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #13]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #29]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #45]
-; CHECK-GI-DOT-NEXT: mov v2.b[13], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #45]
-; CHECK-GI-DOT-NEXT: mov v4.b[13], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #14]
-; CHECK-GI-DOT-NEXT: mov v5.b[13], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #30]
-; CHECK-GI-DOT-NEXT: mov v3.b[13], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[13], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[14], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #14]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #30]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #46]
-; CHECK-GI-DOT-NEXT: mov v2.b[14], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #46]
-; CHECK-GI-DOT-NEXT: mov v4.b[14], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #15]
-; CHECK-GI-DOT-NEXT: mov v5.b[14], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #31]
-; CHECK-GI-DOT-NEXT: mov v3.b[14], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[14], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[15], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #15]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #31]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #47]
-; CHECK-GI-DOT-NEXT: mov v2.b[15], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #47]
-; CHECK-GI-DOT-NEXT: mov v4.b[15], v7.b[0]
-; CHECK-GI-DOT-NEXT: movi v7.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: mov v5.b[15], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v3.b[15], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[15], v16.b[0]
-; CHECK-GI-DOT-NEXT: movi v16.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: udot v0.4s, v4.16b, v1.16b
-; CHECK-GI-DOT-NEXT: udot v7.4s, v5.16b, v2.16b
-; CHECK-GI-DOT-NEXT: udot v16.4s, v6.16b, v3.16b
+; CHECK-GI-DOT-NEXT: movi v1.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: ldr q7, [x0, #32]
+; CHECK-GI-DOT-NEXT: ldp q3, q4, [x0]
+; CHECK-GI-DOT-NEXT: movi v2.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: ldp q5, q6, [x1]
+; CHECK-GI-DOT-NEXT: ldr q16, [x1, #32]
+; CHECK-GI-DOT-NEXT: udot v0.4s, v5.16b, v3.16b
+; CHECK-GI-DOT-NEXT: udot v1.4s, v6.16b, v4.16b
+; CHECK-GI-DOT-NEXT: udot v2.4s, v16.16b, v7.16b
; CHECK-GI-DOT-NEXT: addv s0, v0.4s
-; CHECK-GI-DOT-NEXT: addv s1, v7.4s
-; CHECK-GI-DOT-NEXT: addv s2, v16.4s
+; CHECK-GI-DOT-NEXT: addv s1, v1.4s
+; CHECK-GI-DOT-NEXT: addv s2, v2.4s
; CHECK-GI-DOT-NEXT: fmov w8, s0
; CHECK-GI-DOT-NEXT: fmov w9, s1
-; CHECK-GI-DOT-NEXT: fmov w10, s2
; CHECK-GI-DOT-NEXT: add w8, w8, w9
-; CHECK-GI-DOT-NEXT: add w0, w8, w10
+; CHECK-GI-DOT-NEXT: fmov w9, s2
+; CHECK-GI-DOT-NEXT: add w0, w8, w9
; CHECK-GI-DOT-NEXT: ret
entry:
%a = load <48 x i8>, ptr %p1
@@ -2648,126 +2415,50 @@ define i32 @test_sdot_v24i8(ptr %p1, ptr %p2) {
; CHECK-GI-BASE: // %bb.0: // %entry
; CHECK-GI-BASE-NEXT: ldr q0, [x0]
; CHECK-GI-BASE-NEXT: ldr q1, [x1]
-; CHECK-GI-BASE-NEXT: ldr d4, [x0, #16]
-; CHECK-GI-BASE-NEXT: ldr d5, [x1, #16]
-; CHECK-GI-BASE-NEXT: sshll v2.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v3.8h, v1.8b, #0
+; CHECK-GI-BASE-NEXT: ldr d2, [x0, #16]
+; CHECK-GI-BASE-NEXT: ldr d3, [x1, #16]
+; CHECK-GI-BASE-NEXT: sshll v4.8h, v0.8b, #0
; CHECK-GI-BASE-NEXT: sshll2 v0.8h, v0.16b, #0
+; CHECK-GI-BASE-NEXT: sshll v5.8h, v1.8b, #0
+; CHECK-GI-BASE-NEXT: sshll v2.8h, v2.8b, #0
; CHECK-GI-BASE-NEXT: sshll2 v1.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: smull v6.4s, v3.4h, v2.4h
-; CHECK-GI-BASE-NEXT: smull2 v2.4s, v3.8h, v2.8h
-; CHECK-GI-BASE-NEXT: sshll v3.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v4.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: smlal2 v2.4s, v4.8h, v3.8h
-; CHECK-GI-BASE-NEXT: smlal v6.4s, v4.4h, v3.4h
-; CHECK-GI-BASE-NEXT: smlal2 v2.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: smlal v6.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: add v0.4s, v6.4s, v2.4s
+; CHECK-GI-BASE-NEXT: sshll v3.8h, v3.8b, #0
+; CHECK-GI-BASE-NEXT: smull v6.4s, v5.4h, v4.4h
+; CHECK-GI-BASE-NEXT: smull2 v4.4s, v5.8h, v4.8h
+; CHECK-GI-BASE-NEXT: smull2 v5.4s, v1.8h, v0.8h
+; CHECK-GI-BASE-NEXT: smull v7.4s, v3.4h, v2.4h
+; CHECK-GI-BASE-NEXT: smull v0.4s, v1.4h, v0.4h
+; CHECK-GI-BASE-NEXT: smull2 v1.4s, v3.8h, v2.8h
+; CHECK-GI-BASE-NEXT: addv s2, v6.4s
+; CHECK-GI-BASE-NEXT: addv s3, v4.4s
+; CHECK-GI-BASE-NEXT: addv s4, v5.4s
+; CHECK-GI-BASE-NEXT: addv s5, v7.4s
; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: addv s1, v1.4s
+; CHECK-GI-BASE-NEXT: fmov w8, s2
+; CHECK-GI-BASE-NEXT: fmov w9, s3
+; CHECK-GI-BASE-NEXT: fmov w10, s4
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s1
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v24i8:
; CHECK-GI-DOT: // %bb.0: // %entry
-; CHECK-GI-DOT-NEXT: ldr b1, [x0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #1]
; CHECK-GI-DOT-NEXT: movi v0.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: ldr b2, [x1]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #1]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #8]
-; CHECK-GI-DOT-NEXT: mov v1.b[1], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #2]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #8]
-; CHECK-GI-DOT-NEXT: mov v2.b[1], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #2]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #17]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #17]
-; CHECK-GI-DOT-NEXT: mov v1.b[2], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #3]
-; CHECK-GI-DOT-NEXT: mov v2.b[2], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #3]
-; CHECK-GI-DOT-NEXT: mov v1.b[3], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #4]
-; CHECK-GI-DOT-NEXT: mov v2.b[3], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #4]
-; CHECK-GI-DOT-NEXT: mov v1.b[4], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #5]
-; CHECK-GI-DOT-NEXT: mov v2.b[4], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #5]
-; CHECK-GI-DOT-NEXT: mov v1.b[5], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #6]
-; CHECK-GI-DOT-NEXT: mov v2.b[5], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #6]
-; CHECK-GI-DOT-NEXT: mov v1.b[6], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #7]
-; CHECK-GI-DOT-NEXT: mov v2.b[6], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #7]
-; CHECK-GI-DOT-NEXT: mov v1.b[7], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #16]
-; CHECK-GI-DOT-NEXT: mov v2.b[7], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #16]
-; CHECK-GI-DOT-NEXT: mov v3.b[1], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #18]
-; CHECK-GI-DOT-NEXT: mov v4.b[1], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #18]
-; CHECK-GI-DOT-NEXT: mov v1.b[8], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #9]
-; CHECK-GI-DOT-NEXT: mov v2.b[8], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #9]
-; CHECK-GI-DOT-NEXT: mov v3.b[2], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #19]
-; CHECK-GI-DOT-NEXT: mov v4.b[2], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #19]
-; CHECK-GI-DOT-NEXT: mov v1.b[9], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #10]
-; CHECK-GI-DOT-NEXT: mov v2.b[9], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #10]
-; CHECK-GI-DOT-NEXT: mov v3.b[3], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #20]
-; CHECK-GI-DOT-NEXT: mov v4.b[3], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #20]
-; CHECK-GI-DOT-NEXT: mov v1.b[10], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #11]
-; CHECK-GI-DOT-NEXT: mov v2.b[10], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #11]
-; CHECK-GI-DOT-NEXT: mov v3.b[4], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #21]
-; CHECK-GI-DOT-NEXT: mov v4.b[4], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #21]
-; CHECK-GI-DOT-NEXT: mov v1.b[11], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #12]
-; CHECK-GI-DOT-NEXT: mov v2.b[11], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #12]
-; CHECK-GI-DOT-NEXT: mov v3.b[5], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #22]
-; CHECK-GI-DOT-NEXT: mov v4.b[5], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #22]
-; CHECK-GI-DOT-NEXT: mov v1.b[12], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #13]
-; CHECK-GI-DOT-NEXT: mov v2.b[12], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #13]
-; CHECK-GI-DOT-NEXT: mov v3.b[6], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #23]
-; CHECK-GI-DOT-NEXT: mov v4.b[6], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #23]
-; CHECK-GI-DOT-NEXT: mov v1.b[13], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #14]
-; CHECK-GI-DOT-NEXT: mov v2.b[13], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #14]
-; CHECK-GI-DOT-NEXT: mov v3.b[7], v7.b[0]
-; CHECK-GI-DOT-NEXT: mov v4.b[7], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[14], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #15]
-; CHECK-GI-DOT-NEXT: mov v2.b[14], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #15]
-; CHECK-GI-DOT-NEXT: fmov d3, d3
-; CHECK-GI-DOT-NEXT: fmov d4, d4
-; CHECK-GI-DOT-NEXT: mov v1.b[15], v5.b[0]
-; CHECK-GI-DOT-NEXT: movi v5.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: mov v2.b[15], v6.b[0]
-; CHECK-GI-DOT-NEXT: sdot v0.4s, v4.16b, v3.16b
-; CHECK-GI-DOT-NEXT: sdot v5.4s, v2.16b, v1.16b
-; CHECK-GI-DOT-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-GI-DOT-NEXT: movi v1.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: ldr q2, [x0]
+; CHECK-GI-DOT-NEXT: ldr d3, [x0, #16]
+; CHECK-GI-DOT-NEXT: ldr q4, [x1]
+; CHECK-GI-DOT-NEXT: ldr d5, [x1, #16]
+; CHECK-GI-DOT-NEXT: sdot v1.4s, v4.16b, v2.16b
+; CHECK-GI-DOT-NEXT: sdot v0.4s, v5.16b, v3.16b
+; CHECK-GI-DOT-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-DOT-NEXT: addv s0, v0.4s
; CHECK-GI-DOT-NEXT: fmov w0, s0
; CHECK-GI-DOT-NEXT: ret
@@ -2835,243 +2526,91 @@ define i32 @test_sdot_v48i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v48i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: ldp q0, q4, [x1]
-; CHECK-GI-BASE-NEXT: ldr q2, [x0, #32]
-; CHECK-GI-BASE-NEXT: ldp q1, q3, [x0]
-; CHECK-GI-BASE-NEXT: ldr q7, [x1, #32]
-; CHECK-GI-BASE-NEXT: sshll2 v16.8h, v2.16b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v6.8h, v0.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v17.8h, v7.16b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v5.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: smull2 v18.4s, v6.8h, v5.8h
-; CHECK-GI-BASE-NEXT: smull v19.4s, v0.4h, v1.4h
-; CHECK-GI-BASE-NEXT: smull v5.4s, v6.4h, v5.4h
-; CHECK-GI-BASE-NEXT: smull2 v0.4s, v0.8h, v1.8h
-; CHECK-GI-BASE-NEXT: sshll v1.8h, v2.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v2.8h, v7.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v6.8h, v3.16b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v7.8h, v4.16b, #0
-; CHECK-GI-BASE-NEXT: smlal2 v18.4s, v17.8h, v16.8h
-; CHECK-GI-BASE-NEXT: smlal v5.4s, v17.4h, v16.4h
-; CHECK-GI-BASE-NEXT: smlal v19.4s, v2.4h, v1.4h
-; CHECK-GI-BASE-NEXT: smlal2 v0.4s, v2.8h, v1.8h
-; CHECK-GI-BASE-NEXT: sshll v1.8h, v3.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v2.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: smlal2 v18.4s, v7.8h, v6.8h
-; CHECK-GI-BASE-NEXT: smlal v5.4s, v7.4h, v6.4h
-; CHECK-GI-BASE-NEXT: smlal v19.4s, v2.4h, v1.4h
-; CHECK-GI-BASE-NEXT: smlal2 v0.4s, v2.8h, v1.8h
-; CHECK-GI-BASE-NEXT: add v1.4s, v19.4s, v5.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v18.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-GI-BASE-NEXT: ldp q0, q3, [x1]
+; CHECK-GI-BASE-NEXT: ldr q6, [x1, #32]
+; CHECK-GI-BASE-NEXT: ldp q1, q2, [x0]
+; CHECK-GI-BASE-NEXT: ldr q17, [x0, #32]
+; CHECK-GI-BASE-NEXT: sshll v4.8h, v0.8b, #0
+; CHECK-GI-BASE-NEXT: sshll2 v0.8h, v0.16b, #0
+; CHECK-GI-BASE-NEXT: sshll v7.8h, v3.8b, #0
+; CHECK-GI-BASE-NEXT: sshll v5.8h, v1.8b, #0
+; CHECK-GI-BASE-NEXT: sshll2 v1.8h, v1.16b, #0
+; CHECK-GI-BASE-NEXT: sshll v16.8h, v2.8b, #0
+; CHECK-GI-BASE-NEXT: sshll2 v3.8h, v3.16b, #0
+; CHECK-GI-BASE-NEXT: sshll2 v2.8h, v2.16b, #0
+; CHECK-GI-BASE-NEXT: smull v18.4s, v4.4h, v5.4h
+; CHECK-GI-BASE-NEXT: smull2 v4.4s, v4.8h, v5.8h
+; CHECK-GI-BASE-NEXT: smull2 v19.4s, v0.8h, v1.8h
+; CHECK-GI-BASE-NEXT: smull v20.4s, v7.4h, v16.4h
+; CHECK-GI-BASE-NEXT: smull v0.4s, v0.4h, v1.4h
+; CHECK-GI-BASE-NEXT: sshll v5.8h, v6.8b, #0
+; CHECK-GI-BASE-NEXT: sshll v1.8h, v17.8b, #0
+; CHECK-GI-BASE-NEXT: smull2 v7.4s, v7.8h, v16.8h
+; CHECK-GI-BASE-NEXT: sshll2 v6.8h, v6.16b, #0
+; CHECK-GI-BASE-NEXT: sshll2 v17.8h, v17.16b, #0
+; CHECK-GI-BASE-NEXT: addv s16, v18.4s
+; CHECK-GI-BASE-NEXT: addv s4, v4.4s
+; CHECK-GI-BASE-NEXT: smull v18.4s, v3.4h, v2.4h
+; CHECK-GI-BASE-NEXT: smull2 v2.4s, v3.8h, v2.8h
+; CHECK-GI-BASE-NEXT: addv s3, v19.4s
+; CHECK-GI-BASE-NEXT: smull v19.4s, v5.4h, v1.4h
+; CHECK-GI-BASE-NEXT: smull2 v1.4s, v5.8h, v1.8h
+; CHECK-GI-BASE-NEXT: addv s5, v20.4s
; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: addv s7, v7.4s
+; CHECK-GI-BASE-NEXT: smull v20.4s, v6.4h, v17.4h
+; CHECK-GI-BASE-NEXT: smull2 v6.4s, v6.8h, v17.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s16
+; CHECK-GI-BASE-NEXT: fmov w9, s4
+; CHECK-GI-BASE-NEXT: fmov w10, s3
+; CHECK-GI-BASE-NEXT: addv s3, v18.4s
+; CHECK-GI-BASE-NEXT: addv s2, v2.4s
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: addv s4, v19.4s
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: addv s0, v1.4s
+; CHECK-GI-BASE-NEXT: addv s1, v20.4s
+; CHECK-GI-BASE-NEXT: addv s5, v6.4s
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s3
+; CHECK-GI-BASE-NEXT: fmov w12, s2
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s7
+; CHECK-GI-BASE-NEXT: add w9, w10, w9
+; CHECK-GI-BASE-NEXT: add w10, w11, w12
+; CHECK-GI-BASE-NEXT: fmov w11, s4
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w10, s0
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: add w9, w9, w10
+; CHECK-GI-BASE-NEXT: fmov w10, s1
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v48i8:
; CHECK-GI-DOT: // %bb.0: // %entry
-; CHECK-GI-DOT-NEXT: ldr b1, [x0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #1]
; CHECK-GI-DOT-NEXT: movi v0.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: ldr b2, [x0, #16]
-; CHECK-GI-DOT-NEXT: ldr b6, [x0, #17]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #1]
-; CHECK-GI-DOT-NEXT: mov v1.b[1], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x1, #16]
-; CHECK-GI-DOT-NEXT: ldr b18, [x1, #17]
-; CHECK-GI-DOT-NEXT: mov v2.b[1], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #32]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #33]
-; CHECK-GI-DOT-NEXT: mov v4.b[1], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #32]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #33]
-; CHECK-GI-DOT-NEXT: mov v5.b[1], v18.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #2]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #18]
-; CHECK-GI-DOT-NEXT: mov v3.b[1], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #2]
-; CHECK-GI-DOT-NEXT: mov v6.b[1], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[2], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #18]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #34]
-; CHECK-GI-DOT-NEXT: mov v2.b[2], v18.b[0]
-; CHECK-GI-DOT-NEXT: ldr b18, [x1, #34]
-; CHECK-GI-DOT-NEXT: mov v4.b[2], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #3]
-; CHECK-GI-DOT-NEXT: mov v5.b[2], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #19]
-; CHECK-GI-DOT-NEXT: mov v3.b[2], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #19]
-; CHECK-GI-DOT-NEXT: mov v6.b[2], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[3], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #3]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #35]
-; CHECK-GI-DOT-NEXT: mov v2.b[3], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #35]
-; CHECK-GI-DOT-NEXT: mov v4.b[3], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #4]
-; CHECK-GI-DOT-NEXT: mov v5.b[3], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #20]
-; CHECK-GI-DOT-NEXT: mov v3.b[3], v18.b[0]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #36]
-; CHECK-GI-DOT-NEXT: mov v6.b[3], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[4], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #4]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #20]
-; CHECK-GI-DOT-NEXT: mov v2.b[4], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #36]
-; CHECK-GI-DOT-NEXT: mov v4.b[4], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #5]
-; CHECK-GI-DOT-NEXT: mov v5.b[4], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #21]
-; CHECK-GI-DOT-NEXT: mov v3.b[4], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[4], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[5], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #5]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #21]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #37]
-; CHECK-GI-DOT-NEXT: mov v2.b[5], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #37]
-; CHECK-GI-DOT-NEXT: mov v4.b[5], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #6]
-; CHECK-GI-DOT-NEXT: mov v5.b[5], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #22]
-; CHECK-GI-DOT-NEXT: mov v3.b[5], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[5], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[6], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #6]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #22]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #38]
-; CHECK-GI-DOT-NEXT: mov v2.b[6], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #38]
-; CHECK-GI-DOT-NEXT: mov v4.b[6], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #7]
-; CHECK-GI-DOT-NEXT: mov v5.b[6], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #23]
-; CHECK-GI-DOT-NEXT: mov v3.b[6], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[6], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[7], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #7]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #23]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #39]
-; CHECK-GI-DOT-NEXT: mov v2.b[7], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #39]
-; CHECK-GI-DOT-NEXT: mov v4.b[7], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #8]
-; CHECK-GI-DOT-NEXT: mov v5.b[7], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #24]
-; CHECK-GI-DOT-NEXT: mov v3.b[7], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[7], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[8], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #8]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #24]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #40]
-; CHECK-GI-DOT-NEXT: mov v2.b[8], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #40]
-; CHECK-GI-DOT-NEXT: mov v4.b[8], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #9]
-; CHECK-GI-DOT-NEXT: mov v5.b[8], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #25]
-; CHECK-GI-DOT-NEXT: mov v3.b[8], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[8], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[9], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #9]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #25]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #41]
-; CHECK-GI-DOT-NEXT: mov v2.b[9], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #41]
-; CHECK-GI-DOT-NEXT: mov v4.b[9], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #10]
-; CHECK-GI-DOT-NEXT: mov v5.b[9], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #26]
-; CHECK-GI-DOT-NEXT: mov v3.b[9], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[9], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[10], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #10]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #26]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #42]
-; CHECK-GI-DOT-NEXT: mov v2.b[10], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #42]
-; CHECK-GI-DOT-NEXT: mov v4.b[10], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #11]
-; CHECK-GI-DOT-NEXT: mov v5.b[10], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #27]
-; CHECK-GI-DOT-NEXT: mov v3.b[10], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[10], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[11], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #11]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #27]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #43]
-; CHECK-GI-DOT-NEXT: mov v2.b[11], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #43]
-; CHECK-GI-DOT-NEXT: mov v4.b[11], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #12]
-; CHECK-GI-DOT-NEXT: mov v5.b[11], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #28]
-; CHECK-GI-DOT-NEXT: mov v3.b[11], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[11], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[12], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #12]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #28]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #44]
-; CHECK-GI-DOT-NEXT: mov v2.b[12], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #44]
-; CHECK-GI-DOT-NEXT: mov v4.b[12], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #13]
-; CHECK-GI-DOT-NEXT: mov v5.b[12], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #29]
-; CHECK-GI-DOT-NEXT: mov v3.b[12], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[12], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[13], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #13]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #29]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #45]
-; CHECK-GI-DOT-NEXT: mov v2.b[13], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #45]
-; CHECK-GI-DOT-NEXT: mov v4.b[13], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #14]
-; CHECK-GI-DOT-NEXT: mov v5.b[13], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #30]
-; CHECK-GI-DOT-NEXT: mov v3.b[13], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[13], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[14], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #14]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #30]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #46]
-; CHECK-GI-DOT-NEXT: mov v2.b[14], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #46]
-; CHECK-GI-DOT-NEXT: mov v4.b[14], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #15]
-; CHECK-GI-DOT-NEXT: mov v5.b[14], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #31]
-; CHECK-GI-DOT-NEXT: mov v3.b[14], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[14], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[15], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #15]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #31]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #47]
-; CHECK-GI-DOT-NEXT: mov v2.b[15], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #47]
-; CHECK-GI-DOT-NEXT: mov v4.b[15], v7.b[0]
-; CHECK-GI-DOT-NEXT: movi v7.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: mov v5.b[15], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v3.b[15], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[15], v16.b[0]
-; CHECK-GI-DOT-NEXT: movi v16.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: sdot v0.4s, v4.16b, v1.16b
-; CHECK-GI-DOT-NEXT: sdot v7.4s, v5.16b, v2.16b
-; CHECK-GI-DOT-NEXT: sdot v16.4s, v6.16b, v3.16b
+; CHECK-GI-DOT-NEXT: movi v1.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: ldr q7, [x0, #32]
+; CHECK-GI-DOT-NEXT: ldp q3, q4, [x0]
+; CHECK-GI-DOT-NEXT: movi v2.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: ldp q5, q6, [x1]
+; CHECK-GI-DOT-NEXT: ldr q16, [x1, #32]
+; CHECK-GI-DOT-NEXT: sdot v0.4s, v5.16b, v3.16b
+; CHECK-GI-DOT-NEXT: sdot v1.4s, v6.16b, v4.16b
+; CHECK-GI-DOT-NEXT: sdot v2.4s, v16.16b, v7.16b
; CHECK-GI-DOT-NEXT: addv s0, v0.4s
-; CHECK-GI-DOT-NEXT: addv s1, v7.4s
-; CHECK-GI-DOT-NEXT: addv s2, v16.4s
+; CHECK-GI-DOT-NEXT: addv s1, v1.4s
+; CHECK-GI-DOT-NEXT: addv s2, v2.4s
; CHECK-GI-DOT-NEXT: fmov w8, s0
; CHECK-GI-DOT-NEXT: fmov w9, s1
-; CHECK-GI-DOT-NEXT: fmov w10, s2
; CHECK-GI-DOT-NEXT: add w8, w8, w9
-; CHECK-GI-DOT-NEXT: add w0, w8, w10
+; CHECK-GI-DOT-NEXT: fmov w9, s2
+; CHECK-GI-DOT-NEXT: add w0, w8, w9
; CHECK-GI-DOT-NEXT: ret
entry:
%a = load <48 x i8>, ptr %p1
diff --git a/llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir b/llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir
index 53a8612..8e11424 100644
--- a/llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir
+++ b/llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir
@@ -64,6 +64,7 @@
name: foo
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
fixedStack: []
stack:
diff --git a/llvm/test/CodeGen/AArch64/xor.ll b/llvm/test/CodeGen/AArch64/xor.ll
index d92402c..7d7f7bf 100644
--- a/llvm/test/CodeGen/AArch64/xor.ll
+++ b/llvm/test/CodeGen/AArch64/xor.ll
@@ -51,7 +51,7 @@ define <4 x i32> @vec_add_of_not_decrement(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: vec_add_of_not_decrement:
; CHECK: // %bb.0:
; CHECK-NEXT: mvn v1.16b, v1.16b
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
%t0 = sub <4 x i32> %x, %y
%r = sub <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1>
diff --git a/llvm/test/CodeGen/AArch64/zext.ll b/llvm/test/CodeGen/AArch64/zext.ll
index 54b29be..716d239 100644
--- a/llvm/test/CodeGen/AArch64/zext.ll
+++ b/llvm/test/CodeGen/AArch64/zext.ll
@@ -305,15 +305,14 @@ define <3 x i64> @zext_v3i8_v3i64(<3 x i8> %a) {
;
; CHECK-GI-LABEL: zext_v3i8_v3i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
-; CHECK-GI-NEXT: fmov d1, x0
-; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
-; CHECK-GI-NEXT: movi v0.2d, #0x000000000000ff
+; CHECK-GI-NEXT: fmov s0, w0
+; CHECK-GI-NEXT: movi v1.2d, #0x000000000000ff
; CHECK-GI-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-GI-NEXT: and x8, x2, #0xff
; CHECK-GI-NEXT: fmov d2, x8
-; CHECK-GI-NEXT: mov v1.d[1], x1
-; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: mov v0.s[1], w1
+; CHECK-GI-NEXT: ushll v0.2d, v0.2s, #0
+; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-GI-NEXT: mov d1, v0.d[1]
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-NEXT: ret
@@ -470,15 +469,14 @@ define <3 x i64> @zext_v3i10_v3i64(<3 x i10> %a) {
;
; CHECK-GI-LABEL: zext_v3i10_v3i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
-; CHECK-GI-NEXT: fmov d0, x0
-; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-GI-NEXT: fmov s0, w0
; CHECK-GI-NEXT: adrp x8, .LCPI27_0
; CHECK-GI-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI27_0]
; CHECK-GI-NEXT: and x8, x2, #0x3ff
; CHECK-GI-NEXT: fmov d2, x8
-; CHECK-GI-NEXT: mov v0.d[1], x1
+; CHECK-GI-NEXT: mov v0.s[1], w1
+; CHECK-GI-NEXT: ushll v0.2d, v0.2s, #0
; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-GI-NEXT: mov d1, v0.d[1]
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
index 255c6de..1a76f8c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
@@ -1090,18 +1090,29 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB39_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB39_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1_vol
@@ -1109,20 +1120,31 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %pt
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB39_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB39_2
+; GFX90A-NEXT: .LBB39_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB39_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc0 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] sc1
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc0 sc1
+; GFX940-NEXT: .LBB39_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 seq_cst
@@ -1132,26 +1154,47 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_agent:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB40_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
+; GFX90A-NEXT: .LBB40_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_agent:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB40_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB40_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1161,18 +1204,29 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_system:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB41_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB41_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1_vol
@@ -1180,20 +1234,31 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB41_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB41_2
+; GFX90A-NEXT: .LBB41_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_system:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB41_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc0 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] sc1
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc0 sc1
+; GFX940-NEXT: .LBB41_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") seq_cst
@@ -1203,26 +1268,47 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_flush(ptr addrspace(1) %ptr) #0 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_flush:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB42_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
+; GFX90A-NEXT: .LBB42_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_flush:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB42_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB42_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1394,37 +1480,59 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent_safe(ptr addrspace(1) %ptr) {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB49_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB49_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB49_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB49_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB49_2
+; GFX90A-NEXT: .LBB49_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB49_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB49_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1866,23 +1974,44 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr) #1 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB65_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: .LBB65_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB65_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: .LBB65_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
@@ -1892,23 +2021,44 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3) %ptr) #0 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB66_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: .LBB66_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB66_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: .LBB66_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
@@ -1918,44 +2068,66 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrspace(3) %ptr) #4 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB67_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_mov_b32_e32 v2, s0
-; GFX90A-NEXT: ds_read_b64 v[0:1], v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, s0
+; GFX90A-NEXT: ds_read_b64 v[2:3], v4
; GFX90A-NEXT: s_mov_b64 s[0:1], 0
-; GFX90A-NEXT: .LBB67_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB67_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f64 v[4:5], v[0:1], 4.0
-; GFX90A-NEXT: ds_cmpst_rtn_b64 v[4:5], v2, v[0:1], v[4:5]
+; GFX90A-NEXT: v_add_f64 v[6:7], v[2:3], v[0:1]
+; GFX90A-NEXT: ds_cmpst_rtn_b64 v[6:7], v4, v[2:3], v[6:7]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[0:1]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; GFX90A-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[6:7], v[6:7] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX90A-NEXT: s_cbranch_execnz .LBB67_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB67_2
+; GFX90A-NEXT: .LBB67_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB67_3
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_mov_b32_e32 v2, s0
-; GFX940-NEXT: ds_read_b64 v[0:1], v2
+; GFX940-NEXT: v_mov_b32_e32 v4, s0
+; GFX940-NEXT: ds_read_b64 v[2:3], v4
; GFX940-NEXT: s_mov_b64 s[0:1], 0
-; GFX940-NEXT: .LBB67_1: ; %atomicrmw.start
+; GFX940-NEXT: .LBB67_2: ; %atomicrmw.start
; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_add_f64 v[4:5], v[0:1], 4.0
-; GFX940-NEXT: ds_cmpst_rtn_b64 v[4:5], v2, v[0:1], v[4:5]
+; GFX940-NEXT: v_add_f64 v[6:7], v[2:3], v[0:1]
+; GFX940-NEXT: ds_cmpst_rtn_b64 v[6:7], v4, v[2:3], v[6:7]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[0:1]
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
+; GFX940-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX940-NEXT: s_cbranch_execnz .LBB67_1
-; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_cbranch_execnz .LBB67_2
+; GFX940-NEXT: .LBB67_3:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
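
Where no native FP atomic can be used (the GFX90A system/one-as cases and the `flush_safe` variants above), the add is expanded into the `%atomicrmw.start` compare-and-swap loop around `global_atomic_cmpswap_x2` or `ds_cmpst_rtn_b64`. A hand-written IR sketch of that loop shape (not compiler output; the real loops add the lane-scaled value rather than 4.0):

  define amdgpu_kernel void @fadd_cas_loop(ptr addrspace(3) %ptr) {
  entry:
    %init = load double, ptr addrspace(3) %ptr
    br label %loop
  loop:                                        ; the .LBB*_2 blocks above
    %old = phi double [ %init, %entry ], [ %cur, %loop ]
    %sum = fadd double %old, 4.0
    %old.i = bitcast double %old to i64
    %sum.i = bitcast double %sum to i64
    %pair = cmpxchg ptr addrspace(3) %ptr, i64 %old.i, i64 %sum.i seq_cst seq_cst
    %val = extractvalue { i64, i1 } %pair, 0   ; value found in memory
    %ok = extractvalue { i64, i1 } %pair, 1    ; did the swap succeed?
    %cur = bitcast i64 %val to double
    br i1 %ok, label %done, label %loop
  done:
    ret void
  }
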
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir
index e288d9d..eafd1e1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir
@@ -16,7 +16,8 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY1]](s32)
; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p1), [[BUILD_VECTOR]] :: (load store syncscope("agent-one-as") monotonic monotonic (s32), addrspace 1)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AMDGPU_ATOMIC_CMPXCHG]](s32), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[AMDGPU_ATOMIC_CMPXCHG]](s32), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[AMDGPU_ATOMIC_CMPXCHG]](s32)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s32), implicit [[ICMP]](s1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s32) = COPY $vgpr3
@@ -40,7 +41,8 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY1]](s32)
; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p0), [[BUILD_VECTOR]] :: (load store syncscope("agent-one-as") monotonic monotonic (s32))
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AMDGPU_ATOMIC_CMPXCHG]](s32), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[AMDGPU_ATOMIC_CMPXCHG]](s32), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[AMDGPU_ATOMIC_CMPXCHG]](s32)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s32), implicit [[ICMP]](s1)
%0:_(p0) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s32) = COPY $vgpr3
@@ -63,7 +65,8 @@ body: |
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p3), [[COPY1]], [[COPY2]] :: (load store syncscope("agent-one-as") monotonic monotonic (s32), addrspace 3)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s32), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[ATOMIC_CMPXCHG]](s32), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ATOMIC_CMPXCHG]](s32)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s32), implicit [[ICMP]](s1)
%0:_(p3) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = COPY $vgpr2
@@ -87,7 +90,8 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[COPY2]](s64), [[COPY1]](s64)
; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p1), [[BUILD_VECTOR]] :: (load store syncscope("agent-one-as") monotonic monotonic (s64), addrspace 1)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AMDGPU_ATOMIC_CMPXCHG]](s64), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[AMDGPU_ATOMIC_CMPXCHG]](s64), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[AMDGPU_ATOMIC_CMPXCHG]](s64)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s64), implicit [[ICMP]](s1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = COPY $vgpr4_vgpr5
@@ -110,7 +114,8 @@ body: |
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr3_vgpr4
; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p3), [[COPY1]], [[COPY2]] :: (load store syncscope("agent-one-as") monotonic monotonic (s64), addrspace 3)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s64), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[ATOMIC_CMPXCHG]](s64), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[ATOMIC_CMPXCHG]](s64)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s64), implicit [[ICMP]](s1)
%0:_(p3) = COPY $vgpr0
%1:_(s64) = COPY $vgpr1_vgpr2
%2:_(s64) = COPY $vgpr3_vgpr4
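
The lowering strategy in this file is unchanged: the `with-success` cmpxchg is still split into the plain (G_AMDGPU_)ATOMIC_CMPXCHG plus a G_ICMP that recomputes the success bit; the regenerated check lines merely route the returned value through one extra COPY before its final use. At the IR level these MIR bodies correspond to roughly (a sketch):

  define i32 @cmpxchg_with_success(ptr addrspace(1) %p, i32 %cmp, i32 %new) {
    %pair = cmpxchg ptr addrspace(1) %p, i32 %cmp, i32 %new syncscope("agent-one-as") monotonic monotonic
    %old = extractvalue { i32, i1 } %pair, 0
    %ok = extractvalue { i32, i1 } %pair, 1
    %res = select i1 %ok, i32 %old, i32 %new   ; keeps both results live
    ret i32 %res
  }
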
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
index e9f8180..fed277d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
@@ -64,9 +64,7 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s32)
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[CTLZ_ZERO_UNDEF]], [[C]]
- ; CHECK-NEXT: $vgpr0 = COPY [[AND]](s32)
+ ; CHECK-NEXT: $vgpr0 = COPY [[CTLZ_ZERO_UNDEF]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s16) = G_CTLZ_ZERO_UNDEF %0
%2:_(s32) = G_ZEXT %1
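
The dropped `G_AND` with 65535 is a known-bits simplification: `ctlz` of an s32 value is at most 32, which already fits in 16 bits, so the mask that implemented the G_ZEXT of the truncated result is provably a no-op. An IR analogue of what this MIR body tests:

  declare i32 @llvm.ctlz.i32(i32, i1)

  define i32 @ctlz_zext(i32 %x) {
    ; ctlz.i32 <= 32, so the i16 round-trip loses nothing and the
    ; zero-extension needs no masking.
    %c = call i32 @llvm.ctlz.i32(i32 %x, i1 true)
    %t = trunc i32 %c to i16
    %z = zext i16 %t to i32
    ret i32 %z
  }
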
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir
index dba20e1..eb86a98 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir
@@ -86,8 +86,9 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[ADD]](s32), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s32), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
- ; CHECK-NEXT: $vgpr0 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: $vgpr0 = COPY [[COPY2]](s32)
; CHECK-NEXT: $vgpr1 = COPY [[ZEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
@@ -117,8 +118,9 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](s64)
; CHECK-NEXT: $vgpr2 = COPY [[ZEXT]](s32)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
@@ -172,11 +174,12 @@ body: |
; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<2 x s16>) = COPY [[BITCAST2]](<2 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND2]](s32), [[AND3]](s32)
- ; CHECK-NEXT: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
+ ; CHECK-NEXT: $vgpr0 = COPY [[COPY3]](<2 x s16>)
; CHECK-NEXT: $vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
@@ -360,13 +363,14 @@ body: |
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR2]](s1)
; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR3]](s1)
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(<4 x s16>) = COPY [[CONCAT_VECTORS]](<4 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ANYEXT2]], [[C3]]
; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ANYEXT3]], [[C3]]
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[AND4]](s32), [[AND5]](s32), [[AND6]](s32), [[AND7]](s32)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY5]](<4 x s16>)
; CHECK-NEXT: $vgpr2_vgpr3_vgpr4_vgpr5 = COPY [[BUILD_VECTOR]](<4 x s32>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr1_vgpr2
@@ -403,11 +407,12 @@ body: |
; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY [[BUILD_VECTOR]](<2 x s32>)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C1]]
; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND]](s32), [[AND1]](s32)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](<2 x s32>)
; CHECK-NEXT: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR1]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir
index 93d0071..80b3166 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir
@@ -955,15 +955,16 @@ body: |
; GFX6-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX6-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX6-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX6-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX6-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX6-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO3]]
; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
;
; GFX8-LABEL: name: saddsat_s64
@@ -980,15 +981,16 @@ body: |
; GFX8-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX8-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX8-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX8-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX8-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO3]]
; GFX8-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
;
; GFX9-LABEL: name: saddsat_s64
@@ -1005,15 +1007,16 @@ body: |
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
; GFX9-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX9-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX9-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX9-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO3]]
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
@@ -1043,15 +1046,16 @@ body: |
; GFX6-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX6-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV2]](s64), [[C]]
; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX6-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX6-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX6-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX6-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX6-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX6-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX6-NEXT: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[UV12]], [[UV14]]
@@ -1060,13 +1064,14 @@ body: |
; GFX6-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX6-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV3]](s64), [[C]]
; GFX6-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX6-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX6-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX6-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX6-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX6-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX6-NEXT: [[UADDE6:%[0-9]+]]:_(s32), [[UADDE7:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO7]]
; GFX6-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO6]](s32), [[UADDE6]](s32)
- ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX6-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX6-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
@@ -1086,15 +1091,16 @@ body: |
; GFX8-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX8-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV2]](s64), [[C]]
; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX8-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX8-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX8-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
; GFX8-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX8-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX8-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX8-NEXT: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[UV12]], [[UV14]]
@@ -1103,13 +1109,14 @@ body: |
; GFX8-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX8-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV3]](s64), [[C]]
; GFX8-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX8-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX8-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX8-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX8-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX8-NEXT: [[UADDE6:%[0-9]+]]:_(s32), [[UADDE7:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO7]]
; GFX8-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO6]](s32), [[UADDE6]](s32)
- ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX8-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
@@ -1129,15 +1136,16 @@ body: |
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV2]](s64), [[C]]
; GFX9-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX9-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX9-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX9-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX9-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX9-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX9-NEXT: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[UV12]], [[UV14]]
@@ -1146,13 +1154,14 @@ body: |
; GFX9-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX9-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV3]](s64), [[C]]
; GFX9-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX9-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX9-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX9-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX9-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX9-NEXT: [[UADDE6:%[0-9]+]]:_(s32), [[UADDE7:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO7]]
; GFX9-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO6]](s32), [[UADDE6]](s32)
- ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir
index 57b1ab9..220450c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir
@@ -86,8 +86,9 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s32), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
- ; CHECK-NEXT: $vgpr0 = COPY [[SUB]](s32)
+ ; CHECK-NEXT: $vgpr0 = COPY [[COPY2]](s32)
; CHECK-NEXT: $vgpr1 = COPY [[ZEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
@@ -117,8 +118,9 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](s64)
; CHECK-NEXT: $vgpr2 = COPY [[ZEXT]](s32)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
@@ -172,11 +174,12 @@ body: |
; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<2 x s16>) = COPY [[BITCAST2]](<2 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND2]](s32), [[AND3]](s32)
- ; CHECK-NEXT: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
+ ; CHECK-NEXT: $vgpr0 = COPY [[COPY3]](<2 x s16>)
; CHECK-NEXT: $vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
@@ -360,13 +363,14 @@ body: |
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR2]](s1)
; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR3]](s1)
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(<4 x s16>) = COPY [[CONCAT_VECTORS]](<4 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ANYEXT2]], [[C3]]
; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ANYEXT3]], [[C3]]
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[AND4]](s32), [[AND5]](s32), [[AND6]](s32), [[AND7]](s32)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY5]](<4 x s16>)
; CHECK-NEXT: $vgpr2_vgpr3_vgpr4_vgpr5 = COPY [[BUILD_VECTOR]](<4 x s32>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr1_vgpr2
@@ -403,11 +407,12 @@ body: |
; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY [[BUILD_VECTOR]](<2 x s32>)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C1]]
; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND]](s32), [[AND1]](s32)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](<2 x s32>)
; CHECK-NEXT: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR1]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir
index 33a8cda..49fb6e9 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir
@@ -955,15 +955,16 @@ body: |
; GFX6-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX6-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX6-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX6-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX6-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX6-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
;
; GFX8-LABEL: name: ssubsat_s64
@@ -980,15 +981,16 @@ body: |
; GFX8-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX8-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX8-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX8-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX8-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
; GFX8-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
;
; GFX9-LABEL: name: ssubsat_s64
@@ -1005,15 +1007,16 @@ body: |
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
; GFX9-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX9-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX9-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX9-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
@@ -1043,15 +1046,16 @@ body: |
; GFX6-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX6-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV2]](s64), [[C]]
; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX6-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX6-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX6-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX6-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO1]]
; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX6-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX6-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX6-NEXT: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV12]], [[UV14]]
@@ -1060,13 +1064,14 @@ body: |
; GFX6-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX6-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV3]](s64), [[C]]
; GFX6-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX6-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX6-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX6-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX6-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX6-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX6-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO3]]
; GFX6-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX6-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX6-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
@@ -1086,15 +1091,16 @@ body: |
; GFX8-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX8-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV2]](s64), [[C]]
; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX8-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX8-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX8-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO1]]
; GFX8-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX8-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX8-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX8-NEXT: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV12]], [[UV14]]
@@ -1103,13 +1109,14 @@ body: |
; GFX8-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX8-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV3]](s64), [[C]]
; GFX8-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX8-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX8-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX8-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX8-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX8-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO3]]
; GFX8-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX8-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
@@ -1129,15 +1136,16 @@ body: |
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV2]](s64), [[C]]
; GFX9-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX9-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX9-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX9-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO1]]
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX9-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX9-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX9-NEXT: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV12]], [[UV14]]
@@ -1146,13 +1154,14 @@ body: |
; GFX9-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX9-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV3]](s64), [[C]]
; GFX9-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX9-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX9-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX9-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX9-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX9-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO3]]
; GFX9-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir
index b4bc648..305eca7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir
@@ -24,7 +24,7 @@ body: |
bb.0:
%0:_(s8) = G_CONSTANT i8 0
%1:_(p1) = G_CONSTANT i64 0
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.1:
G_STORE %0, %1 :: (store 1, addrspace 1)
@@ -55,7 +55,7 @@ body: |
; GCN-NEXT: S_ENDPGM 0
bb.0:
%0:_(s8) = G_CONSTANT i8 0
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
%1:_(p1) = G_CONSTANT i64 0
bb.1:
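
The trap is now a first-class generic opcode (G_TRAP) rather than a G_INTRINSIC_W_SIDE_EFFECTS wrapping @llvm.trap; only the spelling in MIR changes. Either form comes from the same IR:

  declare void @llvm.trap()

  define amdgpu_kernel void @trap() {
    call void @llvm.trap()
    unreachable
  }
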
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll
index 623360f..de46037 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll
@@ -147,6 +147,34 @@ main_body:
ret half %res
}
+define amdgpu_ps half @v_interp_rtz_f16(float inreg %i, float inreg %j, i32 inreg %m0) #0 {
+; GCN-LABEL: v_interp_rtz_f16:
+; GCN: ; %bb.0: ; %main_body
+; GCN-NEXT: s_mov_b32 s3, exec_lo
+; GCN-NEXT: s_wqm_b32 exec_lo, exec_lo
+; GCN-NEXT: s_mov_b32 m0, s2
+; GCN-NEXT: lds_param_load v1, attr0.x wait_vdst:15
+; GCN-NEXT: s_mov_b32 exec_lo, s3
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v2, s1
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GCN-NEXT: v_interp_p10_rtz_f16_f32 v3, v1, v0, v1 wait_exp:0
+; GCN-NEXT: v_interp_p10_rtz_f16_f32 v0, v1, v0, v1 op_sel:[1,0,1,0] wait_exp:7
+; GCN-NEXT: v_interp_p2_rtz_f16_f32 v3, v1, v2, v3 wait_exp:7
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GCN-NEXT: v_interp_p2_rtz_f16_f32 v0, v1, v2, v0 op_sel:[1,0,0,0] wait_exp:7
+; GCN-NEXT: v_add_f16_e32 v0, v3, v0
+; GCN-NEXT: ; return to shader part epilog
+main_body:
+ %p0 = call float @llvm.amdgcn.lds.param.load(i32 0, i32 0, i32 %m0)
+ %l_p0 = call float @llvm.amdgcn.interp.p10.rtz.f16(float %p0, float %i, float %p0, i1 0)
+ %l_p1 = call half @llvm.amdgcn.interp.p2.rtz.f16(float %p0, float %j, float %l_p0, i1 0)
+ %h_p0 = call float @llvm.amdgcn.interp.p10.rtz.f16(float %p0, float %i, float %p0, i1 1)
+ %h_p1 = call half @llvm.amdgcn.interp.p2.rtz.f16(float %p0, float %j, float %h_p0, i1 1)
+ %res = fadd half %l_p1, %h_p1
+ ret half %res
+}
+
define amdgpu_ps half @v_interp_f16_imm_params(float inreg %i, float inreg %j) #0 {
; GCN-LABEL: v_interp_f16_imm_params:
; GCN: ; %bb.0: ; %main_body
@@ -172,6 +200,8 @@ declare float @llvm.amdgcn.interp.inreg.p10(float, float, float) #0
declare float @llvm.amdgcn.interp.inreg.p2(float, float, float) #0
declare float @llvm.amdgcn.interp.inreg.p10.f16(float, float, float, i1) #0
declare half @llvm.amdgcn.interp.inreg.p2.f16(float, float, float, i1) #0
+declare float @llvm.amdgcn.interp.p10.rtz.f16(float, float, float, i1) #0
+declare half @llvm.amdgcn.interp.p2.rtz.f16(float, float, float, i1) #0
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare void @llvm.amdgcn.exp.f16(i32, i32, float, float, float, float, i1, i1) #0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
index 6eed92b..6d4aa3b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
@@ -670,36 +670,19 @@ define amdgpu_kernel void @bfe_sext_in_reg_i24(ptr addrspace(1) %out, ptr addrsp
define amdgpu_kernel void @simplify_demanded_bfe_sdiv(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; GFX6-LABEL: simplify_demanded_bfe_sdiv:
; GFX6: ; %bb.0:
-; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, 2.0
-; GFX6-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x0
-; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
-; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_load_dword s0, s[6:7], 0x0
-; GFX6-NEXT: s_mov_b32 s6, -1
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: v_mul_lo_u32 v1, v0, -2
-; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_bfe_i32 s0, s0, 0x100001
-; GFX6-NEXT: s_ashr_i32 s2, s0, 31
-; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
-; GFX6-NEXT: s_add_i32 s0, s0, s2
-; GFX6-NEXT: s_xor_b32 s0, s0, s2
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s0, v0
-; GFX6-NEXT: v_lshlrev_b32_e32 v1, 1, v0
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, 1, v0
-; GFX6-NEXT: v_sub_i32_e32 v1, vcc, s0, v1
-; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 2, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX6-NEXT: v_subrev_i32_e64 v2, s[0:1], 2, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, 1, v0
-; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 2, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX6-NEXT: v_xor_b32_e32 v0, s2, v0
-; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s2, v0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_load_dword s3, s[2:3], 0x0
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_bfe_i32 s3, s3, 0x100001
+; GFX6-NEXT: s_ashr_i32 s4, s3, 31
+; GFX6-NEXT: s_lshr_b32 s4, s4, 31
+; GFX6-NEXT: s_add_i32 s3, s3, s4
+; GFX6-NEXT: s_ashr_i32 s3, s3, 1
+; GFX6-NEXT: v_mov_b32_e32 v0, s3
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
%src = load i32, ptr addrspace(1) %in, align 4
%bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %src, i32 1, i32 16)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll
index 686b849..06bd45a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX8 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX12 %s
+; Note that TFE instructions don't have their result zero-initialized here because these tests stop before finalize-isel, which is the pass that inserts that initialization.
define amdgpu_ps float @struct_buffer_load_format_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
; GFX8-LABEL: name: struct_buffer_load_format_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
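The note added here (and to the .ptr variant below) explains why the TFE check lines show no zeroing of the status result: these tests stop at instruction-select, before finalize-isel would insert it. A hedged sketch of the kind of TFE call involved, assuming a struct-return TFE overload of this intrinsic; the exact mangled name and the helper function are illustrative, not taken from this patch:

declare { float, i32 } @llvm.amdgcn.struct.buffer.load.format.sl_f32i32s(<4 x i32>, i32, i32, i32, i32 immarg)

define amdgpu_ps float @tfe_example(<4 x i32> inreg %rsrc, i32 %vindex) {
  ; The trailing i32 of the result is the TFE status word that
  ; finalize-isel would normally zero-initialize.
  %ret = call { float, i32 } @llvm.amdgcn.struct.buffer.load.format.sl_f32i32s(<4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
  %data = extractvalue { float, i32 } %ret, 0
  ret float %data
}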
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll
index 9edc2455..1e3f94a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck %s
+; Note that TFE instructions don't have their result zero-initialized here because these tests stop before finalize-isel, which is the pass that inserts that initialization.
define amdgpu_ps float @struct_ptr_buffer_load_format_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
; CHECK-LABEL: name: struct_ptr_buffer_load_format_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
index d36f5c0..a6f9bb7e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
@@ -4142,11 +4142,11 @@ define i48 @v_saddsat_i48(i48 %lhs, i48 %rhs) {
; GFX9-NEXT: v_lshlrev_b64 v[2:3], 16, v[2:3]
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[4:5], v[0:1]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[6:7], 0, v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v5
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
; GFX9-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4162,7 +4162,7 @@ define i48 @v_saddsat_i48(i48 %lhs, i48 %rhs) {
; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, 0, v[2:3]
; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[4:5], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s5, 0x80000000, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX10-NEXT: s_xor_b32 vcc_lo, vcc_lo, s4
; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
@@ -4179,7 +4179,7 @@ define i48 @v_saddsat_i48(i48 %lhs, i48 %rhs) {
; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, 0, v[2:3]
; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, v[4:5], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v6
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX11-NEXT: s_xor_b32 vcc_lo, vcc_lo, s0
; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_cndmask_b32 v1, v5, v1
; GFX11-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4202,7 +4202,7 @@ define amdgpu_ps i48 @s_saddsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX6-NEXT: v_cmp_lt_i64_e64 s[0:1], s[0:1], 0
; GFX6-NEXT: s_ashr_i32 s2, s7, 31
; GFX6-NEXT: s_ashr_i32 s5, s7, 15
-; GFX6-NEXT: s_add_u32 s2, s2, 0xffff8000
+; GFX6-NEXT: s_addk_i32 s2, 0x8000
; GFX6-NEXT: v_mov_b32_e32 v0, s5
; GFX6-NEXT: v_mov_b32_e32 v1, s2
; GFX6-NEXT: v_mov_b32_e32 v2, s4
@@ -4227,7 +4227,7 @@ define amdgpu_ps i48 @s_saddsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX8-NEXT: v_cmp_lt_i64_e64 s[0:1], s[0:1], 0
; GFX8-NEXT: s_ashr_i32 s2, s7, 31
; GFX8-NEXT: s_ashr_i32 s5, s7, 15
-; GFX8-NEXT: s_add_u32 s2, s2, 0xffff8000
+; GFX8-NEXT: s_addk_i32 s2, 0x8000
; GFX8-NEXT: v_mov_b32_e32 v0, s5
; GFX8-NEXT: v_mov_b32_e32 v1, s2
; GFX8-NEXT: v_mov_b32_e32 v2, s4
@@ -4250,7 +4250,7 @@ define amdgpu_ps i48 @s_saddsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
; GFX9-NEXT: s_ashr_i32 s2, s5, 31
-; GFX9-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX9-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: v_mov_b32_e32 v2, s4
@@ -4274,7 +4274,7 @@ define amdgpu_ps i48 @s_saddsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e64 s1, s[2:3], 0
; GFX10-NEXT: v_mov_b32_e32 v1, s5
; GFX10-NEXT: s_ashr_i32 s2, s5, 31
-; GFX10-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX10-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX10-NEXT: s_xor_b32 s0, s1, s0
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4293,7 +4293,7 @@ define amdgpu_ps i48 @s_saddsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1]
; GFX11-NEXT: v_cmp_lt_i64_e64 s1, s[2:3], 0
; GFX11-NEXT: s_ashr_i32 s2, s5, 31
-; GFX11-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX11-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX11-NEXT: s_xor_b32 s0, s1, s0
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4351,11 +4351,11 @@ define amdgpu_ps <2 x float> @saddsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v1, vcc
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], v[2:3]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[2:3], 0, v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], 0, v[0:1]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[2:3], s[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4371,7 +4371,7 @@ define amdgpu_ps <2 x float> @saddsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX10-NEXT: v_cmp_gt_i64_e64 s0, 0, v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4388,7 +4388,7 @@ define amdgpu_ps <2 x float> @saddsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX11-NEXT: v_cmp_gt_i64_e64 s0, 0, v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4442,15 +4442,15 @@ define amdgpu_ps <2 x float> @saddsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
; GFX9-LABEL: saddsat_i48_vs:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 16, v[0:1]
-; GFX9-NEXT: s_lshl_b64 s[2:3], s[0:1], 16
-; GFX9-NEXT: v_mov_b32_e32 v3, s3
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 16
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], v[2:3], v[0:1]
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], s[2:3], 0
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[0:1], 0
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[2:3], s[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4466,7 +4466,7 @@ define amdgpu_ps <2 x float> @saddsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[0:1], 0
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4483,7 +4483,7 @@ define amdgpu_ps <2 x float> @saddsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[0:1], 0
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4529,11 +4529,11 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[4:5], v[0:1]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[6:7], 0, v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v5
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -4546,7 +4546,7 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX10-NEXT: v_cmp_gt_i64_e64 s4, 0, v[2:3]
; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s5, 0x80000000, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
@@ -4560,7 +4560,7 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX11-NEXT: v_cmp_gt_i64_e64 s0, 0, v[2:3]
; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v6
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_cndmask_b32 v1, v5, v1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -4578,7 +4578,7 @@ define amdgpu_ps i64 @s_saddsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX6-NEXT: v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
; GFX6-NEXT: s_ashr_i32 s2, s5, 31
-; GFX6-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX6-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v0, s2
; GFX6-NEXT: v_mov_b32_e32 v1, s3
; GFX6-NEXT: v_mov_b32_e32 v2, s4
@@ -4599,7 +4599,7 @@ define amdgpu_ps i64 @s_saddsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX8-NEXT: v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
; GFX8-NEXT: s_ashr_i32 s2, s5, 31
-; GFX8-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX8-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: v_mov_b32_e32 v2, s4
@@ -4620,7 +4620,7 @@ define amdgpu_ps i64 @s_saddsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
; GFX9-NEXT: s_ashr_i32 s2, s5, 31
-; GFX9-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX9-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: v_mov_b32_e32 v2, s4
@@ -4641,7 +4641,7 @@ define amdgpu_ps i64 @s_saddsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e64 s1, s[2:3], 0
; GFX10-NEXT: v_mov_b32_e32 v1, s5
; GFX10-NEXT: s_ashr_i32 s2, s5, 31
-; GFX10-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX10-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX10-NEXT: s_xor_b32 s0, s1, s0
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4657,7 +4657,7 @@ define amdgpu_ps i64 @s_saddsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1]
; GFX11-NEXT: v_cmp_lt_i64_e64 s1, s[2:3], 0
; GFX11-NEXT: s_ashr_i32 s2, s5, 31
-; GFX11-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX11-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX11-NEXT: s_xor_b32 s0, s1, s0
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4702,11 +4702,11 @@ define amdgpu_ps <2 x float> @saddsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v1, vcc
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], v[2:3]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[2:3], 0, v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], 0, v[0:1]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[2:3], s[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: ; return to shader part epilog
@@ -4718,7 +4718,7 @@ define amdgpu_ps <2 x float> @saddsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX10-NEXT: v_cmp_gt_i64_e64 s0, 0, v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4731,7 +4731,7 @@ define amdgpu_ps <2 x float> @saddsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX11-NEXT: v_cmp_gt_i64_e64 s0, 0, v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: ; return to shader part epilog
@@ -4774,11 +4774,11 @@ define amdgpu_ps <2 x float> @saddsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], v[2:3], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[0:1]
; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[0:1], 0
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[0:1], s[2:3]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: ; return to shader part epilog
@@ -4790,7 +4790,7 @@ define amdgpu_ps <2 x float> @saddsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[0:1], 0
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4803,7 +4803,7 @@ define amdgpu_ps <2 x float> @saddsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[0:1], 0
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: ; return to shader part epilog
@@ -4866,21 +4866,20 @@ define <2 x i64> @v_saddsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v0, v4
; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v1, v5, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[8:9], v[0:1]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[6:7], 0, v[4:5]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[8:9], v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[4:5]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v9
-; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v0, v1
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v2, v6
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v3, v7, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[4:5], v[2:3]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[6:7], 0, v[6:7]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[2:3]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[6:7]
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v5
-; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, 0x80000000, v2
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v3, 0x80000000, v2
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -4896,10 +4895,10 @@ define <2 x i64> @v_saddsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[8:9], v[0:1]
; GFX10-NEXT: v_cmp_gt_i64_e64 s4, 0, v[4:5]
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v11
-; GFX10-NEXT: v_cmp_gt_i64_e64 s6, 0, v[6:7]
-; GFX10-NEXT: v_add_co_u32 v1, s5, 0x80000000, v12
; GFX10-NEXT: v_cmp_lt_i64_e64 s5, v[10:11], v[2:3]
-; GFX10-NEXT: v_add_co_u32 v3, s7, 0x80000000, v4
+; GFX10-NEXT: v_cmp_gt_i64_e64 s6, 0, v[6:7]
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v12
+; GFX10-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v12, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc_lo
@@ -4921,8 +4920,8 @@ define <2 x i64> @v_saddsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v11
; GFX11-NEXT: v_cmp_lt_i64_e64 s1, v[10:11], v[2:3]
; GFX11-NEXT: v_cmp_gt_i64_e64 s2, 0, v[6:7]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v12
-; GFX11-NEXT: v_add_co_u32 v3, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v12
+; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v8, v12 :: v_dual_cndmask_b32 v1, v9, v1
; GFX11-NEXT: s_xor_b32 vcc_lo, s2, s1
@@ -4942,7 +4941,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
; GFX6-NEXT: v_cmp_lt_i64_e64 s[0:1], s[4:5], 0
; GFX6-NEXT: s_ashr_i32 s4, s9, 31
-; GFX6-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX6-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: v_mov_b32_e32 v1, s5
; GFX6-NEXT: v_mov_b32_e32 v2, s8
@@ -4957,7 +4956,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
; GFX6-NEXT: v_cmp_lt_i64_e64 s[2:3], s[6:7], 0
; GFX6-NEXT: s_ashr_i32 s4, s1, 31
-; GFX6-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX6-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: v_mov_b32_e32 v1, s5
; GFX6-NEXT: v_mov_b32_e32 v4, s0
@@ -4980,7 +4979,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
; GFX8-NEXT: v_cmp_lt_i64_e64 s[0:1], s[4:5], 0
; GFX8-NEXT: s_ashr_i32 s4, s9, 31
-; GFX8-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX8-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: v_mov_b32_e32 v2, s8
@@ -4995,7 +4994,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
; GFX8-NEXT: v_cmp_lt_i64_e64 s[2:3], s[6:7], 0
; GFX8-NEXT: s_ashr_i32 s4, s1, 31
-; GFX8-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX8-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: v_mov_b32_e32 v4, s0
@@ -5018,7 +5017,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[4:5], 0
; GFX9-NEXT: s_ashr_i32 s4, s9, 31
-; GFX9-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX9-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s8
@@ -5033,7 +5032,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], s[6:7], 0
; GFX9-NEXT: s_ashr_i32 s4, s1, 31
-; GFX9-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX9-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v4, s0
@@ -5056,7 +5055,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX10-NEXT: v_cmp_lt_i64_e64 s1, s[4:5], 0
; GFX10-NEXT: s_ashr_i32 s4, s9, 31
; GFX10-NEXT: v_mov_b32_e32 v1, s9
-; GFX10-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX10-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX10-NEXT: s_xor_b32 s8, s1, s0
; GFX10-NEXT: s_add_u32 s0, s2, s6
; GFX10-NEXT: s_addc_u32 s1, s3, s7
@@ -5067,7 +5066,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s4, s8
; GFX10-NEXT: s_ashr_i32 s4, s1, 31
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s5, s8
-; GFX10-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX10-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX10-NEXT: s_xor_b32 s1, s3, s2
; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s4, s1
; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s0, s1
@@ -5085,7 +5084,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[8:9], s[0:1]
; GFX11-NEXT: v_cmp_lt_i64_e64 s1, s[4:5], 0
; GFX11-NEXT: s_ashr_i32 s4, s9, 31
-; GFX11-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX11-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX11-NEXT: s_xor_b32 s8, s1, s0
; GFX11-NEXT: s_add_u32 s0, s2, s6
; GFX11-NEXT: s_addc_u32 s1, s3, s7
@@ -5095,7 +5094,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s4, s8
; GFX11-NEXT: s_ashr_i32 s4, s1, 31
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s5, s8
-; GFX11-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX11-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX11-NEXT: s_xor_b32 s1, s3, s2
; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s4, s1
; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s0, s1
@@ -5132,7 +5131,7 @@ define amdgpu_ps i128 @s_saddsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX6-NEXT: s_ashr_i32 s0, s9, 31
; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX6-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX6-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v1, s0
; GFX6-NEXT: v_mov_b32_e32 v2, s4
; GFX6-NEXT: v_mov_b32_e32 v3, s5
@@ -5179,7 +5178,7 @@ define amdgpu_ps i128 @s_saddsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX8-NEXT: s_ashr_i32 s0, s9, 31
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX8-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX8-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v1, s0
; GFX8-NEXT: v_mov_b32_e32 v2, s4
; GFX8-NEXT: v_mov_b32_e32 v3, s5
@@ -5226,7 +5225,7 @@ define amdgpu_ps i128 @s_saddsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: s_ashr_i32 s0, s9, 31
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX9-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: v_mov_b32_e32 v3, s5
@@ -5269,7 +5268,7 @@ define amdgpu_ps i128 @s_saddsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, 0, s0
; GFX10-NEXT: v_mov_b32_e32 v2, s5
; GFX10-NEXT: s_ashr_i32 s0, s9, 31
-; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX10-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX10-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX10-NEXT: v_mov_b32_e32 v1, s4
; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
@@ -5310,7 +5309,7 @@ define amdgpu_ps i128 @s_saddsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, 0, s0
; GFX11-NEXT: v_mov_b32_e32 v2, s5
; GFX11-NEXT: s_ashr_i32 s0, s9, 31
-; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX11-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX11-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX11-NEXT: v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 1, v0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
@@ -5412,9 +5411,8 @@ define amdgpu_ps <4 x float> @saddsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
; GFX9-NEXT: v_ashrrev_i32_e32 v3, 31, v5
; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, 0, vcc
; GFX9-NEXT: v_xor_b32_e32 v2, v2, v6
-; GFX9-NEXT: v_bfrev_b32_e32 v6, 1
-; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v3, v6
; GFX9-NEXT: v_and_b32_e32 v2, 1, v2
+; GFX9-NEXT: v_add_u32_e32 v6, 0x80000000, v3
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
@@ -5440,7 +5438,7 @@ define amdgpu_ps <4 x float> @saddsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v3, 31, v5
; GFX10-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
; GFX10-NEXT: v_xor_b32_e32 v2, v2, v6
-; GFX10-NEXT: v_add_co_u32 v6, s0, 0x80000000, v3
+; GFX10-NEXT: v_add_nc_u32_e32 v6, 0x80000000, v3
; GFX10-NEXT: v_and_b32_e32 v2, 1, v2
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
@@ -5467,7 +5465,7 @@ define amdgpu_ps <4 x float> @saddsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v3, 31, v5
; GFX11-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
; GFX11-NEXT: v_xor_b32_e32 v2, v2, v6
-; GFX11-NEXT: v_add_co_u32 v6, null, 0x80000000, v3
+; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x80000000, v3
; GFX11-NEXT: v_and_b32_e32 v2, 1, v2
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
@@ -5569,9 +5567,8 @@ define amdgpu_ps <4 x float> @saddsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, 0, s[0:1]
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v7
-; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
-; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v2, v1
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_add_u32_e32 v3, 0x80000000, v2
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
@@ -5597,9 +5594,9 @@ define amdgpu_ps <4 x float> @saddsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
; GFX10-NEXT: v_ashrrev_i32_e32 v2, 31, v7
+; GFX10-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v2
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v1, v8, 0, s0
-; GFX10-NEXT: v_add_co_u32 v3, s0, 0x80000000, v2
; GFX10-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
@@ -5627,15 +5624,14 @@ define amdgpu_ps <4 x float> @saddsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
; GFX11-NEXT: v_ashrrev_i32_e32 v2, 31, v7
-; GFX11-NEXT: v_add_co_u32 v3, null, 0x80000000, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v1, v0 :: v_dual_add_nc_u32 v3, 0x80000000, v2
; GFX11-NEXT: v_cndmask_b32_e64 v1, v8, 0, s0
; GFX11-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc_lo
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v2 :: v_dual_cndmask_b32 v3, v7, v3
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v2 :: v_dual_cndmask_b32 v3, v7, v3
+; GFX11-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo
; GFX11-NEXT: ; return to shader part epilog
%result = call i128 @llvm.sadd.sat.i128(i128 %lhs, i128 %rhs)
%cast = bitcast i128 %result to <4 x float>
@@ -5762,12 +5758,11 @@ define <2 x i128> @v_saddsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v17
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[10:11]
+; GFX9-NEXT: v_add_u32_e32 v3, 0x80000000, v2
; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
-; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
-; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v2, v1
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v2, vcc
@@ -5786,11 +5781,11 @@ define <2 x i128> @v_saddsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX9-NEXT: v_ashrrev_i32_e32 v6, 31, v11
; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[14:15]
+; GFX9-NEXT: v_add_u32_e32 v7, 0x80000000, v6
; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
; GFX9-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
; GFX9-NEXT: v_xor_b32_e32 v4, v5, v4
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, 0x80000000, v6
; GFX9-NEXT: v_and_b32_e32 v4, 1, v4
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v8, v6, vcc
@@ -5832,18 +5827,18 @@ define <2 x i128> @v_saddsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v19
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc_lo
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[14:15]
-; GFX10-NEXT: v_ashrrev_i32_e32 v3, 31, v17
-; GFX10-NEXT: v_add_co_u32 v7, s5, 0x80000000, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x80000000, v6
; GFX10-NEXT: v_cndmask_b32_e64 v2, v4, 0, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v4, s4, 0x80000000, v3
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX10-NEXT: v_xor_b32_e32 v1, v2, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v3, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v16, v3, vcc_lo
-; GFX10-NEXT: v_and_b32_e32 v5, 1, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v3, vcc_lo
+; GFX10-NEXT: v_ashrrev_i32_e32 v2, 31, v17
+; GFX10-NEXT: v_and_b32_e32 v3, 1, v1
+; GFX10-NEXT: v_add_nc_u32_e32 v4, 0x80000000, v2
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v2, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v2, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v16, v2, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v3
; GFX10-NEXT: v_cndmask_b32_e32 v3, v17, v4, vcc_lo
-; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v5
; GFX10-NEXT: v_cndmask_b32_e64 v4, v12, v6, s4
; GFX10-NEXT: v_cndmask_b32_e64 v5, v13, v6, s4
; GFX10-NEXT: v_cndmask_b32_e64 v6, v18, v6, s4
@@ -5882,18 +5877,17 @@ define <2 x i128> @v_saddsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v19
; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc_lo
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[14:15]
-; GFX11-NEXT: v_ashrrev_i32_e32 v3, 31, v17
-; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX11-NEXT: v_add_co_u32 v7, null, 0x80000000, v6
+; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x80000000, v6
; GFX11-NEXT: v_cndmask_b32_e64 v2, v4, 0, vcc_lo
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: v_add_co_u32 v4, null, 0x80000000, v3
; GFX11-NEXT: v_xor_b32_e32 v1, v2, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v8, v3, vcc_lo
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v16, v3 :: v_dual_and_b32 v5, 1, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v9, v3, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v17, v4, vcc_lo
-; GFX11-NEXT: v_cmp_ne_u32_e64 s0, 0, v5
+; GFX11-NEXT: v_ashrrev_i32_e32 v2, 31, v17
+; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x80000000, v2
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v8, v2 :: v_dual_and_b32 v3, 1, v1
+; GFX11-NEXT: v_cmp_ne_u32_e64 s0, 0, v3
+; GFX11-NEXT: v_cndmask_b32_e32 v1, v9, v2, vcc_lo
+; GFX11-NEXT: v_dual_cndmask_b32 v2, v16, v2 :: v_dual_cndmask_b32 v3, v17, v4
; GFX11-NEXT: v_cndmask_b32_e64 v4, v12, v6, s0
; GFX11-NEXT: v_cndmask_b32_e64 v5, v13, v6, s0
; GFX11-NEXT: v_cndmask_b32_e64 v6, v18, v6, s0
@@ -5927,7 +5921,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX6-NEXT: s_ashr_i32 s0, s17, 31
; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX6-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX6-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v1, s0
; GFX6-NEXT: v_mov_b32_e32 v2, s8
; GFX6-NEXT: v_mov_b32_e32 v3, s9
@@ -5960,7 +5954,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX6-NEXT: s_ashr_i32 s4, s3, 31
; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX6-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX6-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v1, s4
; GFX6-NEXT: v_mov_b32_e32 v2, s0
; GFX6-NEXT: v_mov_b32_e32 v3, s1
@@ -6011,7 +6005,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX8-NEXT: s_ashr_i32 s0, s17, 31
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX8-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX8-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v1, s0
; GFX8-NEXT: v_mov_b32_e32 v2, s8
; GFX8-NEXT: v_mov_b32_e32 v3, s9
@@ -6050,7 +6044,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX8-NEXT: s_ashr_i32 s4, s3, 31
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX8-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX8-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v1, s4
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: v_mov_b32_e32 v3, s1
@@ -6101,7 +6095,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: s_ashr_i32 s0, s17, 31
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX9-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: v_mov_b32_e32 v2, s8
; GFX9-NEXT: v_mov_b32_e32 v3, s9
@@ -6140,7 +6134,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: s_ashr_i32 s4, s3, 31
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX9-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: v_mov_b32_e32 v2, s0
; GFX9-NEXT: v_mov_b32_e32 v3, s1
@@ -6184,7 +6178,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX10-NEXT: s_and_b32 s1, 1, s1
; GFX10-NEXT: s_ashr_i32 s10, s17, 31
; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, s1
-; GFX10-NEXT: s_add_u32 s11, s10, 0x80000000
+; GFX10-NEXT: s_add_i32 s11, s10, 0x80000000
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, 0, s0
; GFX10-NEXT: s_add_u32 s0, s4, s12
@@ -6221,7 +6215,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX10-NEXT: v_xor_b32_e32 v1, v2, v1
; GFX10-NEXT: v_mov_b32_e32 v2, s17
; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s10, vcc_lo
-; GFX10-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX10-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX10-NEXT: v_readfirstlane_b32 s1, v4
; GFX10-NEXT: v_and_b32_e32 v1, 1, v1
; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s11, vcc_lo
@@ -6261,7 +6255,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX11-NEXT: s_and_b32 s1, 1, s1
; GFX11-NEXT: s_ashr_i32 s10, s17, 31
; GFX11-NEXT: v_cmp_ne_u32_e64 s0, 0, s1
-; GFX11-NEXT: s_add_u32 s11, s10, 0x80000000
+; GFX11-NEXT: s_add_i32 s11, s10, 0x80000000
; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, 0, s0
; GFX11-NEXT: s_add_u32 s0, s4, s12
@@ -6299,7 +6293,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX11-NEXT: v_and_b32_e32 v1, 1, v1
; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, s10, vcc_lo
; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s11, vcc_lo
-; GFX11-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX11-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: v_readfirstlane_b32 s1, v4
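Throughout the saddsat checks above, carry-producing adds of 0x80000000 (v_add_co_u32, s_add_u32) become plain adds (v_add_nc_u32_e32, s_add_i32). The add only builds the saturation bound: the high word of the clamp value is the sign word with its top bit flipped, so the carry-out is never consumed and a carry-less add suffices. A minimal sketch of the source pattern behind these checks, using the standard llvm.sadd.sat intrinsic (the function name is illustrative):

declare i64 @llvm.sadd.sat.i64(i64, i64)

define i64 @sadd_sat_example(i64 %a, i64 %b) {
  ; On signed overflow the result clamps to INT64_MIN or INT64_MAX;
  ; the backend materializes that bound as the sign word (ashr by 31)
  ; for the low half and sign + 0x80000000 (top bit flipped, carry
  ; discarded) for the high half.
  %r = call i64 @llvm.sadd.sat.i64(i64 %a, i64 %b)
  ret i64 %r
}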
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll
index 1061f00..2c2f8e9 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll
@@ -279,125 +279,27 @@ define i32 @v_sdiv_i32_pow2k_denom(i32 %num) {
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; CHECK-NEXT: v_rcp_iflag_f32_e32 v2, 0x45800000
-; CHECK-NEXT: v_mov_b32_e32 v3, 0xfffff000
-; CHECK-NEXT: v_mov_b32_e32 v4, 0x1000
+; CHECK-NEXT: v_lshrrev_b32_e32 v1, 20, v1
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; CHECK-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
-; CHECK-NEXT: v_xor_b32_e32 v0, v0, v1
-; CHECK-NEXT: v_cvt_u32_f32_e32 v2, v2
-; CHECK-NEXT: v_mul_lo_u32 v3, v2, v3
-; CHECK-NEXT: v_mul_hi_u32 v3, v2, v3
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; CHECK-NEXT: v_mul_hi_u32 v2, v0, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, 12, v2
-; CHECK-NEXT: v_add_i32_e32 v5, vcc, 1, v2
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v3
-; CHECK-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v4
-; CHECK-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[4:5]
-; CHECK-NEXT: v_subrev_i32_e32 v3, vcc, 0x1000, v0
-; CHECK-NEXT: v_cndmask_b32_e64 v0, v0, v3, s[4:5]
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, 1, v2
-; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v4
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
-; CHECK-NEXT: v_xor_b32_e32 v0, v0, v1
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%result = sdiv i32 %num, 4096
ret i32 %result
}
define <2 x i32> @v_sdiv_v2i32_pow2k_denom(<2 x i32> %num) {
-; GISEL-LABEL: v_sdiv_v2i32_pow2k_denom:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT: v_ashrrev_i32_e32 v2, 31, v0
-; GISEL-NEXT: v_mov_b32_e32 v3, 0x1000
-; GISEL-NEXT: v_cvt_f32_u32_e32 v4, 0x1000
-; GISEL-NEXT: v_mov_b32_e32 v5, 0xfffff000
-; GISEL-NEXT: v_ashrrev_i32_e32 v6, 31, v1
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; GISEL-NEXT: v_rcp_iflag_f32_e32 v4, v4
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v6
-; GISEL-NEXT: v_xor_b32_e32 v0, v0, v2
-; GISEL-NEXT: v_mul_f32_e32 v4, 0x4f7ffffe, v4
-; GISEL-NEXT: v_xor_b32_e32 v1, v1, v6
-; GISEL-NEXT: v_cvt_u32_f32_e32 v4, v4
-; GISEL-NEXT: v_mul_lo_u32 v5, v4, v5
-; GISEL-NEXT: v_mul_hi_u32 v5, v4, v5
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v5
-; GISEL-NEXT: v_mul_hi_u32 v5, v0, v4
-; GISEL-NEXT: v_mul_hi_u32 v4, v1, v4
-; GISEL-NEXT: v_lshlrev_b32_e32 v7, 12, v5
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, 1, v5
-; GISEL-NEXT: v_lshlrev_b32_e32 v9, 12, v4
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, 1, v4
-; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v7
-; GISEL-NEXT: v_sub_i32_e32 v1, vcc, v1, v9
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v3
-; GISEL-NEXT: v_cndmask_b32_e64 v5, v5, v8, s[4:5]
-; GISEL-NEXT: v_sub_i32_e32 v7, vcc, v0, v3
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[6:7], v1, v3
-; GISEL-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[6:7]
-; GISEL-NEXT: v_subrev_i32_e32 v8, vcc, 0x1000, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v0, v0, v7, s[4:5]
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, 1, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[6:7]
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, 1, v4
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v0, v3
-; GISEL-NEXT: v_cndmask_b32_e32 v0, v5, v7, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
-; GISEL-NEXT: v_cndmask_b32_e32 v1, v4, v8, vcc
-; GISEL-NEXT: v_xor_b32_e32 v0, v0, v2
-; GISEL-NEXT: v_xor_b32_e32 v1, v1, v6
-; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
-; GISEL-NEXT: v_sub_i32_e32 v1, vcc, v1, v6
-; GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; CGP-LABEL: v_sdiv_v2i32_pow2k_denom:
-; CGP: ; %bb.0:
-; CGP-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CGP-NEXT: v_ashrrev_i32_e32 v2, 31, v0
-; CGP-NEXT: v_rcp_iflag_f32_e32 v3, 0x45800000
-; CGP-NEXT: v_mov_b32_e32 v4, 0xfffff000
-; CGP-NEXT: v_mov_b32_e32 v5, 0x1000
-; CGP-NEXT: v_ashrrev_i32_e32 v6, 31, v1
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; CGP-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v6
-; CGP-NEXT: v_xor_b32_e32 v0, v0, v2
-; CGP-NEXT: v_cvt_u32_f32_e32 v3, v3
-; CGP-NEXT: v_xor_b32_e32 v1, v1, v6
-; CGP-NEXT: v_mul_lo_u32 v4, v3, v4
-; CGP-NEXT: v_mul_hi_u32 v4, v3, v4
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v3, v4
-; CGP-NEXT: v_mul_hi_u32 v4, v0, v3
-; CGP-NEXT: v_mul_hi_u32 v3, v1, v3
-; CGP-NEXT: v_lshlrev_b32_e32 v7, 12, v4
-; CGP-NEXT: v_add_i32_e32 v8, vcc, 1, v4
-; CGP-NEXT: v_lshlrev_b32_e32 v9, 12, v3
-; CGP-NEXT: v_add_i32_e32 v10, vcc, 1, v3
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v0, v7
-; CGP-NEXT: v_sub_i32_e32 v1, vcc, v1, v9
-; CGP-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v5
-; CGP-NEXT: v_cndmask_b32_e64 v4, v4, v8, s[4:5]
-; CGP-NEXT: v_sub_i32_e32 v7, vcc, v0, v5
-; CGP-NEXT: v_cmp_ge_u32_e64 s[6:7], v1, v5
-; CGP-NEXT: v_cndmask_b32_e64 v3, v3, v10, s[6:7]
-; CGP-NEXT: v_subrev_i32_e32 v8, vcc, 0x1000, v1
-; CGP-NEXT: v_cndmask_b32_e64 v0, v0, v7, s[4:5]
-; CGP-NEXT: v_add_i32_e32 v7, vcc, 1, v4
-; CGP-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[6:7]
-; CGP-NEXT: v_add_i32_e32 v8, vcc, 1, v3
-; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v5
-; CGP-NEXT: v_cndmask_b32_e32 v0, v4, v7, vcc
-; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v1, v5
-; CGP-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
-; CGP-NEXT: v_xor_b32_e32 v0, v0, v2
-; CGP-NEXT: v_xor_b32_e32 v1, v1, v6
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
-; CGP-NEXT: v_sub_i32_e32 v1, vcc, v1, v6
-; CGP-NEXT: s_setpc_b64 s[30:31]
+; CHECK-LABEL: v_sdiv_v2i32_pow2k_denom:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v2, 31, v0
+; CHECK-NEXT: v_ashrrev_i32_e32 v3, 31, v1
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 20, v2
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 20, v3
+; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; CHECK-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; CHECK-NEXT: v_ashrrev_i32_e32 v1, 12, v1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
%result = sdiv <2 x i32> %num, <i32 4096, i32 4096>
ret <2 x i32> %result
}
@@ -884,3 +786,24 @@ define <2 x i32> @v_sdiv_v2i32_24bit(<2 x i32> %num, <2 x i32> %den) {
%result = sdiv <2 x i32> %num.mask, %den.mask
ret <2 x i32> %result
}
+
+define i32 @v_sdiv_i32_exact(i32 %num) {
+; CHECK-LABEL: v_sdiv_i32_exact:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact i32 %num, 4096
+ ret i32 %result
+}
+
+define <2 x i32> @v_sdiv_v2i32_exact(<2 x i32> %num) {
+; CHECK-LABEL: v_sdiv_v2i32_exact:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; CHECK-NEXT: v_ashrrev_i32_e32 v1, 10, v1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact <2 x i32> %num, <i32 4096, i32 1024>
+ ret <2 x i32> %result
+}
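The rewritten CHECK lines above encode the standard power-of-two expansion of signed division: when the dividend is negative, add d-1 as a rounding bias, then arithmetic-shift right by log2(d). A sketch of the same expansion written out in IR for d = 4096 (the helper name is illustrative):

define i32 @sdiv_pow2_expansion(i32 %x) {
  %sign = ashr i32 %x, 31    ; -1 if %x is negative, else 0
  %bias = lshr i32 %sign, 20 ; 0xfff (= 4096-1) when %x is negative
  %tmp  = add i32 %x, %bias  ; biases negative dividends toward zero
  %q    = ashr i32 %tmp, 12  ; divide by 2^12
  ret i32 %q
}

The new v_sdiv_i32_exact tests take an even shorter path: an exact sdiv guarantees there is no remainder, so the bias is unnecessary and a single arithmetic shift suffices.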
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
index 0a6b7af..377fa24 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
@@ -999,126 +999,11 @@ define i64 @v_sdiv_i64_pow2k_denom(i64 %num) {
; CHECK-LABEL: v_sdiv_i64_pow2k_denom:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cvt_f32_u32_e32 v2, 0x1000
-; CHECK-NEXT: v_cvt_f32_ubyte0_e32 v3, 0
-; CHECK-NEXT: v_mov_b32_e32 v6, 0xfffff000
-; CHECK-NEXT: v_mac_f32_e32 v2, 0x4f800000, v3
-; CHECK-NEXT: v_rcp_iflag_f32_e32 v2, v2
-; CHECK-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2
-; CHECK-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2
-; CHECK-NEXT: v_trunc_f32_e32 v4, v3
-; CHECK-NEXT: v_mac_f32_e32 v2, 0xcf800000, v4
-; CHECK-NEXT: v_cvt_u32_f32_e32 v5, v2
-; CHECK-NEXT: v_cvt_u32_f32_e32 v7, v4
-; CHECK-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, v5, 0
-; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v7, v[3:4]
-; CHECK-NEXT: v_mul_hi_u32 v8, v5, v2
-; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], -1, v5, v[3:4]
-; CHECK-NEXT: v_mul_lo_u32 v4, v7, v2
-; CHECK-NEXT: v_mul_hi_u32 v2, v7, v2
-; CHECK-NEXT: v_mul_lo_u32 v9, v5, v3
-; CHECK-NEXT: v_mul_lo_u32 v10, v7, v3
-; CHECK-NEXT: v_mul_hi_u32 v11, v5, v3
-; CHECK-NEXT: v_mul_hi_u32 v3, v7, v3
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, v4, v9
-; CHECK-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v10, v2
-; CHECK-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, v4, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, v9, v4
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v11
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v8, vcc, v10, v8
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v4
-; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, v8, v4
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, v3, v4
-; CHECK-NEXT: v_add_i32_e32 v5, vcc, v5, v2
-; CHECK-NEXT: v_addc_u32_e32 v7, vcc, v7, v3, vcc
-; CHECK-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, v5, 0
-; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v7, v[3:4]
-; CHECK-NEXT: v_ashrrev_i32_e32 v6, 31, v1
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v6
-; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], -1, v5, v[3:4]
-; CHECK-NEXT: v_addc_u32_e32 v1, vcc, v1, v6, vcc
-; CHECK-NEXT: v_xor_b32_e32 v4, v0, v6
-; CHECK-NEXT: v_mul_lo_u32 v0, v7, v2
-; CHECK-NEXT: v_mul_lo_u32 v8, v5, v3
-; CHECK-NEXT: v_xor_b32_e32 v9, v1, v6
-; CHECK-NEXT: v_mul_hi_u32 v1, v5, v2
-; CHECK-NEXT: v_mul_hi_u32 v2, v7, v2
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; CHECK-NEXT: v_mul_lo_u32 v1, v7, v3
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v8, v0
-; CHECK-NEXT: v_mul_hi_u32 v8, v5, v3
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, v1, v2
-; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, v1, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v8
-; CHECK-NEXT: v_mul_hi_u32 v3, v7, v3
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v1, v0
-; CHECK-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, v2, v1
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, v3, v1
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v5, v0
-; CHECK-NEXT: v_addc_u32_e32 v1, vcc, v7, v1, vcc
-; CHECK-NEXT: v_mul_lo_u32 v2, v9, v0
-; CHECK-NEXT: v_mul_lo_u32 v3, v4, v1
-; CHECK-NEXT: v_mul_hi_u32 v7, v4, v0
-; CHECK-NEXT: v_mul_hi_u32 v0, v9, v0
-; CHECK-NEXT: v_mov_b32_e32 v5, 0x1000
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; CHECK-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v7
-; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT: v_mul_lo_u32 v7, v9, v1
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT: v_mul_hi_u32 v3, v4, v1
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v7, v0
-; CHECK-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v3
-; CHECK-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, v7, v3
-; CHECK-NEXT: v_add_i32_e32 v7, vcc, v0, v2
-; CHECK-NEXT: v_mul_hi_u32 v8, v9, v1
-; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, v7, 0
-; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, v8, v2
-; CHECK-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v5, v3, v[1:2]
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v4, v0
-; CHECK-NEXT: v_subb_u32_e64 v2, s[4:5], v9, v1, vcc
-; CHECK-NEXT: v_sub_i32_e64 v1, s[4:5], v9, v1
-; CHECK-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v5
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v5
-; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[4:5]
-; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v2
-; CHECK-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT: v_cndmask_b32_e64 v2, -1, v4, s[4:5]
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, 1, v7
-; CHECK-NEXT: v_addc_u32_e32 v8, vcc, 0, v3, vcc
-; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v5
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; CHECK-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, 1, v4
-; CHECK-NEXT: v_addc_u32_e32 v5, vcc, 0, v8, vcc
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v1, v8, v5, vcc
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v7, v0, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; CHECK-NEXT: v_xor_b32_e32 v0, v0, v6
-; CHECK-NEXT: v_xor_b32_e32 v1, v1, v6
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v6
-; CHECK-NEXT: v_subb_u32_e32 v1, vcc, v1, v6, vcc
+; CHECK-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 20, v2
+; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; CHECK-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
; CHECK-NEXT: s_setpc_b64 s[30:31]
%result = sdiv i64 %num, 4096
ret i64 %result
@@ -1128,473 +1013,31 @@ define <2 x i64> @v_sdiv_v2i64_pow2k_denom(<2 x i64> %num) {
; GISEL-LABEL: v_sdiv_v2i64_pow2k_denom:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT: v_cvt_f32_u32_e32 v4, 0x1000
-; GISEL-NEXT: v_cvt_f32_ubyte0_e32 v5, 0
-; GISEL-NEXT: s_sub_u32 s6, 0, 0x1000
-; GISEL-NEXT: s_subb_u32 s7, 0, 0
-; GISEL-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5
-; GISEL-NEXT: v_rcp_iflag_f32_e32 v4, v4
-; GISEL-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
-; GISEL-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
-; GISEL-NEXT: v_trunc_f32_e32 v7, v5
-; GISEL-NEXT: v_mac_f32_e32 v4, 0xcf800000, v7
-; GISEL-NEXT: v_cvt_u32_f32_e32 v6, v4
-; GISEL-NEXT: v_cvt_u32_f32_e32 v7, v7
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], s6, v6, 0
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s6, v7, v[5:6]
-; GISEL-NEXT: v_mul_lo_u32 v5, v7, v4
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s7, v6, v[8:9]
-; GISEL-NEXT: v_mul_hi_u32 v9, v6, v4
-; GISEL-NEXT: v_mul_hi_u32 v4, v7, v4
-; GISEL-NEXT: v_mul_lo_u32 v10, v6, v8
-; GISEL-NEXT: v_mul_lo_u32 v11, v7, v8
-; GISEL-NEXT: v_mul_hi_u32 v12, v6, v8
-; GISEL-NEXT: v_mul_hi_u32 v8, v7, v8
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v10
-; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v10, v5
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v11, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v12
-; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v9, v10
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v9, v5
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
-; GISEL-NEXT: v_add_i32_e32 v11, vcc, v6, v4
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s6, v11, 0
-; GISEL-NEXT: v_addc_u32_e32 v5, vcc, v7, v5, vcc
-; GISEL-NEXT: v_mov_b32_e32 v4, v9
-; GISEL-NEXT: v_mad_u64_u32 v[9:10], s[4:5], s6, v5, v[4:5]
; GISEL-NEXT: v_ashrrev_i32_e32 v4, 31, v1
+; GISEL-NEXT: v_lshrrev_b32_e32 v4, 20, v4
+; GISEL-NEXT: v_ashrrev_i32_e32 v5, 31, v3
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v4
-; GISEL-NEXT: v_mad_u64_u32 v[9:10], s[4:5], s7, v11, v[9:10]
-; GISEL-NEXT: v_addc_u32_e32 v1, vcc, v1, v4, vcc
-; GISEL-NEXT: v_xor_b32_e32 v10, v0, v4
-; GISEL-NEXT: v_mul_lo_u32 v0, v5, v8
-; GISEL-NEXT: v_mul_lo_u32 v12, v11, v9
-; GISEL-NEXT: v_xor_b32_e32 v13, v1, v4
-; GISEL-NEXT: v_mul_hi_u32 v1, v11, v8
-; GISEL-NEXT: v_mul_hi_u32 v8, v5, v8
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v12
-; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v1, v5, v9
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v12, v0
-; GISEL-NEXT: v_mul_hi_u32 v12, v11, v9
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v12
-; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v12
-; GISEL-NEXT: v_mul_hi_u32 v9, v5, v9
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v1, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v8, v1
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v9, v1
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v11, v0
-; GISEL-NEXT: v_addc_u32_e32 v1, vcc, v5, v1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v8, v13, v0
-; GISEL-NEXT: v_mul_lo_u32 v9, v10, v1
-; GISEL-NEXT: v_mul_hi_u32 v11, v10, v0
-; GISEL-NEXT: v_mul_hi_u32 v0, v13, v0
-; GISEL-NEXT: v_mov_b32_e32 v5, 0x1000
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v9
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v11
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v11, v13, v1
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v9, v8
-; GISEL-NEXT: v_mul_hi_u32 v9, v10, v1
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v11, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v9
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v11, v9
-; GISEL-NEXT: v_add_i32_e32 v11, vcc, v0, v8
-; GISEL-NEXT: v_mul_hi_u32 v12, v13, v1
-; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, v11, 0
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v9, v8
-; GISEL-NEXT: v_add_i32_e32 v12, vcc, v12, v8
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v5, v12, v[1:2]
-; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v10, v0
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], 0, v11, v[8:9]
-; GISEL-NEXT: s_sub_u32 s6, 0, 0x1000
-; GISEL-NEXT: s_subb_u32 s7, 0, 0
-; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], v13, v8, vcc
-; GISEL-NEXT: v_sub_i32_e64 v8, s[4:5], v13, v8
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1
-; GISEL-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v8, vcc
-; GISEL-NEXT: v_sub_i32_e32 v8, vcc, v0, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v10, -1, v9, s[4:5]
-; GISEL-NEXT: v_subbrev_u32_e32 v9, vcc, 0, v1, vcc
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, 1, v11
-; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], s6, v6, 0
-; GISEL-NEXT: v_addc_u32_e32 v14, vcc, 0, v12, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v8, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
-; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
-; GISEL-NEXT: v_cndmask_b32_e32 v15, -1, v8, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s6, v7, v[1:2]
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, 1, v13
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s7, v6, v[8:9]
-; GISEL-NEXT: v_addc_u32_e32 v16, vcc, 0, v14, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15
-; GISEL-NEXT: v_cndmask_b32_e32 v9, v13, v1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v1, v7, v0
-; GISEL-NEXT: v_mul_lo_u32 v13, v6, v8
-; GISEL-NEXT: v_mul_hi_u32 v15, v6, v0
-; GISEL-NEXT: v_cndmask_b32_e32 v14, v14, v16, vcc
-; GISEL-NEXT: v_mul_hi_u32 v0, v7, v0
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v13
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v15
-; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v15, v7, v8
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v13, v1
-; GISEL-NEXT: v_mul_hi_u32 v13, v6, v8
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v15, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v13
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, v15, v13
-; GISEL-NEXT: v_mul_hi_u32 v8, v7, v8
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v13, v1
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v8, v1
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v6, v0
-; GISEL-NEXT: v_addc_u32_e32 v13, vcc, v7, v1, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], s6, v8, 0
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
-; GISEL-NEXT: v_cndmask_b32_e32 v9, v11, v9, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], s6, v13, v[1:2]
-; GISEL-NEXT: v_xor_b32_e32 v1, v9, v4
-; GISEL-NEXT: v_ashrrev_i32_e32 v9, 31, v3
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], s7, v8, v[6:7]
-; GISEL-NEXT: v_cndmask_b32_e32 v10, v12, v14, vcc
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v9
-; GISEL-NEXT: v_addc_u32_e32 v3, vcc, v3, v9, vcc
-; GISEL-NEXT: v_xor_b32_e32 v11, v2, v9
-; GISEL-NEXT: v_mul_lo_u32 v2, v13, v0
-; GISEL-NEXT: v_mul_lo_u32 v7, v8, v6
-; GISEL-NEXT: v_xor_b32_e32 v12, v3, v9
-; GISEL-NEXT: v_mul_hi_u32 v3, v8, v0
-; GISEL-NEXT: v_mul_hi_u32 v0, v13, v0
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v3, v13, v6
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v7, v2
-; GISEL-NEXT: v_mul_hi_u32 v7, v8, v6
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v3, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, v3, v7
-; GISEL-NEXT: v_mul_hi_u32 v6, v13, v6
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v6, v2
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
-; GISEL-NEXT: v_addc_u32_e32 v2, vcc, v13, v2, vcc
-; GISEL-NEXT: v_mul_lo_u32 v3, v12, v0
-; GISEL-NEXT: v_mul_lo_u32 v6, v11, v2
-; GISEL-NEXT: v_mul_hi_u32 v7, v11, v0
-; GISEL-NEXT: v_mul_hi_u32 v0, v12, v0
-; GISEL-NEXT: v_xor_b32_e32 v8, v10, v4
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, v3, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, v3, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v7, v12, v2
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, v6, v3
-; GISEL-NEXT: v_mul_hi_u32 v6, v11, v2
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v7, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v6, vcc, v7, v6
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, v0, v3
-; GISEL-NEXT: v_mul_hi_u32 v7, v12, v2
-; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v5, v10, 0
-; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v6, v0
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, v7, v0
-; GISEL-NEXT: v_mov_b32_e32 v0, v3
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v5, v13, v[0:1]
-; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v1, v4
-; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v8, v4, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[4:5], 0, v10, v[6:7]
-; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v11, v2
-; GISEL-NEXT: v_subb_u32_e64 v4, s[4:5], v12, v3, vcc
-; GISEL-NEXT: v_sub_i32_e64 v3, s[4:5], v12, v3
-; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v2, v5
-; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v2, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v4
-; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; GISEL-NEXT: v_cndmask_b32_e64 v4, -1, v6, s[4:5]
-; GISEL-NEXT: v_add_i32_e32 v6, vcc, 1, v10
-; GISEL-NEXT: v_addc_u32_e32 v7, vcc, 0, v13, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v2, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
-; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
-; GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, 1, v6
-; GISEL-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v6, v3, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v7, v5, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v13, v3, vcc
-; GISEL-NEXT: v_xor_b32_e32 v2, v2, v9
-; GISEL-NEXT: v_xor_b32_e32 v3, v3, v9
-; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v2, v9
-; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v3, v9, vcc
+; GISEL-NEXT: v_lshrrev_b32_e32 v5, 20, v5
+; GISEL-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v5
+; GISEL-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; GISEL-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; GISEL-NEXT: v_ashr_i64 v[2:3], v[2:3], 12
; GISEL-NEXT: s_setpc_b64 s[30:31]
;
; CGP-LABEL: v_sdiv_v2i64_pow2k_denom:
; CGP: ; %bb.0:
; CGP-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CGP-NEXT: v_cvt_f32_u32_e32 v4, 0x1000
-; CGP-NEXT: v_cvt_f32_ubyte0_e32 v5, 0
-; CGP-NEXT: v_mov_b32_e32 v6, 0xfffff000
-; CGP-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5
-; CGP-NEXT: v_rcp_iflag_f32_e32 v4, v4
-; CGP-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
-; CGP-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
-; CGP-NEXT: v_trunc_f32_e32 v7, v5
-; CGP-NEXT: v_mac_f32_e32 v4, 0xcf800000, v7
-; CGP-NEXT: v_cvt_u32_f32_e32 v8, v4
-; CGP-NEXT: v_cvt_u32_f32_e32 v9, v7
-; CGP-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v6, v8, 0
-; CGP-NEXT: v_mov_b32_e32 v7, v5
-; CGP-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v6, v9, v[7:8]
-; CGP-NEXT: v_mul_hi_u32 v12, v9, v4
-; CGP-NEXT: v_mad_u64_u32 v[13:14], s[4:5], -1, v8, v[10:11]
-; CGP-NEXT: v_mul_lo_u32 v10, v9, v4
-; CGP-NEXT: v_mul_hi_u32 v11, v8, v4
-; CGP-NEXT: v_mul_lo_u32 v4, v8, v13
-; CGP-NEXT: v_mul_lo_u32 v7, v9, v13
-; CGP-NEXT: v_mul_hi_u32 v14, v8, v13
-; CGP-NEXT: v_mul_hi_u32 v13, v9, v13
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v10, v4
-; CGP-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v4, v11
-; CGP-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v15, v4
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v7, v12
-; CGP-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v7, v14
-; CGP-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v14, vcc, v15, v14
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v7, v4
-; CGP-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v14, v7
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v13, v7
-; CGP-NEXT: v_add_i32_e32 v16, vcc, v8, v4
-; CGP-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v6, v16, 0
-; CGP-NEXT: v_addc_u32_e32 v17, vcc, v9, v7, vcc
-; CGP-NEXT: v_mov_b32_e32 v4, v14
-; CGP-NEXT: v_mad_u64_u32 v[14:15], s[4:5], v6, v17, v[4:5]
-; CGP-NEXT: v_ashrrev_i32_e32 v7, 31, v1
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v7
-; CGP-NEXT: v_mad_u64_u32 v[14:15], s[4:5], -1, v16, v[14:15]
-; CGP-NEXT: v_addc_u32_e32 v1, vcc, v1, v7, vcc
-; CGP-NEXT: v_xor_b32_e32 v15, v0, v7
-; CGP-NEXT: v_mul_lo_u32 v0, v17, v13
-; CGP-NEXT: v_mul_lo_u32 v4, v16, v14
-; CGP-NEXT: v_xor_b32_e32 v18, v1, v7
-; CGP-NEXT: v_mul_hi_u32 v1, v16, v13
-; CGP-NEXT: v_mul_hi_u32 v13, v17, v13
+; CGP-NEXT: v_ashrrev_i32_e32 v4, 31, v1
+; CGP-NEXT: v_lshrrev_b32_e32 v4, 20, v4
; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v4
-; CGP-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; CGP-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v1, v17, v14
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v4, v0
-; CGP-NEXT: v_mul_hi_u32 v4, v16, v14
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v13
-; CGP-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v4
-; CGP-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v13, v4
-; CGP-NEXT: v_mul_hi_u32 v13, v17, v14
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v1, v0
-; CGP-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v4, v1
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v13, v1
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v16, v0
-; CGP-NEXT: v_addc_u32_e32 v1, vcc, v17, v1, vcc
-; CGP-NEXT: v_mul_lo_u32 v13, v18, v0
-; CGP-NEXT: v_mul_lo_u32 v14, v15, v1
-; CGP-NEXT: v_mul_hi_u32 v16, v15, v0
-; CGP-NEXT: v_mul_hi_u32 v0, v18, v0
-; CGP-NEXT: v_mov_b32_e32 v4, 0x1000
-; CGP-NEXT: v_add_i32_e32 v13, vcc, v13, v14
-; CGP-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v13, vcc, v13, v16
-; CGP-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v16, v18, v1
-; CGP-NEXT: v_add_i32_e32 v13, vcc, v14, v13
-; CGP-NEXT: v_mul_hi_u32 v14, v15, v1
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v16, v0
-; CGP-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v14
-; CGP-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v14, vcc, v16, v14
-; CGP-NEXT: v_add_i32_e32 v16, vcc, v0, v13
-; CGP-NEXT: v_mul_hi_u32 v17, v18, v1
-; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v4, v16, 0
-; CGP-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v13, vcc, v14, v13
-; CGP-NEXT: v_add_i32_e32 v17, vcc, v17, v13
-; CGP-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v4, v17, v[1:2]
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v15, v0
-; CGP-NEXT: v_subb_u32_e64 v1, s[4:5], v18, v13, vcc
-; CGP-NEXT: v_sub_i32_e64 v13, s[4:5], v18, v13
-; CGP-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v4
-; CGP-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[4:5]
-; CGP-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1
-; CGP-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v13, vcc
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
-; CGP-NEXT: v_subbrev_u32_e32 v13, vcc, 0, v1, vcc
-; CGP-NEXT: v_add_i32_e32 v15, vcc, 1, v16
-; CGP-NEXT: v_addc_u32_e32 v18, vcc, 0, v17, vcc
-; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v4
-; CGP-NEXT: v_mov_b32_e32 v0, v5
-; CGP-NEXT: v_cndmask_b32_e64 v14, -1, v14, s[4:5]
-; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v9, v[0:1]
-; CGP-NEXT: v_cndmask_b32_e64 v19, 0, -1, vcc
-; CGP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v13
-; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], -1, v8, v[0:1]
-; CGP-NEXT: v_cndmask_b32_e32 v5, -1, v19, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, 1, v15
-; CGP-NEXT: v_mul_lo_u32 v19, v8, v0
-; CGP-NEXT: v_addc_u32_e32 v13, vcc, 0, v18, vcc
-; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
-; CGP-NEXT: v_cndmask_b32_e32 v5, v15, v1, vcc
-; CGP-NEXT: v_cndmask_b32_e32 v13, v18, v13, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v10, v19
-; CGP-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v11
-; CGP-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v11, v9, v0
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v10, v1
-; CGP-NEXT: v_mul_hi_u32 v10, v8, v0
-; CGP-NEXT: v_add_i32_e32 v11, vcc, v11, v12
-; CGP-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v10, vcc, v11, v10
-; CGP-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v11, vcc, v12, v11
-; CGP-NEXT: v_mul_hi_u32 v0, v9, v0
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v10, v1
-; CGP-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v10, vcc, v11, v10
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v10
-; CGP-NEXT: v_add_i32_e32 v8, vcc, v8, v1
-; CGP-NEXT: v_addc_u32_e32 v9, vcc, v9, v0, vcc
-; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v8, 0
-; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
-; CGP-NEXT: v_cndmask_b32_e32 v5, v16, v5, vcc
-; CGP-NEXT: v_xor_b32_e32 v11, v5, v7
-; CGP-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v6, v9, v[1:2]
-; CGP-NEXT: v_cndmask_b32_e32 v10, v17, v13, vcc
-; CGP-NEXT: v_xor_b32_e32 v1, v10, v7
-; CGP-NEXT: v_mad_u64_u32 v[5:6], s[4:5], -1, v8, v[5:6]
-; CGP-NEXT: v_ashrrev_i32_e32 v10, 31, v3
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v10
-; CGP-NEXT: v_addc_u32_e32 v3, vcc, v3, v10, vcc
-; CGP-NEXT: v_xor_b32_e32 v12, v2, v10
-; CGP-NEXT: v_mul_lo_u32 v2, v9, v0
-; CGP-NEXT: v_mul_lo_u32 v6, v8, v5
-; CGP-NEXT: v_xor_b32_e32 v13, v3, v10
-; CGP-NEXT: v_mul_hi_u32 v3, v8, v0
-; CGP-NEXT: v_mul_hi_u32 v0, v9, v0
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v6
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; CGP-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v3, v9, v5
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v6, v2
-; CGP-NEXT: v_mul_hi_u32 v6, v8, v5
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v3, v0
-; CGP-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v6
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v3, v6
-; CGP-NEXT: v_mul_hi_u32 v5, v9, v5
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; CGP-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v5, v2
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v8, v0
-; CGP-NEXT: v_addc_u32_e32 v2, vcc, v9, v2, vcc
-; CGP-NEXT: v_mul_lo_u32 v5, v13, v3
-; CGP-NEXT: v_mul_lo_u32 v6, v12, v2
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v11, v7
-; CGP-NEXT: v_subb_u32_e32 v1, vcc, v1, v7, vcc
-; CGP-NEXT: v_mul_hi_u32 v7, v12, v3
-; CGP-NEXT: v_add_i32_e32 v5, vcc, v5, v6
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v5, vcc, v5, v7
-; CGP-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v7, v13, v2
-; CGP-NEXT: v_mul_hi_u32 v3, v13, v3
-; CGP-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; CGP-NEXT: v_mul_hi_u32 v6, v12, v2
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v7, v3
-; CGP-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v3, v6
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v6, vcc, v7, v6
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v3, v5
-; CGP-NEXT: v_mul_hi_u32 v8, v13, v2
-; CGP-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v4, v7, 0
-; CGP-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; CGP-NEXT: v_add_i32_e32 v8, vcc, v8, v5
-; CGP-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v4, v8, v[3:4]
-; CGP-NEXT: v_sub_i32_e32 v2, vcc, v12, v2
-; CGP-NEXT: v_subb_u32_e64 v3, s[4:5], v13, v5, vcc
-; CGP-NEXT: v_sub_i32_e64 v5, s[4:5], v13, v5
-; CGP-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
-; CGP-NEXT: v_cmp_ge_u32_e64 s[4:5], v2, v4
-; CGP-NEXT: v_sub_i32_e32 v2, vcc, v2, v4
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[4:5]
-; CGP-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
-; CGP-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
-; CGP-NEXT: v_cndmask_b32_e64 v3, -1, v6, s[4:5]
-; CGP-NEXT: v_add_i32_e32 v6, vcc, 1, v7
-; CGP-NEXT: v_addc_u32_e32 v9, vcc, 0, v8, vcc
-; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v4
-; CGP-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
-; CGP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
-; CGP-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc
-; CGP-NEXT: v_add_i32_e32 v4, vcc, 1, v6
-; CGP-NEXT: v_addc_u32_e32 v5, vcc, 0, v9, vcc
-; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; CGP-NEXT: v_cndmask_b32_e32 v2, v6, v4, vcc
-; CGP-NEXT: v_cndmask_b32_e32 v4, v9, v5, vcc
-; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
-; CGP-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
-; CGP-NEXT: v_cndmask_b32_e32 v3, v8, v4, vcc
-; CGP-NEXT: v_xor_b32_e32 v2, v2, v10
-; CGP-NEXT: v_xor_b32_e32 v3, v3, v10
-; CGP-NEXT: v_sub_i32_e32 v2, vcc, v2, v10
-; CGP-NEXT: v_subb_u32_e32 v3, vcc, v3, v10, vcc
+; CGP-NEXT: v_ashrrev_i32_e32 v4, 31, v3
+; CGP-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CGP-NEXT: v_lshrrev_b32_e32 v4, 20, v4
+; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v4
+; CGP-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; CGP-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; CGP-NEXT: v_ashr_i64 v[2:3], v[2:3], 12
; CGP-NEXT: s_setpc_b64 s[30:31]
%result = sdiv <2 x i64> %num, <i64 4096, i64 4096>
ret <2 x i64> %result
@@ -3091,253 +2534,252 @@ define <2 x i64> @v_sdiv_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_and_b32_e32 v1, 0xffffff, v4
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, 0, v1
-; GISEL-NEXT: v_addc_u32_e64 v1, s[4:5], 0, 0, vcc
-; GISEL-NEXT: v_cvt_f32_u32_e32 v4, v3
+; GISEL-NEXT: v_add_i32_e64 v3, s[4:5], 0, 0
+; GISEL-NEXT: v_add_i32_e32 v1, vcc, 0, v1
; GISEL-NEXT: v_cvt_f32_u32_e32 v5, v1
-; GISEL-NEXT: v_sub_i32_e32 v10, vcc, 0, v3
-; GISEL-NEXT: v_subb_u32_e32 v11, vcc, 0, v1, vcc
-; GISEL-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5
-; GISEL-NEXT: v_rcp_iflag_f32_e32 v4, v4
+; GISEL-NEXT: v_cvt_f32_u32_e32 v4, v3
+; GISEL-NEXT: v_sub_i32_e32 v11, vcc, 0, v1
+; GISEL-NEXT: v_subb_u32_e32 v12, vcc, 0, v3, vcc
+; GISEL-NEXT: v_mac_f32_e32 v5, 0x4f800000, v4
+; GISEL-NEXT: v_rcp_iflag_f32_e32 v5, v5
; GISEL-NEXT: v_and_b32_e32 v0, 0xffffff, v0
-; GISEL-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
-; GISEL-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
-; GISEL-NEXT: v_trunc_f32_e32 v7, v5
-; GISEL-NEXT: v_mac_f32_e32 v4, 0xcf800000, v7
-; GISEL-NEXT: v_cvt_u32_f32_e32 v9, v4
-; GISEL-NEXT: v_cvt_u32_f32_e32 v12, v7
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v10, v9, 0
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v10, v12, v[5:6]
-; GISEL-NEXT: v_mul_lo_u32 v5, v12, v4
-; GISEL-NEXT: v_mul_hi_u32 v13, v9, v4
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v9, v[7:8]
-; GISEL-NEXT: v_mul_hi_u32 v4, v12, v4
-; GISEL-NEXT: v_mul_lo_u32 v8, v9, v7
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v13
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v13, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
-; GISEL-NEXT: v_mul_hi_u32 v8, v9, v7
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v13, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v13, v8
-; GISEL-NEXT: v_mul_hi_u32 v7, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v5
+; GISEL-NEXT: v_mul_f32_e32 v5, 0x5f7ffffc, v5
+; GISEL-NEXT: v_mul_f32_e32 v7, 0x2f800000, v5
+; GISEL-NEXT: v_trunc_f32_e32 v9, v7
+; GISEL-NEXT: v_mac_f32_e32 v5, 0xcf800000, v9
+; GISEL-NEXT: v_cvt_u32_f32_e32 v10, v5
+; GISEL-NEXT: v_cvt_u32_f32_e32 v13, v9
+; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v10, 0
+; GISEL-NEXT: v_mov_b32_e32 v5, v8
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v11, v13, v[5:6]
+; GISEL-NEXT: v_mul_lo_u32 v5, v13, v7
+; GISEL-NEXT: v_mul_hi_u32 v14, v10, v7
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v12, v10, v[8:9]
+; GISEL-NEXT: v_mul_hi_u32 v7, v13, v7
+; GISEL-NEXT: v_mul_lo_u32 v9, v10, v8
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v14
; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
+; GISEL-NEXT: v_mul_lo_u32 v14, v13, v8
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v9, v5
+; GISEL-NEXT: v_mul_hi_u32 v9, v10, v8
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v14, v7
+; GISEL-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v9, vcc, v14, v9
+; GISEL-NEXT: v_mul_hi_u32 v8, v13, v8
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v9, v4
-; GISEL-NEXT: v_addc_u32_e32 v12, vcc, v12, v5, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v10, v9, 0
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v10, v12, v[5:6]
-; GISEL-NEXT: v_mul_lo_u32 v5, v12, v4
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, 0, v0
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v9, v[7:8]
-; GISEL-NEXT: v_mul_hi_u32 v0, v9, v4
-; GISEL-NEXT: v_addc_u32_e64 v11, s[4:5], 0, 0, vcc
-; GISEL-NEXT: v_mul_lo_u32 v8, v9, v7
-; GISEL-NEXT: v_mul_hi_u32 v4, v12, v4
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v9, v7
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v8, v7
+; GISEL-NEXT: v_add_i32_e32 v10, vcc, v10, v5
+; GISEL-NEXT: v_addc_u32_e32 v13, vcc, v13, v7, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v10, 0
+; GISEL-NEXT: v_mov_b32_e32 v5, v8
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v11, v13, v[5:6]
+; GISEL-NEXT: v_mul_lo_u32 v5, v13, v7
+; GISEL-NEXT: v_add_i32_e32 v11, vcc, 0, v0
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v12, v10, v[8:9]
+; GISEL-NEXT: v_mul_hi_u32 v0, v10, v7
+; GISEL-NEXT: v_mul_hi_u32 v7, v13, v7
+; GISEL-NEXT: v_mul_lo_u32 v9, v10, v8
+; GISEL-NEXT: v_and_b32_e32 v12, 0xffffff, v2
+; GISEL-NEXT: v_and_b32_e32 v2, 0xffffff, v6
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v5, v0
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v5, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
-; GISEL-NEXT: v_mul_hi_u32 v8, v9, v7
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v5, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
-; GISEL-NEXT: v_mul_hi_u32 v7, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v4, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v5, v4
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v7, v4
+; GISEL-NEXT: v_mul_lo_u32 v5, v13, v8
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v9, v0
-; GISEL-NEXT: v_addc_u32_e32 v4, vcc, v12, v4, vcc
-; GISEL-NEXT: v_mul_lo_u32 v5, v11, v0
-; GISEL-NEXT: v_mul_lo_u32 v7, v10, v4
-; GISEL-NEXT: v_mul_hi_u32 v8, v10, v0
-; GISEL-NEXT: v_mul_hi_u32 v0, v11, v0
-; GISEL-NEXT: v_and_b32_e32 v12, 0xffffff, v2
+; GISEL-NEXT: v_mul_hi_u32 v9, v10, v8
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v9
+; GISEL-NEXT: v_mul_hi_u32 v8, v13, v8
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v5, v0
; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v8, v11, v4
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
-; GISEL-NEXT: v_mul_hi_u32 v7, v10, v4
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v10, v0
+; GISEL-NEXT: v_addc_u32_e32 v5, vcc, v13, v5, vcc
+; GISEL-NEXT: v_mul_lo_u32 v7, v3, v0
+; GISEL-NEXT: v_mul_lo_u32 v8, v11, v5
+; GISEL-NEXT: v_mul_hi_u32 v9, v11, v0
+; GISEL-NEXT: v_mul_hi_u32 v0, v3, v0
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v8
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v7
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v9
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GISEL-NEXT: v_mul_lo_u32 v9, v3, v5
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v8, v7
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v0, v5
-; GISEL-NEXT: v_mul_hi_u32 v8, v11, v4
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v3, v9, 0
+; GISEL-NEXT: v_mul_hi_u32 v8, v11, v5
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v9, v0
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v8
+; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v9, vcc, v9, v8
+; GISEL-NEXT: v_add_i32_e32 v10, vcc, v0, v7
+; GISEL-NEXT: v_mul_hi_u32 v5, v3, v5
+; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v1, v10, 0
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v7, v0
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v3, v0, v[5:6]
-; GISEL-NEXT: v_and_b32_e32 v2, 0xffffff, v6
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v1, v9, v[7:8]
-; GISEL-NEXT: v_sub_i32_e32 v6, vcc, v10, v4
-; GISEL-NEXT: v_subb_u32_e64 v7, s[4:5], v11, v5, vcc
-; GISEL-NEXT: v_sub_i32_e64 v5, s[4:5], v11, v5
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v7, v1
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v9, v0
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v5, v0
+; GISEL-NEXT: v_mov_b32_e32 v5, v8
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v1, v0, v[5:6]
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v3, v10, v[8:9]
+; GISEL-NEXT: v_sub_i32_e32 v6, vcc, v11, v7
+; GISEL-NEXT: v_subb_u32_e64 v7, s[4:5], v3, v5, vcc
+; GISEL-NEXT: v_sub_i32_e64 v5, s[4:5], v3, v5
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v7, v3
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v6, v3
-; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], 0, v2
-; GISEL-NEXT: v_addc_u32_e64 v2, s[4:5], 0, 0, s[4:5]
-; GISEL-NEXT: v_cvt_f32_u32_e32 v11, v4
-; GISEL-NEXT: v_cvt_f32_u32_e32 v13, v2
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v7, v1
-; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v5, v1, vcc
-; GISEL-NEXT: v_mac_f32_e32 v11, 0x4f800000, v13
-; GISEL-NEXT: v_rcp_iflag_f32_e32 v7, v11
-; GISEL-NEXT: v_cndmask_b32_e64 v8, v8, v10, s[4:5]
-; GISEL-NEXT: v_sub_i32_e32 v10, vcc, v6, v3
-; GISEL-NEXT: v_subbrev_u32_e32 v11, vcc, 0, v5, vcc
-; GISEL-NEXT: v_mul_f32_e32 v5, 0x5f7ffffc, v7
-; GISEL-NEXT: v_mul_f32_e32 v6, 0x2f800000, v5
-; GISEL-NEXT: v_trunc_f32_e32 v6, v6
-; GISEL-NEXT: v_mac_f32_e32 v5, 0xcf800000, v6
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v6, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[4:5]
+; GISEL-NEXT: v_add_i32_e64 v2, s[4:5], 0, v2
+; GISEL-NEXT: v_cvt_f32_u32_e32 v11, v2
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v7, v3
+; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v5, v3, vcc
+; GISEL-NEXT: v_mac_f32_e32 v11, 0x4f800000, v4
+; GISEL-NEXT: v_rcp_iflag_f32_e32 v4, v11
+; GISEL-NEXT: v_cndmask_b32_e64 v7, v8, v9, s[4:5]
+; GISEL-NEXT: v_sub_i32_e32 v8, vcc, v6, v1
+; GISEL-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
+; GISEL-NEXT: v_subbrev_u32_e32 v9, vcc, 0, v5, vcc
+; GISEL-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
+; GISEL-NEXT: v_trunc_f32_e32 v5, v5
+; GISEL-NEXT: v_mac_f32_e32 v4, 0xcf800000, v5
+; GISEL-NEXT: v_cvt_u32_f32_e32 v11, v4
+; GISEL-NEXT: v_sub_i32_e32 v14, vcc, 0, v2
; GISEL-NEXT: v_cvt_u32_f32_e32 v13, v5
-; GISEL-NEXT: v_sub_i32_e32 v15, vcc, 0, v4
-; GISEL-NEXT: v_cvt_u32_f32_e32 v14, v6
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v15, v13, 0
-; GISEL-NEXT: v_subb_u32_e32 v16, vcc, 0, v2, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v15, v14, v[6:7]
-; GISEL-NEXT: v_add_i32_e32 v17, vcc, 1, v9
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v16, v13, v[6:7]
-; GISEL-NEXT: v_addc_u32_e32 v18, vcc, 0, v0, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v11, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v19, 0, -1, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v10, v3
-; GISEL-NEXT: v_mul_lo_u32 v7, v14, v5
-; GISEL-NEXT: v_mul_lo_u32 v10, v13, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc
-; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v11, v1
-; GISEL-NEXT: v_mul_hi_u32 v1, v13, v5
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v10
-; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v7, v1
+; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v14, v11, 0
+; GISEL-NEXT: v_subb_u32_e32 v15, vcc, 0, v3, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v14, v13, v[5:6]
+; GISEL-NEXT: v_add_i32_e32 v16, vcc, 1, v10
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v15, v11, v[5:6]
+; GISEL-NEXT: v_addc_u32_e32 v17, vcc, 0, v0, vcc
+; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v9, v3
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, -1, vcc
+; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v8, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc
+; GISEL-NEXT: v_mul_lo_u32 v6, v13, v4
+; GISEL-NEXT: v_mul_lo_u32 v8, v11, v5
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v9, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v18, v1, vcc
+; GISEL-NEXT: v_mul_hi_u32 v1, v11, v4
+; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v8
+; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v1, vcc, v6, v1
; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v7, v14, v6
-; GISEL-NEXT: v_mul_hi_u32 v5, v14, v5
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v10, v1
-; GISEL-NEXT: v_mul_hi_u32 v10, v13, v6
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v10
-; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v10
-; GISEL-NEXT: v_mul_hi_u32 v6, v14, v6
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v5, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, v13, v1
-; GISEL-NEXT: v_addc_u32_e32 v11, vcc, v14, v5, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v15, v10, 0
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, 1, v17
-; GISEL-NEXT: v_mov_b32_e32 v1, v6
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v15, v11, v[1:2]
-; GISEL-NEXT: v_addc_u32_e32 v14, vcc, 0, v18, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v16, v10, v[6:7]
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
-; GISEL-NEXT: v_cndmask_b32_e32 v1, v17, v13, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v18, v14, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
-; GISEL-NEXT: v_mul_lo_u32 v7, v11, v5
-; GISEL-NEXT: v_mul_lo_u32 v8, v10, v6
-; GISEL-NEXT: v_mul_hi_u32 v13, v10, v5
-; GISEL-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
-; GISEL-NEXT: v_add_i32_e64 v9, s[4:5], 0, v12
-; GISEL-NEXT: v_addc_u32_e64 v12, s[4:5], 0, 0, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v7, s[4:5], v7, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v7, s[4:5], v7, v13
+; GISEL-NEXT: v_mul_lo_u32 v6, v13, v5
+; GISEL-NEXT: v_mul_hi_u32 v4, v13, v4
+; GISEL-NEXT: v_add_i32_e32 v1, vcc, v8, v1
+; GISEL-NEXT: v_mul_hi_u32 v8, v11, v5
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v6, v4
+; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v8
+; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v8
+; GISEL-NEXT: v_mul_hi_u32 v5, v13, v5
+; GISEL-NEXT: v_add_i32_e32 v1, vcc, v4, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v6, v4
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v5, v4
+; GISEL-NEXT: v_add_i32_e32 v8, vcc, v11, v1
+; GISEL-NEXT: v_addc_u32_e32 v11, vcc, v13, v4, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v14, v8, 0
+; GISEL-NEXT: v_add_i32_e32 v13, vcc, 1, v16
+; GISEL-NEXT: v_mov_b32_e32 v1, v5
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v14, v11, v[1:2]
+; GISEL-NEXT: v_addc_u32_e32 v18, vcc, 0, v17, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v15, v8, v[5:6]
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v16, v13, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v17, v18, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
+; GISEL-NEXT: v_mul_lo_u32 v6, v11, v4
+; GISEL-NEXT: v_mul_lo_u32 v7, v8, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v10, v1, vcc
+; GISEL-NEXT: v_add_i32_e64 v10, s[4:5], 0, v12
+; GISEL-NEXT: v_mul_hi_u32 v12, v8, v4
+; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v6, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[4:5]
-; GISEL-NEXT: v_mul_lo_u32 v13, v11, v6
-; GISEL-NEXT: v_mul_hi_u32 v5, v11, v5
-; GISEL-NEXT: v_add_i32_e64 v7, s[4:5], v8, v7
-; GISEL-NEXT: v_mul_hi_u32 v8, v10, v6
-; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v13, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v5, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v8, s[4:5], v13, v8
-; GISEL-NEXT: v_mul_hi_u32 v6, v11, v6
-; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v5, v7
+; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v6, v12
+; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[4:5]
+; GISEL-NEXT: v_mul_lo_u32 v12, v11, v5
+; GISEL-NEXT: v_mul_hi_u32 v4, v11, v4
+; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v7, v6
+; GISEL-NEXT: v_mul_hi_u32 v7, v8, v5
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v12, v4
+; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v4, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v7, s[4:5], v8, v7
-; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v6, v7
-; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v10, v5
-; GISEL-NEXT: v_addc_u32_e64 v6, s[4:5], v11, v6, s[4:5]
-; GISEL-NEXT: v_mul_lo_u32 v7, v12, v5
-; GISEL-NEXT: v_mul_lo_u32 v8, v9, v6
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v0, v3, vcc
-; GISEL-NEXT: v_mul_hi_u32 v0, v9, v5
-; GISEL-NEXT: v_mul_hi_u32 v5, v12, v5
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v7, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v7, v12, v6
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
-; GISEL-NEXT: v_mul_hi_u32 v8, v9, v6
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
+; GISEL-NEXT: v_add_i32_e64 v7, s[4:5], v12, v7
+; GISEL-NEXT: v_mul_hi_u32 v5, v11, v5
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v4, v6
+; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v7, v6
+; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v5, v6
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v8, v4
+; GISEL-NEXT: v_addc_u32_e64 v5, s[4:5], v11, v5, s[4:5]
+; GISEL-NEXT: v_mul_lo_u32 v6, v3, v4
+; GISEL-NEXT: v_mul_lo_u32 v7, v10, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v8, v0, v9, vcc
+; GISEL-NEXT: v_mul_hi_u32 v0, v10, v4
+; GISEL-NEXT: v_mul_hi_u32 v4, v3, v4
+; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v8
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v5, v0
-; GISEL-NEXT: v_mul_hi_u32 v10, v12, v6
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v4, v8, 0
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v6, v0
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_mul_lo_u32 v6, v3, v5
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v7, v0
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, v10, v0
-; GISEL-NEXT: v_mov_b32_e32 v0, v6
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v4, v10, v[0:1]
+; GISEL-NEXT: v_mul_hi_u32 v7, v10, v5
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v6, v4
+; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v7
+; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v7
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v4, v0
+; GISEL-NEXT: v_mul_hi_u32 v9, v3, v5
+; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v2, v7, 0
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v6, v0
+; GISEL-NEXT: v_add_i32_e32 v9, vcc, v9, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v5
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v2, v9, v[0:1]
; GISEL-NEXT: v_subrev_i32_e32 v0, vcc, 0, v1
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v2, v8, v[6:7]
-; GISEL-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v3, vcc
-; GISEL-NEXT: v_sub_i32_e32 v3, vcc, v9, v5
-; GISEL-NEXT: v_subb_u32_e64 v5, s[4:5], v12, v6, vcc
-; GISEL-NEXT: v_sub_i32_e64 v6, s[4:5], v12, v6
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v5, v2
-; GISEL-NEXT: v_subb_u32_e32 v6, vcc, v6, v2, vcc
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v3, v4
-; GISEL-NEXT: v_sub_i32_e32 v3, vcc, v3, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v5, v2
-; GISEL-NEXT: v_subbrev_u32_e32 v6, vcc, 0, v6, vcc
-; GISEL-NEXT: v_cndmask_b32_e64 v5, v7, v9, s[4:5]
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, 1, v8
-; GISEL-NEXT: v_addc_u32_e32 v9, vcc, 0, v10, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v6, v2
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v3, v7, v[5:6]
+; GISEL-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v8, vcc
+; GISEL-NEXT: v_sub_i32_e32 v4, vcc, v10, v4
+; GISEL-NEXT: v_subb_u32_e64 v6, s[4:5], v3, v5, vcc
+; GISEL-NEXT: v_sub_i32_e64 v5, s[4:5], v3, v5
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v6, v3
+; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v5, v3, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[4:5]
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v4, v2
+; GISEL-NEXT: v_sub_i32_e32 v4, vcc, v4, v2
+; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[4:5]
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v6, v3
+; GISEL-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v6, v8, v10, s[4:5]
+; GISEL-NEXT: v_add_i32_e32 v8, vcc, 1, v7
+; GISEL-NEXT: v_addc_u32_e32 v10, vcc, 0, v9, vcc
+; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v5, v3
; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, -1, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v3, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc
-; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v6, v2
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v11, v3, vcc
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, 1, v7
-; GISEL-NEXT: v_addc_u32_e32 v4, vcc, 0, v9, vcc
+; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v4, v2
+; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v5, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v11, v2, vcc
+; GISEL-NEXT: v_add_i32_e32 v3, vcc, 1, v8
+; GISEL-NEXT: v_addc_u32_e32 v4, vcc, 0, v10, vcc
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v7, v3, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v9, v4, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v8, v3, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v10, v4, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
; GISEL-NEXT: v_subrev_i32_e32 v2, vcc, 0, v2
; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -3399,3 +2841,24 @@ define <2 x i64> @v_sdiv_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
%result = sdiv <2 x i64> %num.mask, %den.mask
ret <2 x i64> %result
}
+
+define i64 @v_sdiv_i64_exact(i64 %num) {
+; CHECK-LABEL: v_sdiv_i64_exact:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact i64 %num, 4096
+ ret i64 %result
+}
+
+define <2 x i64> @v_sdiv_v2i64_exact(<2 x i64> %num) {
+; CHECK-LABEL: v_sdiv_v2i64_exact:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; CHECK-NEXT: v_ashr_i64 v[2:3], v[2:3], 10
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact <2 x i64> %num, <i64 4096, i64 1024>
+ ret <2 x i64> %result
+}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
index c455b24..83ebc84 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
@@ -3034,253 +3034,251 @@ define <2 x i64> @v_srem_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_and_b32_e32 v1, 0xffffff, v4
+; GISEL-NEXT: v_add_i32_e64 v3, s[4:5], 0, 0
; GISEL-NEXT: v_add_i32_e32 v1, vcc, 0, v1
-; GISEL-NEXT: v_addc_u32_e64 v3, s[4:5], 0, 0, vcc
-; GISEL-NEXT: v_cvt_f32_u32_e32 v4, v1
-; GISEL-NEXT: v_cvt_f32_u32_e32 v5, v3
-; GISEL-NEXT: v_sub_i32_e32 v10, vcc, 0, v1
-; GISEL-NEXT: v_subb_u32_e32 v11, vcc, 0, v3, vcc
-; GISEL-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5
-; GISEL-NEXT: v_rcp_iflag_f32_e32 v4, v4
+; GISEL-NEXT: v_cvt_f32_u32_e32 v5, v1
+; GISEL-NEXT: v_cvt_f32_u32_e32 v4, v3
+; GISEL-NEXT: v_sub_i32_e32 v11, vcc, 0, v1
+; GISEL-NEXT: v_subb_u32_e32 v12, vcc, 0, v3, vcc
+; GISEL-NEXT: v_mac_f32_e32 v5, 0x4f800000, v4
+; GISEL-NEXT: v_rcp_iflag_f32_e32 v5, v5
; GISEL-NEXT: v_and_b32_e32 v0, 0xffffff, v0
-; GISEL-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
-; GISEL-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
-; GISEL-NEXT: v_trunc_f32_e32 v7, v5
-; GISEL-NEXT: v_mac_f32_e32 v4, 0xcf800000, v7
-; GISEL-NEXT: v_cvt_u32_f32_e32 v9, v4
-; GISEL-NEXT: v_cvt_u32_f32_e32 v12, v7
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v10, v9, 0
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v10, v12, v[5:6]
-; GISEL-NEXT: v_mul_lo_u32 v5, v12, v4
-; GISEL-NEXT: v_mul_hi_u32 v13, v9, v4
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v9, v[7:8]
-; GISEL-NEXT: v_mul_hi_u32 v4, v12, v4
-; GISEL-NEXT: v_mul_lo_u32 v8, v9, v7
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v13
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v13, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
-; GISEL-NEXT: v_mul_hi_u32 v8, v9, v7
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v13, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v13, v8
-; GISEL-NEXT: v_mul_hi_u32 v7, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v5
+; GISEL-NEXT: v_mul_f32_e32 v5, 0x5f7ffffc, v5
+; GISEL-NEXT: v_mul_f32_e32 v7, 0x2f800000, v5
+; GISEL-NEXT: v_trunc_f32_e32 v9, v7
+; GISEL-NEXT: v_mac_f32_e32 v5, 0xcf800000, v9
+; GISEL-NEXT: v_cvt_u32_f32_e32 v10, v5
+; GISEL-NEXT: v_cvt_u32_f32_e32 v13, v9
+; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v10, 0
+; GISEL-NEXT: v_mov_b32_e32 v5, v8
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v11, v13, v[5:6]
+; GISEL-NEXT: v_mul_lo_u32 v5, v13, v7
+; GISEL-NEXT: v_mul_hi_u32 v14, v10, v7
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v12, v10, v[8:9]
+; GISEL-NEXT: v_mul_hi_u32 v7, v13, v7
+; GISEL-NEXT: v_mul_lo_u32 v9, v10, v8
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v14
; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
+; GISEL-NEXT: v_mul_lo_u32 v14, v13, v8
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v9, v5
+; GISEL-NEXT: v_mul_hi_u32 v9, v10, v8
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v14, v7
+; GISEL-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v9, vcc, v14, v9
+; GISEL-NEXT: v_mul_hi_u32 v8, v13, v8
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v9, v4
-; GISEL-NEXT: v_addc_u32_e32 v12, vcc, v12, v5, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v10, v9, 0
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v10, v12, v[5:6]
-; GISEL-NEXT: v_mul_lo_u32 v5, v12, v4
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, 0, v0
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v9, v[7:8]
-; GISEL-NEXT: v_mul_hi_u32 v0, v9, v4
-; GISEL-NEXT: v_addc_u32_e64 v11, s[4:5], 0, 0, vcc
-; GISEL-NEXT: v_mul_lo_u32 v8, v9, v7
-; GISEL-NEXT: v_mul_hi_u32 v4, v12, v4
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v9, v7
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v8, v7
+; GISEL-NEXT: v_add_i32_e32 v10, vcc, v10, v5
+; GISEL-NEXT: v_addc_u32_e32 v13, vcc, v13, v7, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v10, 0
+; GISEL-NEXT: v_mov_b32_e32 v5, v8
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v11, v13, v[5:6]
+; GISEL-NEXT: v_mul_lo_u32 v5, v13, v7
+; GISEL-NEXT: v_add_i32_e32 v11, vcc, 0, v0
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v12, v10, v[8:9]
+; GISEL-NEXT: v_mul_hi_u32 v0, v10, v7
+; GISEL-NEXT: v_mul_hi_u32 v7, v13, v7
+; GISEL-NEXT: v_mul_lo_u32 v9, v10, v8
+; GISEL-NEXT: v_and_b32_e32 v12, 0xffffff, v2
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v5, v0
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v5, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
-; GISEL-NEXT: v_mul_hi_u32 v8, v9, v7
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v5, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
-; GISEL-NEXT: v_mul_hi_u32 v7, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v4, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v5, v4
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v7, v4
+; GISEL-NEXT: v_mul_lo_u32 v5, v13, v8
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v9, v0
-; GISEL-NEXT: v_addc_u32_e32 v4, vcc, v12, v4, vcc
-; GISEL-NEXT: v_mul_lo_u32 v5, v11, v0
-; GISEL-NEXT: v_mul_lo_u32 v7, v10, v4
-; GISEL-NEXT: v_mul_hi_u32 v8, v10, v0
-; GISEL-NEXT: v_mul_hi_u32 v0, v11, v0
-; GISEL-NEXT: v_and_b32_e32 v12, 0xffffff, v2
+; GISEL-NEXT: v_mul_hi_u32 v9, v10, v8
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v9
+; GISEL-NEXT: v_mul_hi_u32 v8, v13, v8
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v5, v0
; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v8, v11, v4
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
-; GISEL-NEXT: v_mul_hi_u32 v7, v10, v4
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v10, v0
+; GISEL-NEXT: v_addc_u32_e32 v5, vcc, v13, v5, vcc
+; GISEL-NEXT: v_mul_lo_u32 v7, v3, v0
+; GISEL-NEXT: v_mul_lo_u32 v8, v11, v5
+; GISEL-NEXT: v_mul_hi_u32 v9, v11, v0
+; GISEL-NEXT: v_mul_hi_u32 v0, v3, v0
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v8
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v7
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v9
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GISEL-NEXT: v_mul_lo_u32 v9, v3, v5
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v8, v7
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v0, v5
-; GISEL-NEXT: v_mul_hi_u32 v8, v11, v4
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, v9, 0
+; GISEL-NEXT: v_mul_hi_u32 v8, v11, v5
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v9, v0
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v8
+; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v9, vcc, v9, v8
+; GISEL-NEXT: v_add_i32_e32 v10, vcc, v0, v7
+; GISEL-NEXT: v_mul_hi_u32 v5, v3, v5
+; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v1, v10, 0
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v7, v0
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v8, v0
-; GISEL-NEXT: v_mov_b32_e32 v0, v5
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v1, v7, v[0:1]
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v9, v0
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v8
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v1, v5, v[0:1]
; GISEL-NEXT: v_and_b32_e32 v0, 0xffffff, v6
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v3, v9, v[7:8]
-; GISEL-NEXT: v_sub_i32_e32 v8, vcc, v10, v4
-; GISEL-NEXT: v_subb_u32_e64 v9, s[4:5], v11, v5, vcc
-; GISEL-NEXT: v_sub_i32_e64 v5, s[4:5], v11, v5
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v9, v3
+; GISEL-NEXT: v_sub_i32_e32 v7, vcc, v11, v7
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v3, v10, v[8:9]
+; GISEL-NEXT: v_subb_u32_e64 v8, s[4:5], v3, v5, vcc
+; GISEL-NEXT: v_sub_i32_e64 v5, s[4:5], v3, v5
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v8, v3
; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v8, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[4:5]
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v7, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[4:5]
; GISEL-NEXT: v_add_i32_e64 v2, s[4:5], 0, v0
-; GISEL-NEXT: v_addc_u32_e64 v4, s[4:5], 0, 0, s[4:5]
; GISEL-NEXT: v_cvt_f32_u32_e32 v0, v2
-; GISEL-NEXT: v_cvt_f32_u32_e32 v10, v4
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v9, v3
-; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v5, v3, vcc
-; GISEL-NEXT: v_mac_f32_e32 v0, 0x4f800000, v10
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v8, v3
+; GISEL-NEXT: v_cndmask_b32_e64 v9, v6, v9, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v10, vcc, v5, v3, vcc
+; GISEL-NEXT: v_mac_f32_e32 v0, 0x4f800000, v4
; GISEL-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v11, v6, v7, s[4:5]
-; GISEL-NEXT: v_sub_i32_e32 v10, vcc, v8, v1
+; GISEL-NEXT: v_sub_i32_e32 v11, vcc, v7, v1
+; GISEL-NEXT: v_subbrev_u32_e64 v13, s[4:5], 0, v10, vcc
; GISEL-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
-; GISEL-NEXT: v_mul_f32_e32 v5, 0x2f800000, v0
-; GISEL-NEXT: v_trunc_f32_e32 v7, v5
-; GISEL-NEXT: v_mac_f32_e32 v0, 0xcf800000, v7
-; GISEL-NEXT: v_cvt_u32_f32_e32 v15, v0
-; GISEL-NEXT: v_subbrev_u32_e64 v14, s[4:5], 0, v13, vcc
-; GISEL-NEXT: v_sub_i32_e64 v16, s[4:5], 0, v2
-; GISEL-NEXT: v_subb_u32_e64 v17, s[4:5], 0, v4, s[4:5]
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v16, v15, 0
-; GISEL-NEXT: v_cvt_u32_f32_e32 v18, v7
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v14, v3
-; GISEL-NEXT: v_mov_b32_e32 v0, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v19, 0, -1, s[4:5]
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v16, v18, v[0:1]
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v10, v1
+; GISEL-NEXT: v_mul_f32_e32 v4, 0x2f800000, v0
+; GISEL-NEXT: v_trunc_f32_e32 v6, v4
+; GISEL-NEXT: v_mac_f32_e32 v0, 0xcf800000, v6
+; GISEL-NEXT: v_cvt_u32_f32_e32 v14, v0
+; GISEL-NEXT: v_sub_i32_e64 v15, s[4:5], 0, v2
+; GISEL-NEXT: v_subb_u32_e64 v16, s[4:5], 0, v3, s[4:5]
+; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v15, v14, 0
+; GISEL-NEXT: v_cvt_u32_f32_e32 v17, v6
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v13, v3
+; GISEL-NEXT: v_mov_b32_e32 v0, v5
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, -1, s[4:5]
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v15, v17, v[0:1]
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v11, v1
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v17, v15, v[6:7]
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v14, v3
-; GISEL-NEXT: v_cndmask_b32_e64 v7, v19, v0, s[4:5]
-; GISEL-NEXT: v_mul_lo_u32 v0, v18, v5
-; GISEL-NEXT: v_mul_lo_u32 v19, v15, v6
-; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v13, v3, vcc
-; GISEL-NEXT: v_mul_hi_u32 v13, v15, v5
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v16, v14, v[5:6]
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v13, v3
+; GISEL-NEXT: v_cndmask_b32_e64 v6, v18, v0, s[4:5]
+; GISEL-NEXT: v_mul_lo_u32 v0, v17, v4
+; GISEL-NEXT: v_mul_lo_u32 v18, v14, v5
+; GISEL-NEXT: v_mul_hi_u32 v19, v14, v4
+; GISEL-NEXT: v_subb_u32_e32 v10, vcc, v10, v3, vcc
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v18
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v19
-; GISEL-NEXT: v_cndmask_b32_e64 v19, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v13
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v13, v18, v6
-; GISEL-NEXT: v_mul_hi_u32 v5, v18, v5
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v19, v0
-; GISEL-NEXT: v_mul_hi_u32 v19, v15, v6
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v13, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v19
+; GISEL-NEXT: v_mul_lo_u32 v19, v17, v5
+; GISEL-NEXT: v_mul_hi_u32 v4, v17, v4
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v18, v0
+; GISEL-NEXT: v_mul_hi_u32 v18, v14, v5
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v19, v4
; GISEL-NEXT: v_cndmask_b32_e64 v19, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, v13, v19
-; GISEL-NEXT: v_mul_hi_u32 v6, v18, v6
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v5, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v13, v5
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, v15, v0
-; GISEL-NEXT: v_addc_u32_e32 v15, vcc, v18, v5, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v16, v13, 0
-; GISEL-NEXT: v_sub_i32_e32 v18, vcc, v10, v1
-; GISEL-NEXT: v_mov_b32_e32 v0, v6
-; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v16, v15, v[0:1]
-; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v17, v13, v[0:1]
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
-; GISEL-NEXT: v_cndmask_b32_e32 v6, v10, v18, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v14, v3, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
-; GISEL-NEXT: v_cndmask_b32_e32 v1, v8, v6, vcc
-; GISEL-NEXT: v_mul_lo_u32 v6, v15, v5
-; GISEL-NEXT: v_mul_lo_u32 v7, v13, v0
-; GISEL-NEXT: v_mul_hi_u32 v11, v13, v5
-; GISEL-NEXT: v_add_i32_e64 v8, s[4:5], 0, v12
-; GISEL-NEXT: v_addc_u32_e64 v10, s[4:5], 0, 0, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v6, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v6, v11
-; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[4:5]
-; GISEL-NEXT: v_mul_lo_u32 v11, v15, v0
-; GISEL-NEXT: v_mul_hi_u32 v5, v15, v5
-; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v7, v6
-; GISEL-NEXT: v_mul_hi_u32 v7, v13, v0
-; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v11, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v18
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v18, vcc, v19, v18
+; GISEL-NEXT: v_mul_hi_u32 v5, v17, v5
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v4, v0
+; GISEL-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v18, v4
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v5, v4
+; GISEL-NEXT: v_add_i32_e32 v14, vcc, v14, v0
+; GISEL-NEXT: v_addc_u32_e32 v17, vcc, v17, v4, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v15, v14, 0
+; GISEL-NEXT: v_sub_i32_e32 v18, vcc, v11, v1
+; GISEL-NEXT: v_mov_b32_e32 v0, v5
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v15, v17, v[0:1]
+; GISEL-NEXT: v_subbrev_u32_e32 v10, vcc, 0, v10, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v16, v14, v[0:1]
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v11, v18, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v6, v13, v10, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v7, v5, vcc
+; GISEL-NEXT: v_mul_lo_u32 v5, v17, v4
+; GISEL-NEXT: v_mul_lo_u32 v7, v14, v0
+; GISEL-NEXT: v_mul_hi_u32 v10, v14, v4
+; GISEL-NEXT: v_add_i32_e64 v9, s[4:5], 0, v12
; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v5, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v7, s[4:5], v11, v7
-; GISEL-NEXT: v_mul_hi_u32 v0, v15, v0
-; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v5, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v7, v6
-; GISEL-NEXT: v_add_i32_e64 v0, s[4:5], v0, v6
-; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v13, v5
-; GISEL-NEXT: v_addc_u32_e64 v0, s[4:5], v15, v0, s[4:5]
-; GISEL-NEXT: v_mul_lo_u32 v6, v10, v5
-; GISEL-NEXT: v_mul_lo_u32 v7, v8, v0
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
-; GISEL-NEXT: v_mul_hi_u32 v9, v8, v5
-; GISEL-NEXT: v_mul_hi_u32 v5, v10, v5
-; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v7
+; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v5, v10
+; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, s[4:5]
+; GISEL-NEXT: v_mul_lo_u32 v10, v17, v0
+; GISEL-NEXT: v_mul_hi_u32 v4, v17, v4
+; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v7, v5
+; GISEL-NEXT: v_mul_hi_u32 v7, v14, v0
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v10, v4
+; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v4, v7
+; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e64 v7, s[4:5], v10, v7
+; GISEL-NEXT: v_mul_hi_u32 v0, v17, v0
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v4, v5
+; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v7, v5
+; GISEL-NEXT: v_add_i32_e64 v0, s[4:5], v0, v5
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v14, v4
+; GISEL-NEXT: v_addc_u32_e64 v0, s[4:5], v17, v0, s[4:5]
+; GISEL-NEXT: v_mul_lo_u32 v5, v3, v4
+; GISEL-NEXT: v_mul_lo_u32 v7, v9, v0
+; GISEL-NEXT: v_cndmask_b32_e32 v8, v8, v6, vcc
+; GISEL-NEXT: v_mul_hi_u32 v6, v9, v4
+; GISEL-NEXT: v_mul_hi_u32 v4, v3, v4
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v9
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v6
+; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GISEL-NEXT: v_mul_lo_u32 v6, v3, v0
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
+; GISEL-NEXT: v_mul_hi_u32 v7, v9, v0
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v6, v4
; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v9, v10, v0
-; GISEL-NEXT: v_add_i32_e32 v6, vcc, v7, v6
-; GISEL-NEXT: v_mul_hi_u32 v7, v8, v0
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v9, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v7
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v9, v7
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v5, v6
-; GISEL-NEXT: v_mul_hi_u32 v0, v10, v0
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v2, v9, 0
-; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v11
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v0, v7
-; GISEL-NEXT: v_mov_b32_e32 v0, v6
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v2, v7, v[0:1]
+; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v7
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v4, v5
+; GISEL-NEXT: v_mul_hi_u32 v0, v3, v0
+; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v2, v7, 0
+; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v10
+; GISEL-NEXT: v_add_i32_e32 v6, vcc, v0, v6
+; GISEL-NEXT: v_mov_b32_e32 v0, v5
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v2, v6, v[0:1]
; GISEL-NEXT: v_subrev_i32_e32 v0, vcc, 0, v1
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v4, v9, v[6:7]
-; GISEL-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v3, vcc
-; GISEL-NEXT: v_sub_i32_e32 v3, vcc, v8, v5
-; GISEL-NEXT: v_subb_u32_e64 v5, s[4:5], v10, v6, vcc
-; GISEL-NEXT: v_sub_i32_e64 v6, s[4:5], v10, v6
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v5, v4
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v3, v7, v[5:6]
+; GISEL-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v8, vcc
+; GISEL-NEXT: v_sub_i32_e32 v4, vcc, v9, v4
+; GISEL-NEXT: v_subb_u32_e64 v6, s[4:5], v3, v5, vcc
+; GISEL-NEXT: v_sub_i32_e64 v5, s[4:5], v3, v5
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v6, v3
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v3, v2
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v4, v2
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v5, v4
-; GISEL-NEXT: v_subb_u32_e32 v6, vcc, v6, v4, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v6, v3
+; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v5, v3, vcc
; GISEL-NEXT: v_cndmask_b32_e64 v7, v7, v8, s[4:5]
-; GISEL-NEXT: v_sub_i32_e32 v8, vcc, v3, v2
-; GISEL-NEXT: v_subbrev_u32_e64 v9, s[4:5], 0, v6, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v9, v4
+; GISEL-NEXT: v_sub_i32_e32 v8, vcc, v4, v2
+; GISEL-NEXT: v_subbrev_u32_e64 v9, s[4:5], 0, v5, vcc
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v9, v3
; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[4:5]
; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v8, v2
; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v9, v4
-; GISEL-NEXT: v_subb_u32_e32 v4, vcc, v6, v4, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v9, v3
+; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v5, v3, vcc
; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v8, v2
; GISEL-NEXT: v_cndmask_b32_e64 v10, v10, v11, s[4:5]
-; GISEL-NEXT: v_subbrev_u32_e32 v4, vcc, 0, v4, vcc
+; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; GISEL-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v4, v9, v4, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v4, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc
; GISEL-NEXT: v_subrev_i32_e32 v2, vcc, 0, v2
; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
index 61e1e67..320dfbb 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
@@ -4142,11 +4142,11 @@ define i48 @v_ssubsat_i48(i48 %lhs, i48 %rhs) {
; GFX9-NEXT: v_lshlrev_b64 v[2:3], 16, v[2:3]
; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v0, v2
; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[4:5], v[0:1]
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[6:7], 0, v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[2:3]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v5
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
; GFX9-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4162,7 +4162,7 @@ define i48 @v_ssubsat_i48(i48 %lhs, i48 %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, 0, v[2:3]
; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[4:5], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s5, 0x80000000, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX10-NEXT: s_xor_b32 vcc_lo, vcc_lo, s4
; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
@@ -4179,7 +4179,7 @@ define i48 @v_ssubsat_i48(i48 %lhs, i48 %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, 0, v[2:3]
; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, v[4:5], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v6
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX11-NEXT: s_xor_b32 vcc_lo, vcc_lo, s0
; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_cndmask_b32 v1, v5, v1
; GFX11-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4202,7 +4202,7 @@ define amdgpu_ps i48 @s_ssubsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX6-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], 0
; GFX6-NEXT: s_ashr_i32 s2, s7, 31
; GFX6-NEXT: s_ashr_i32 s5, s7, 15
-; GFX6-NEXT: s_add_u32 s2, s2, 0xffff8000
+; GFX6-NEXT: s_addk_i32 s2, 0x8000
; GFX6-NEXT: v_mov_b32_e32 v0, s5
; GFX6-NEXT: v_mov_b32_e32 v1, s2
; GFX6-NEXT: v_mov_b32_e32 v2, s4
@@ -4227,7 +4227,7 @@ define amdgpu_ps i48 @s_ssubsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX8-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], 0
; GFX8-NEXT: s_ashr_i32 s2, s7, 31
; GFX8-NEXT: s_ashr_i32 s5, s7, 15
-; GFX8-NEXT: s_add_u32 s2, s2, 0xffff8000
+; GFX8-NEXT: s_addk_i32 s2, 0x8000
; GFX8-NEXT: v_mov_b32_e32 v0, s5
; GFX8-NEXT: v_mov_b32_e32 v1, s2
; GFX8-NEXT: v_mov_b32_e32 v2, s4
@@ -4250,7 +4250,7 @@ define amdgpu_ps i48 @s_ssubsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
; GFX9-NEXT: s_ashr_i32 s2, s5, 31
-; GFX9-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX9-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: v_mov_b32_e32 v2, s4
@@ -4274,7 +4274,7 @@ define amdgpu_ps i48 @s_ssubsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX10-NEXT: v_cmp_gt_i64_e64 s1, s[2:3], 0
; GFX10-NEXT: v_mov_b32_e32 v1, s5
; GFX10-NEXT: s_ashr_i32 s2, s5, 31
-; GFX10-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX10-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX10-NEXT: s_xor_b32 s0, s1, s0
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4293,7 +4293,7 @@ define amdgpu_ps i48 @s_ssubsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1]
; GFX11-NEXT: v_cmp_gt_i64_e64 s1, s[2:3], 0
; GFX11-NEXT: s_ashr_i32 s2, s5, 31
-; GFX11-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX11-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX11-NEXT: s_xor_b32 s0, s1, s0
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4351,11 +4351,11 @@ define amdgpu_ps <2 x float> @ssubsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v1, vcc
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], v[2:3]
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], 0, v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], 0, v[0:1]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[2:3], s[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4371,7 +4371,7 @@ define amdgpu_ps <2 x float> @ssubsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX10-NEXT: v_cmp_lt_i64_e64 s0, 0, v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4388,7 +4388,7 @@ define amdgpu_ps <2 x float> @ssubsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, 0, v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4442,15 +4442,15 @@ define amdgpu_ps <2 x float> @ssubsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
; GFX9-LABEL: ssubsat_i48_vs:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 16, v[0:1]
-; GFX9-NEXT: s_lshl_b64 s[2:3], s[0:1], 16
-; GFX9-NEXT: v_mov_b32_e32 v3, s3
-; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s2, v0
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 16
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], v[2:3], v[0:1]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[2:3], s[2:3], 0
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], 0
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[2:3], s[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4466,7 +4466,7 @@ define amdgpu_ps <2 x float> @ssubsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
; GFX10-NEXT: v_cmp_gt_i64_e64 s0, s[0:1], 0
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4483,7 +4483,7 @@ define amdgpu_ps <2 x float> @ssubsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
; GFX11-NEXT: v_cmp_gt_i64_e64 s0, s[0:1], 0
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4529,11 +4529,11 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v0, v2
; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[4:5], v[0:1]
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[6:7], 0, v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[2:3]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v5
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -4546,7 +4546,7 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e64 s4, 0, v[2:3]
; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s5, 0x80000000, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
@@ -4560,7 +4560,7 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, 0, v[2:3]
; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v6
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_cndmask_b32 v1, v5, v1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -4578,7 +4578,7 @@ define amdgpu_ps i64 @s_ssubsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX6-NEXT: v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
; GFX6-NEXT: s_ashr_i32 s2, s5, 31
-; GFX6-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX6-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v0, s2
; GFX6-NEXT: v_mov_b32_e32 v1, s3
; GFX6-NEXT: v_mov_b32_e32 v2, s4
@@ -4599,7 +4599,7 @@ define amdgpu_ps i64 @s_ssubsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX8-NEXT: v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
; GFX8-NEXT: s_ashr_i32 s2, s5, 31
-; GFX8-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX8-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: v_mov_b32_e32 v2, s4
@@ -4620,7 +4620,7 @@ define amdgpu_ps i64 @s_ssubsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
; GFX9-NEXT: s_ashr_i32 s2, s5, 31
-; GFX9-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX9-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: v_mov_b32_e32 v2, s4
@@ -4641,7 +4641,7 @@ define amdgpu_ps i64 @s_ssubsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX10-NEXT: v_cmp_gt_i64_e64 s1, s[2:3], 0
; GFX10-NEXT: v_mov_b32_e32 v1, s5
; GFX10-NEXT: s_ashr_i32 s2, s5, 31
-; GFX10-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX10-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX10-NEXT: s_xor_b32 s0, s1, s0
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4657,7 +4657,7 @@ define amdgpu_ps i64 @s_ssubsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1]
; GFX11-NEXT: v_cmp_gt_i64_e64 s1, s[2:3], 0
; GFX11-NEXT: s_ashr_i32 s2, s5, 31
-; GFX11-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX11-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX11-NEXT: s_xor_b32 s0, s1, s0
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4702,11 +4702,11 @@ define amdgpu_ps <2 x float> @ssubsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v1, vcc
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], v[2:3]
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], 0, v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], 0, v[0:1]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[2:3], s[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: ; return to shader part epilog
@@ -4718,7 +4718,7 @@ define amdgpu_ps <2 x float> @ssubsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX10-NEXT: v_cmp_lt_i64_e64 s0, 0, v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4731,7 +4731,7 @@ define amdgpu_ps <2 x float> @ssubsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, 0, v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: ; return to shader part epilog
@@ -4774,11 +4774,11 @@ define amdgpu_ps <2 x float> @ssubsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], v[2:3], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[0:1]
; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], 0
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[0:1], s[2:3]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: ; return to shader part epilog
@@ -4790,7 +4790,7 @@ define amdgpu_ps <2 x float> @ssubsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
; GFX10-NEXT: v_cmp_gt_i64_e64 s0, s[0:1], 0
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4803,7 +4803,7 @@ define amdgpu_ps <2 x float> @ssubsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
; GFX11-NEXT: v_cmp_gt_i64_e64 s0, s[0:1], 0
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: ; return to shader part epilog
@@ -4866,21 +4866,20 @@ define <2 x i64> @v_ssubsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v0, v4
; GFX9-NEXT: v_subb_co_u32_e32 v9, vcc, v1, v5, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[8:9], v[0:1]
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[6:7], 0, v[4:5]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[8:9], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[4:5]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v9
-; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v0, v1
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v2, v6
; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v7, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[4:5], v[2:3]
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[6:7], 0, v[6:7]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[6:7]
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v5
-; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, 0x80000000, v2
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v3, 0x80000000, v2
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -4896,10 +4895,10 @@ define <2 x i64> @v_ssubsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[8:9], v[0:1]
; GFX10-NEXT: v_cmp_lt_i64_e64 s4, 0, v[4:5]
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v11
-; GFX10-NEXT: v_cmp_lt_i64_e64 s6, 0, v[6:7]
-; GFX10-NEXT: v_add_co_u32 v1, s5, 0x80000000, v12
; GFX10-NEXT: v_cmp_lt_i64_e64 s5, v[10:11], v[2:3]
-; GFX10-NEXT: v_add_co_u32 v3, s7, 0x80000000, v4
+; GFX10-NEXT: v_cmp_lt_i64_e64 s6, 0, v[6:7]
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v12
+; GFX10-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v12, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc_lo
@@ -4921,8 +4920,8 @@ define <2 x i64> @v_ssubsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v11
; GFX11-NEXT: v_cmp_lt_i64_e64 s1, v[10:11], v[2:3]
; GFX11-NEXT: v_cmp_lt_i64_e64 s2, 0, v[6:7]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v12
-; GFX11-NEXT: v_add_co_u32 v3, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v12
+; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v8, v12 :: v_dual_cndmask_b32 v1, v9, v1
; GFX11-NEXT: s_xor_b32 vcc_lo, s2, s1
@@ -4942,7 +4941,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
; GFX6-NEXT: v_cmp_gt_i64_e64 s[0:1], s[4:5], 0
; GFX6-NEXT: s_ashr_i32 s4, s9, 31
-; GFX6-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX6-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: v_mov_b32_e32 v1, s5
; GFX6-NEXT: v_mov_b32_e32 v2, s8
@@ -4957,7 +4956,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
; GFX6-NEXT: v_cmp_gt_i64_e64 s[2:3], s[6:7], 0
; GFX6-NEXT: s_ashr_i32 s4, s1, 31
-; GFX6-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX6-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: v_mov_b32_e32 v1, s5
; GFX6-NEXT: v_mov_b32_e32 v4, s0
@@ -4980,7 +4979,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
; GFX8-NEXT: v_cmp_gt_i64_e64 s[0:1], s[4:5], 0
; GFX8-NEXT: s_ashr_i32 s4, s9, 31
-; GFX8-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX8-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: v_mov_b32_e32 v2, s8
@@ -4995,7 +4994,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
; GFX8-NEXT: v_cmp_gt_i64_e64 s[2:3], s[6:7], 0
; GFX8-NEXT: s_ashr_i32 s4, s1, 31
-; GFX8-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX8-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: v_mov_b32_e32 v4, s0
@@ -5018,7 +5017,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[4:5], 0
; GFX9-NEXT: s_ashr_i32 s4, s9, 31
-; GFX9-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX9-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s8
@@ -5033,7 +5032,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
; GFX9-NEXT: v_cmp_gt_i64_e64 s[2:3], s[6:7], 0
; GFX9-NEXT: s_ashr_i32 s4, s1, 31
-; GFX9-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX9-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v4, s0
@@ -5056,7 +5055,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX10-NEXT: v_cmp_gt_i64_e64 s1, s[4:5], 0
; GFX10-NEXT: s_ashr_i32 s4, s9, 31
; GFX10-NEXT: v_mov_b32_e32 v1, s9
-; GFX10-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX10-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX10-NEXT: s_xor_b32 s8, s1, s0
; GFX10-NEXT: s_sub_u32 s0, s2, s6
; GFX10-NEXT: s_subb_u32 s1, s3, s7
@@ -5067,7 +5066,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s4, s8
; GFX10-NEXT: s_ashr_i32 s4, s1, 31
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s5, s8
-; GFX10-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX10-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX10-NEXT: s_xor_b32 s1, s3, s2
; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s4, s1
; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s0, s1
@@ -5085,7 +5084,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[8:9], s[0:1]
; GFX11-NEXT: v_cmp_gt_i64_e64 s1, s[4:5], 0
; GFX11-NEXT: s_ashr_i32 s4, s9, 31
-; GFX11-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX11-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX11-NEXT: s_xor_b32 s8, s1, s0
; GFX11-NEXT: s_sub_u32 s0, s2, s6
; GFX11-NEXT: s_subb_u32 s1, s3, s7
@@ -5095,7 +5094,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s4, s8
; GFX11-NEXT: s_ashr_i32 s4, s1, 31
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s5, s8
-; GFX11-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX11-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX11-NEXT: s_xor_b32 s1, s3, s2
; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s4, s1
; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s0, s1
@@ -5134,7 +5133,7 @@ define amdgpu_ps i128 @s_ssubsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX6-NEXT: s_ashr_i32 s0, s11, 31
; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX6-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX6-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v1, s0
; GFX6-NEXT: v_mov_b32_e32 v2, s8
; GFX6-NEXT: v_mov_b32_e32 v3, s9
@@ -5183,7 +5182,7 @@ define amdgpu_ps i128 @s_ssubsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX8-NEXT: s_ashr_i32 s0, s11, 31
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX8-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX8-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v1, s0
; GFX8-NEXT: v_mov_b32_e32 v2, s8
; GFX8-NEXT: v_mov_b32_e32 v3, s9
@@ -5232,7 +5231,7 @@ define amdgpu_ps i128 @s_ssubsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: s_ashr_i32 s0, s11, 31
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX9-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: v_mov_b32_e32 v2, s8
; GFX9-NEXT: v_mov_b32_e32 v3, s9
@@ -5274,7 +5273,7 @@ define amdgpu_ps i128 @s_ssubsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s2
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1
-; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX10-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc_lo
; GFX10-NEXT: v_mov_b32_e32 v2, s9
; GFX10-NEXT: v_mov_b32_e32 v3, s11
@@ -5317,7 +5316,7 @@ define amdgpu_ps i128 @s_ssubsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s2
; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1
-; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX11-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX11-NEXT: v_dual_cndmask_b32 v1, v3, v2 :: v_dual_mov_b32 v2, s9
; GFX11-NEXT: v_mov_b32_e32 v3, s11
; GFX11-NEXT: v_xor_b32_e32 v0, v1, v0
@@ -5427,9 +5426,8 @@ define amdgpu_ps <4 x float> @ssubsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v7
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v8
-; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
-; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v2, v1
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_add_u32_e32 v3, 0x80000000, v2
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
@@ -5456,7 +5454,7 @@ define amdgpu_ps <4 x float> @ssubsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
; GFX10-NEXT: v_ashrrev_i32_e32 v2, 31, v7
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v3, s0, 0x80000000, v2
+; GFX10-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v2
; GFX10-NEXT: v_xor_b32_e32 v0, v0, v8
; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
@@ -5484,8 +5482,7 @@ define amdgpu_ps <4 x float> @ssubsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
; GFX11-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc_lo
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
; GFX11-NEXT: v_ashrrev_i32_e32 v2, 31, v7
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v3, null, 0x80000000, v2
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v1, v0 :: v_dual_add_nc_u32 v3, 0x80000000, v2
; GFX11-NEXT: v_xor_b32_e32 v0, v0, v8
; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
@@ -5594,9 +5591,8 @@ define amdgpu_ps <4 x float> @ssubsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v7
-; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
-; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v2, v1
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_add_u32_e32 v3, 0x80000000, v2
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
@@ -5625,7 +5621,7 @@ define amdgpu_ps <4 x float> @ssubsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v2, 31, v7
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
-; GFX10-NEXT: v_add_co_u32 v3, s0, 0x80000000, v2
+; GFX10-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v2
; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v8, vcc_lo
; GFX10-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
@@ -5652,12 +5648,12 @@ define amdgpu_ps <4 x float> @ssubsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
; GFX11-NEXT: v_cmp_gt_i64_e64 s0, s[2:3], 0
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
-; GFX11-NEXT: v_ashrrev_i32_e32 v2, 31, v7
; GFX11-NEXT: v_cndmask_b32_e64 v9, 0, 1, s0
; GFX11-NEXT: s_and_b32 s0, 1, s4
-; GFX11-NEXT: v_add_co_u32 v3, null, 0x80000000, v2
+; GFX11-NEXT: v_ashrrev_i32_e32 v2, 31, v7
; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v2
; GFX11-NEXT: v_cndmask_b32_e32 v1, v9, v8, vcc_lo
; GFX11-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
@@ -5805,9 +5801,8 @@ define <2 x i128> @v_ssubsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v19
-; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
-; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v2, v1
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_add_u32_e32 v3, 0x80000000, v2
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v16, v2, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v17, v2, vcc
@@ -5831,8 +5826,8 @@ define <2 x i128> @v_ssubsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX9-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
; GFX9-NEXT: v_xor_b32_e32 v4, v5, v4
; GFX9-NEXT: v_ashrrev_i32_e32 v6, 31, v11
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, 0x80000000, v6
; GFX9-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX9-NEXT: v_add_u32_e32 v7, 0x80000000, v6
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v8, v6, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v6, vcc
@@ -5877,18 +5872,18 @@ define <2 x i128> @v_ssubsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v21
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc_lo
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[14:15]
-; GFX10-NEXT: v_ashrrev_i32_e32 v3, 31, v19
-; GFX10-NEXT: v_add_co_u32 v7, s5, 0x80000000, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x80000000, v6
; GFX10-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v4, s4, 0x80000000, v3
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX10-NEXT: v_xor_b32_e32 v1, v2, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v16, v3, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v18, v3, vcc_lo
-; GFX10-NEXT: v_and_b32_e32 v5, 1, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v17, v3, vcc_lo
+; GFX10-NEXT: v_ashrrev_i32_e32 v2, 31, v19
+; GFX10-NEXT: v_and_b32_e32 v3, 1, v1
+; GFX10-NEXT: v_add_nc_u32_e32 v4, 0x80000000, v2
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v16, v2, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v17, v2, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v3
; GFX10-NEXT: v_cndmask_b32_e32 v3, v19, v4, vcc_lo
-; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v5
; GFX10-NEXT: v_cndmask_b32_e64 v4, v8, v6, s4
; GFX10-NEXT: v_cndmask_b32_e64 v5, v9, v6, s4
; GFX10-NEXT: v_cndmask_b32_e64 v6, v20, v6, s4
@@ -5931,18 +5926,16 @@ define <2 x i128> @v_ssubsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v21
; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc_lo
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[14:15]
-; GFX11-NEXT: v_ashrrev_i32_e32 v3, 31, v19
+; GFX11-NEXT: v_dual_cndmask_b32 v2, v5, v4 :: v_dual_add_nc_u32 v7, 0x80000000, v6
+; GFX11-NEXT: v_xor_b32_e32 v1, v2, v1
+; GFX11-NEXT: v_ashrrev_i32_e32 v2, 31, v19
; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX11-NEXT: v_add_co_u32 v7, null, 0x80000000, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc_lo
+; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x80000000, v2
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: v_add_co_u32 v4, null, 0x80000000, v3
-; GFX11-NEXT: v_xor_b32_e32 v1, v2, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v16, v3, vcc_lo
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v18, v3 :: v_dual_and_b32 v5, 1, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v17, v3, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v19, v4, vcc_lo
-; GFX11-NEXT: v_cmp_ne_u32_e64 s0, 0, v5
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v16, v2 :: v_dual_and_b32 v3, 1, v1
+; GFX11-NEXT: v_cmp_ne_u32_e64 s0, 0, v3
+; GFX11-NEXT: v_cndmask_b32_e32 v1, v17, v2, vcc_lo
+; GFX11-NEXT: v_dual_cndmask_b32 v2, v18, v2 :: v_dual_cndmask_b32 v3, v19, v4
; GFX11-NEXT: v_cndmask_b32_e64 v4, v8, v6, s0
; GFX11-NEXT: v_cndmask_b32_e64 v5, v9, v6, s0
; GFX11-NEXT: v_cndmask_b32_e64 v6, v20, v6, s0
@@ -5978,7 +5971,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX6-NEXT: s_ashr_i32 s0, s19, 31
; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX6-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX6-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v1, s0
; GFX6-NEXT: v_mov_b32_e32 v2, s16
; GFX6-NEXT: v_mov_b32_e32 v3, s17
@@ -6013,7 +6006,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX6-NEXT: s_ashr_i32 s4, s3, 31
; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX6-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX6-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v1, s4
; GFX6-NEXT: v_mov_b32_e32 v2, s0
; GFX6-NEXT: v_mov_b32_e32 v3, s1
@@ -6066,7 +6059,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX8-NEXT: s_ashr_i32 s0, s19, 31
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX8-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX8-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v1, s0
; GFX8-NEXT: v_mov_b32_e32 v2, s16
; GFX8-NEXT: v_mov_b32_e32 v3, s17
@@ -6107,7 +6100,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX8-NEXT: s_ashr_i32 s4, s3, 31
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX8-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX8-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v1, s4
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: v_mov_b32_e32 v3, s1
@@ -6160,7 +6153,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: s_ashr_i32 s0, s19, 31
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX9-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: v_mov_b32_e32 v2, s16
; GFX9-NEXT: v_mov_b32_e32 v3, s17
@@ -6201,7 +6194,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: s_ashr_i32 s4, s3, 31
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX9-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: v_mov_b32_e32 v2, s0
; GFX9-NEXT: v_mov_b32_e32 v3, s1
@@ -6244,7 +6237,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX10-NEXT: s_cselect_b32 s1, 1, 0
; GFX10-NEXT: s_ashr_i32 s8, s17, 31
; GFX10-NEXT: s_and_b32 s1, 1, s1
-; GFX10-NEXT: s_add_u32 s9, s8, 0x80000000
+; GFX10-NEXT: s_add_i32 s9, s8, 0x80000000
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s2
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1
@@ -6273,7 +6266,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX10-NEXT: s_cselect_b32 s5, 1, 0
; GFX10-NEXT: s_ashr_i32 s4, s3, 31
; GFX10-NEXT: s_and_b32 s5, 1, s5
-; GFX10-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX10-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX10-NEXT: v_cndmask_b32_e64 v4, 0, 1, s6
; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc_lo
; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s5
@@ -6326,7 +6319,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX11-NEXT: s_cselect_b32 s1, 1, 0
; GFX11-NEXT: s_ashr_i32 s8, s19, 31
; GFX11-NEXT: s_and_b32 s1, 1, s1
-; GFX11-NEXT: s_add_u32 s9, s8, 0x80000000
+; GFX11-NEXT: s_add_i32 s9, s8, 0x80000000
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s2
; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1
@@ -6357,7 +6350,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc_lo
; GFX11-NEXT: v_cndmask_b32_e64 v4, 0, 1, s6
; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s5
-; GFX11-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX11-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX11-NEXT: v_dual_cndmask_b32 v2, v4, v3 :: v_dual_mov_b32 v3, s16
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-NEXT: v_mov_b32_e32 v0, s18
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
index 887c43f..d155513 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
@@ -2062,13 +2062,9 @@ define <2 x i64> @v_udiv_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
; GISEL-NEXT: v_mul_hi_u32 v17, v2, v5
; GISEL-NEXT: v_mul_hi_u32 v5, 0, v5
; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v12
-; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v6, vcc, v13, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v10, vcc, v10, v15
-; GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v16, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v9
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v14
@@ -2077,10 +2073,6 @@ define <2 x i64> @v_udiv_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v17
; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v12, v8
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v13, v9
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, v15, v10
-; GISEL-NEXT: v_add_i32_e32 v11, vcc, v16, v11
; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v8
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v10
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
index 5c6bb6d..07480a0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
@@ -2480,13 +2480,9 @@ define <2 x i64> @v_urem_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
; GISEL-NEXT: v_mul_hi_u32 v17, v2, v5
; GISEL-NEXT: v_mul_hi_u32 v5, 0, v5
; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v12
-; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v6, vcc, v13, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v10, vcc, v10, v15
-; GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v16, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v9
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v14
@@ -2495,10 +2491,6 @@ define <2 x i64> @v_urem_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v17
; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v12, v8
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v13, v9
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, v15, v10
-; GISEL-NEXT: v_add_i32_e32 v11, vcc, v16, v11
; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v8
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v10
diff --git a/llvm/test/CodeGen/AMDGPU/add_sub_u64_pseudos.mir b/llvm/test/CodeGen/AMDGPU/add_sub_u64_pseudos.mir
new file mode 100644
index 0000000..cba114c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/add_sub_u64_pseudos.mir
@@ -0,0 +1,68 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -run-pass=finalize-isel -o - %s | FileCheck -check-prefix=GFX11 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -run-pass=finalize-isel -o - %s | FileCheck -check-prefix=GFX12 %s
+
+---
+name: reg_ops
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GFX11-LABEL: name: reg_ops
+ ; GFX11: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX11-NEXT: [[DEF1:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[DEF1]].sub0
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[DEF1]].sub1
+ ; GFX11-NEXT: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY]], [[COPY2]], implicit-def $scc
+ ; GFX11-NEXT: [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 [[COPY1]], [[COPY3]], implicit-def $scc, implicit $scc
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+ ;
+ ; GFX12-LABEL: name: reg_ops
+ ; GFX12: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX12-NEXT: [[DEF1:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX12-NEXT: [[S_ADD_U64_:%[0-9]+]]:sreg_64 = S_ADD_U64 [[DEF]], [[DEF1]]
+ %0:sreg_64 = IMPLICIT_DEF
+ %1:sreg_64 = IMPLICIT_DEF
+ %2:sreg_64 = S_ADD_U64_PSEUDO %0, %1, implicit-def $scc
+...
+
+---
+name: lhs_imm
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GFX11-LABEL: name: lhs_imm
+ ; GFX11: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub1
+ ; GFX11-NEXT: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 6565, [[COPY]], implicit-def $scc
+ ; GFX11-NEXT: [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 0, [[COPY1]], implicit-def $scc, implicit $scc
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+ ;
+ ; GFX12-LABEL: name: lhs_imm
+ ; GFX12: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX12-NEXT: [[S_ADD_U64_:%[0-9]+]]:sreg_64 = S_ADD_U64 6565, [[DEF]]
+ %0:sreg_64 = IMPLICIT_DEF
+ %1:sreg_64 = S_ADD_U64_PSEUDO 6565, %0, implicit-def $scc
+...
+
+---
+name: rhs_imm
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GFX11-LABEL: name: rhs_imm
+ ; GFX11: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub1
+ ; GFX11-NEXT: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY]], 6565, implicit-def $scc
+ ; GFX11-NEXT: [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 [[COPY1]], 0, implicit-def $scc, implicit $scc
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+ ;
+ ; GFX12-LABEL: name: rhs_imm
+ ; GFX12: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX12-NEXT: [[S_ADD_U64_:%[0-9]+]]:sreg_64 = S_ADD_U64 [[DEF]], 6565
+ %0:sreg_64 = IMPLICIT_DEF
+ %1:sreg_64 = S_ADD_U64_PSEUDO %0, 6565, implicit-def $scc
+...
diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
index 66034af..cff9ce0 100644
--- a/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
@@ -233,9 +233,9 @@ attributes #1 = { nounwind }
; AKF_HSA: attributes #[[ATTR1]] = { nounwind }
;.
; ATTRIBUTOR_HSA: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
-; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
; AKF_HSA: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 500}
;.
diff --git a/llvm/test/CodeGen/AMDGPU/allow-check.ll b/llvm/test/CodeGen/AMDGPU/allow-check.ll
new file mode 100644
index 0000000..d4f5621
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/allow-check.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -global-isel=0 -fast-isel=1 | FileCheck %s
+
+define i1 @test_runtime() local_unnamed_addr {
+; CHECK-LABEL: test_runtime:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v0, 1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %allow = call i1 @llvm.allow.runtime.check(metadata !"test_check")
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.runtime.check(metadata) nounwind
+
+define i1 @test_ubsan() local_unnamed_addr {
+; CHECK-LABEL: test_ubsan:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v0, 1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %allow = call i1 @llvm.allow.ubsan.check(i8 7)
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.ubsan.check(i8) nounwind
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll
new file mode 100644
index 0000000..33b1cc6
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll
@@ -0,0 +1,255 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals all --version 4
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx90a -passes=amdgpu-attributor %s | FileCheck %s
+
+define amdgpu_kernel void @kernel_uses_asm_virtreg() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "a"(i32 poison)
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_virtreg_def() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg_def(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: [[DEF:%.*]] = call i32 asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ %def = call i32 asm sideeffect "; def $0", "=a"()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_physreg_def_tuple() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_physreg_def_tuple(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: [[DEF:%.*]] = call i64 asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ %def = call i64 asm sideeffect "; def $0", "={a[0:1]}"()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_virtreg_second_arg() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg_second_arg(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "v,a"(i32 poison, i32 poison)
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_non_agpr_asm() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_non_agpr_asm(
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "v"(i32 poison)
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_physreg() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_physreg(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "{a0}"(i32 poison)
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_physreg_tuple() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_physreg_tuple(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "{a[0:1]}"(i64 poison)
+ ret void
+}
+
+define void @func_uses_asm_virtreg_agpr() {
+; CHECK-LABEL: define void @func_uses_asm_virtreg_agpr(
+; CHECK-SAME: ) #[[ATTR2:[0-9]+]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "a"(i32 poison)
+ ret void
+}
+
+define void @func_uses_asm_physreg_agpr() {
+; CHECK-LABEL: define void @func_uses_asm_physreg_agpr(
+; CHECK-SAME: ) #[[ATTR2]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "{a0}"(i32 poison)
+ ret void
+}
+
+define void @func_uses_asm_physreg_agpr_tuple() {
+; CHECK-LABEL: define void @func_uses_asm_physreg_agpr_tuple(
+; CHECK-SAME: ) #[[ATTR2]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "{a[0:1]}"(i64 poison)
+ ret void
+}
+
+declare void @unknown()
+
+define amdgpu_kernel void @kernel_calls_extern() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_extern(
+; CHECK-SAME: ) #[[ATTR4:[0-9]+]] {
+; CHECK-NEXT: call void @unknown()
+; CHECK-NEXT: ret void
+;
+ call void @unknown()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_extern_marked_callsite() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_extern_marked_callsite(
+; CHECK-SAME: ) #[[ATTR4]] {
+; CHECK-NEXT: call void @unknown() #[[ATTR9:[0-9]+]]
+; CHECK-NEXT: ret void
+;
+ call void @unknown() #0
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_indirect(ptr %indirect) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_indirect(
+; CHECK-SAME: ptr [[INDIRECT:%.*]]) #[[ATTR4]] {
+; CHECK-NEXT: call void [[INDIRECT]]()
+; CHECK-NEXT: ret void
+;
+ call void %indirect()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_indirect_marked_callsite(ptr %indirect) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_indirect_marked_callsite(
+; CHECK-SAME: ptr [[INDIRECT:%.*]]) #[[ATTR4]] {
+; CHECK-NEXT: call void [[INDIRECT]]() #[[ATTR9]]
+; CHECK-NEXT: ret void
+;
+ call void %indirect() #0
+ ret void
+}
+
+define amdgpu_kernel void @kernel_transitively_uses_agpr_asm() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_transitively_uses_agpr_asm(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void @func_uses_asm_physreg_agpr()
+; CHECK-NEXT: ret void
+;
+ call void @func_uses_asm_physreg_agpr()
+ ret void
+}
+
+define void @empty() {
+; CHECK-LABEL: define void @empty(
+; CHECK-SAME: ) #[[ATTR5:[0-9]+]] {
+; CHECK-NEXT: ret void
+;
+ ret void
+}
+
+define void @also_empty() {
+; CHECK-LABEL: define void @also_empty(
+; CHECK-SAME: ) #[[ATTR5]] {
+; CHECK-NEXT: ret void
+;
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_empty() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_empty(
+; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-NEXT: call void @empty()
+; CHECK-NEXT: ret void
+;
+ call void @empty()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_non_agpr_and_agpr() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_non_agpr_and_agpr(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void @empty()
+; CHECK-NEXT: call void @func_uses_asm_physreg_agpr()
+; CHECK-NEXT: ret void
+;
+ call void @empty()
+ call void @func_uses_asm_physreg_agpr()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_generic_intrinsic(ptr %ptr0, ptr %ptr1, i64 %size) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_generic_intrinsic(
+; CHECK-SAME: ptr [[PTR0:%.*]], ptr [[PTR1:%.*]], i64 [[SIZE:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[PTR0]], ptr [[PTR1]], i64 [[SIZE]], i1 false)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memcpy.p0.p0.i64(ptr %ptr0, ptr %ptr1, i64 %size, i1 false)
+ ret void
+}
+
+declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg)
+
+define amdgpu_kernel void @kernel_calls_mfma.f32.32x32x1f32(ptr addrspace(1) %out, float %a, float %b, <32 x float> %c) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_mfma.f32.32x32x1f32(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], float [[A:%.*]], float [[B:%.*]], <32 x float> [[C:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[RESULT:%.*]] = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float [[A]], float [[B]], <32 x float> [[C]], i32 0, i32 0, i32 0)
+; CHECK-NEXT: store <32 x float> [[RESULT]], ptr addrspace(1) [[OUT]], align 128
+; CHECK-NEXT: ret void
+;
+ %result = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %a, float %b, <32 x float> %c, i32 0, i32 0, i32 0)
+ store <32 x float> %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_workitem_id_x(ptr addrspace(1) %out) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_workitem_id_x(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: store i32 [[RESULT]], ptr addrspace(1) [[OUT]], align 4
+; CHECK-NEXT: ret void
+;
+ %result = call i32 @llvm.amdgcn.workitem.id.x()
+ store i32 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @indirect_calls_none_agpr(i1 %cond) {
+; CHECK-LABEL: define amdgpu_kernel void @indirect_calls_none_agpr(
+; CHECK-SAME: i1 [[COND:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[FPTR:%.*]] = select i1 [[COND]], ptr @empty, ptr @also_empty
+; CHECK-NEXT: call void [[FPTR]]()
+; CHECK-NEXT: ret void
+;
+ %fptr = select i1 %cond, ptr @empty, ptr @also_empty
+ call void %fptr()
+ ret void
+}
+
+
+attributes #0 = { "amdgpu-no-agpr" }
+;.
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,8" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { "amdgpu-waves-per-eu"="4,8" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR4]] = { "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR5]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,8" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR6:[0-9]+]] = { convergent nocallback nofree nosync nounwind willreturn memory(none) "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR7:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR8:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR9]] = { "amdgpu-no-agpr" }
+;.
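
The pattern the checks establish: "amdgpu-no-agpr" is inferred for any function whose transitive code provably never needs AGPRs, and is withheld when inline assembly uses an "a" (AGPR) constraint or a physical a-register, or when the callee set is unknown (external or indirect calls), since an unknown callee might use them. Note that the MFMA kernel still receives the attribute; the attributor does not treat the intrinsic call itself as requiring AGPRs. A minimal sketch of the distinction (function names illustrative):

  ; After amdgpu-attributor, @no_agpr keeps "amdgpu-no-agpr" but @needs_agpr
  ; does not, because the "a" constraint may force an AGPR allocation.
  define void @needs_agpr() {
    call void asm sideeffect "; use $0", "a"(i32 poison)
    ret void
  }

  define void @no_agpr() {
    call void asm sideeffect "; use $0", "v"(i32 poison)
    ret void
  }
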
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-break-large-phis.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-break-large-phis.ll
index 192bf7c..93b9aea 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-break-large-phis.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-break-large-phis.ll
@@ -1197,3 +1197,54 @@ reallyfinally:
store <5 x double> %val, ptr %out, align 1
ret void
}
+
+define amdgpu_kernel void @pr85718(i1 %Bool, ptr %Ptr, <4 x float> %Vec1, <4 x float> %Vec2) {
+; OPT-LABEL: @pr85718(
+; OPT-NEXT: BB0:
+; OPT-NEXT: [[I:%.*]] = insertelement <4 x float> [[VEC1:%.*]], float 4.200000e+01, i1 true
+; OPT-NEXT: br label [[BB1:%.*]]
+; OPT: BB1:
+; OPT-NEXT: [[TMP0:%.*]] = phi float [ [[LARGEPHI_EXTRACTSLICE0:%.*]], [[BB2:%.*]] ], [ [[LARGEPHI_EXTRACTSLICE1:%.*]], [[BB1]] ], [ 0.000000e+00, [[BB0:%.*]] ]
+; OPT-NEXT: [[TMP1:%.*]] = phi float [ [[LARGEPHI_EXTRACTSLICE3:%.*]], [[BB2]] ], [ [[LARGEPHI_EXTRACTSLICE4:%.*]], [[BB1]] ], [ 0.000000e+00, [[BB0]] ]
+; OPT-NEXT: [[TMP2:%.*]] = phi float [ [[LARGEPHI_EXTRACTSLICE6:%.*]], [[BB2]] ], [ [[LARGEPHI_EXTRACTSLICE7:%.*]], [[BB1]] ], [ 0.000000e+00, [[BB0]] ]
+; OPT-NEXT: [[TMP3:%.*]] = phi float [ [[LARGEPHI_EXTRACTSLICE9:%.*]], [[BB2]] ], [ [[LARGEPHI_EXTRACTSLICE10:%.*]], [[BB1]] ], [ 0.000000e+00, [[BB0]] ]
+; OPT-NEXT: [[LARGEPHI_INSERTSLICE0:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0
+; OPT-NEXT: [[LARGEPHI_INSERTSLICE1:%.*]] = insertelement <4 x float> [[LARGEPHI_INSERTSLICE0]], float [[TMP1]], i64 1
+; OPT-NEXT: [[LARGEPHI_INSERTSLICE2:%.*]] = insertelement <4 x float> [[LARGEPHI_INSERTSLICE1]], float [[TMP2]], i64 2
+; OPT-NEXT: [[LARGEPHI_INSERTSLICE3:%.*]] = insertelement <4 x float> [[LARGEPHI_INSERTSLICE2]], float [[TMP3]], i64 3
+; OPT-NEXT: store <4 x float> [[LARGEPHI_INSERTSLICE3]], ptr [[PTR:%.*]], align 128
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE1]] = extractelement <4 x float> [[VEC2:%.*]], i64 0
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE4]] = extractelement <4 x float> [[VEC2]], i64 1
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE7]] = extractelement <4 x float> [[VEC2]], i64 2
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE10]] = extractelement <4 x float> [[VEC2]], i64 3
+; OPT-NEXT: br i1 [[BOOL:%.*]], label [[BB1]], label [[BB2]]
+; OPT: BB2:
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE0]] = extractelement <4 x float> [[I]], i64 0
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE3]] = extractelement <4 x float> [[I]], i64 1
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE6]] = extractelement <4 x float> [[I]], i64 2
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE9]] = extractelement <4 x float> [[I]], i64 3
+; OPT-NEXT: br label [[BB1]]
+;
+; NOOPT-LABEL: @pr85718(
+; NOOPT-NEXT: BB0:
+; NOOPT-NEXT: [[I:%.*]] = insertelement <4 x float> [[VEC1:%.*]], float 4.200000e+01, i1 true
+; NOOPT-NEXT: br label [[BB1:%.*]]
+; NOOPT: BB1:
+; NOOPT-NEXT: [[PHI:%.*]] = phi <4 x float> [ [[I]], [[BB2:%.*]] ], [ [[VEC2:%.*]], [[BB1]] ], [ zeroinitializer, [[BB0:%.*]] ]
+; NOOPT-NEXT: store <4 x float> [[PHI]], ptr [[PTR:%.*]], align 128
+; NOOPT-NEXT: br i1 [[BOOL:%.*]], label [[BB1]], label [[BB2]]
+; NOOPT: BB2:
+; NOOPT-NEXT: br label [[BB1]]
+;
+BB0:
+ %I = insertelement <4 x float> %Vec1, float 4.200000e+01, i1 true
+ br label %BB1
+
+BB1: ; preds = %BB0, %BB1, %BB2
+ %PHI = phi <4 x float> [ %I, %BB2 ], [ %Vec2, %BB1 ], [ zeroinitializer, %BB0 ]
+ store <4 x float> %PHI, ptr %Ptr, align 128
+ br i1 %Bool, label %BB1, label %BB2
+
+BB2: ; preds = %BB1
+ br label %BB1
+}
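
The pr85718 case exercises large-PHI breaking in AMDGPUCodeGenPrepare when the vector PHI feeds itself through the BB1 self-loop: each predecessor contributes per-lane extractelement slices, the block then carries one scalar phi per lane, and the vector is rebuilt with a chain of insertelement instructions, exactly as the OPT checks spell out. A minimal sketch of the rewrite on a two-lane phi without the self-loop (names illustrative):

  define <2 x float> @broken_phi_sketch(i1 %c, <2 x float> %a, <2 x float> %b) {
  p0:
    %a0 = extractelement <2 x float> %a, i64 0
    %a1 = extractelement <2 x float> %a, i64 1
    br i1 %c, label %p1, label %merge

  p1:
    %b0 = extractelement <2 x float> %b, i64 0
    %b1 = extractelement <2 x float> %b, i64 1
    br label %merge

  merge:
    ; One scalar phi per lane replaces the single vector phi.
    %s0 = phi float [ %a0, %p0 ], [ %b0, %p1 ]
    %s1 = phi float [ %a1, %p0 ], [ %b1, %p1 ]
    %v0 = insertelement <2 x float> poison, float %s0, i64 0
    %v1 = insertelement <2 x float> %v0, float %s1, i64 1
    ret <2 x float> %v1
  }
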
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index d900165..2ad28b8 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -10668,3 +10668,111 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
store <2 x i64> %r, ptr addrspace(1) %out
ret void
}
+
+define <2 x i32> @v_sdiv_i32_exact(<2 x i32> %num) {
+; CHECK-LABEL: @v_sdiv_i32_exact(
+; CHECK: %1 = extractelement <2 x i32> %num, i64 0
+; CHECK-NEXT: %2 = sdiv exact i32 %1, 4096
+; CHECK-NEXT: %3 = insertelement <2 x i32> poison, i32 %2, i64 0
+; CHECK-NEXT: %4 = extractelement <2 x i32> %num, i64 1
+; CHECK-NEXT: %5 = sdiv exact i32 %4, 1024
+; CHECK-NEXT: %6 = insertelement <2 x i32> %3, i32 %5, i64 1
+; CHECK-NEXT: ret <2 x i32> %6
+;
+; GFX6-LABEL: v_sdiv_i32_exact:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 10, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_sdiv_i32_exact:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; GFX9-NEXT: v_ashrrev_i32_e32 v1, 10, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact <2 x i32> %num, <i32 4096, i32 1024>
+ ret <2 x i32> %result
+}
+
+define <2 x i64> @v_sdiv_i64_exact(<2 x i64> %num) {
+; CHECK-LABEL: @v_sdiv_i64_exact(
+; CHECK: %1 = extractelement <2 x i64> %num, i64 0
+; CHECK-NEXT: %2 = sdiv exact i64 %1, 4096
+; CHECK-NEXT: %3 = insertelement <2 x i64> poison, i64 %2, i64 0
+; CHECK-NEXT: %4 = extractelement <2 x i64> %num, i64 1
+; CHECK-NEXT: %5 = sdiv exact i64 %4, 1024
+; CHECK-NEXT: %6 = insertelement <2 x i64> %3, i64 %5, i64 1
+; CHECK-NEXT: ret <2 x i64> %6
+;
+; GFX6-LABEL: v_sdiv_i64_exact:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; GFX6-NEXT: v_ashr_i64 v[2:3], v[2:3], 10
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_sdiv_i64_exact:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_ashrrev_i64 v[0:1], 12, v[0:1]
+; GFX9-NEXT: v_ashrrev_i64 v[2:3], 10, v[2:3]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact <2 x i64> %num, <i64 4096, i64 1024>
+ ret <2 x i64> %result
+}
+
+define <2 x i32> @v_udiv_i32_exact(<2 x i32> %num) {
+; CHECK-LABEL: @v_udiv_i32_exact(
+; CHECK: %1 = extractelement <2 x i32> %num, i64 0
+; CHECK-NEXT: %2 = udiv exact i32 %1, 4096
+; CHECK-NEXT: %3 = insertelement <2 x i32> poison, i32 %2, i64 0
+; CHECK-NEXT: %4 = extractelement <2 x i32> %num, i64 1
+; CHECK-NEXT: %5 = udiv exact i32 %4, 1024
+; CHECK-NEXT: %6 = insertelement <2 x i32> %3, i32 %5, i64 1
+; CHECK-NEXT: ret <2 x i32> %6
+;
+; GFX6-LABEL: v_udiv_i32_exact:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 12, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 10, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_udiv_i32_exact:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 12, v0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %result = udiv exact <2 x i32> %num, <i32 4096, i32 1024>
+ ret <2 x i32> %result
+}
+
+define <2 x i64> @v_udiv_i64_exact(<2 x i64> %num) {
+; CHECK-LABEL: @v_udiv_i64_exact(
+; CHECK: %1 = extractelement <2 x i64> %num, i64 0
+; CHECK-NEXT: %2 = udiv exact i64 %1, 4096
+; CHECK-NEXT: %3 = insertelement <2 x i64> poison, i64 %2, i64 0
+; CHECK-NEXT: %4 = extractelement <2 x i64> %num, i64 1
+; CHECK-NEXT: %5 = udiv exact i64 %4, 1024
+; CHECK-NEXT: %6 = insertelement <2 x i64> %3, i64 %5, i64 1
+; CHECK-NEXT: ret <2 x i64> %6
+;
+; GFX6-LABEL: v_udiv_i64_exact:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshr_b64 v[0:1], v[0:1], 12
+; GFX6-NEXT: v_lshr_b64 v[2:3], v[2:3], 10
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_udiv_i64_exact:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b64 v[0:1], 12, v[0:1]
+; GFX9-NEXT: v_lshrrev_b64 v[2:3], 10, v[2:3]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %result = udiv exact <2 x i64> %num, <i64 4096, i64 1024>
+ ret <2 x i64> %result
+}
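
An exact division guarantees a zero remainder, so division by a power of two needs no rounding correction and reduces to a plain shift: arithmetic for sdiv, logical for udiv. The IR checks above show the vector divisions scalarized lane by lane with the exact flag preserved, and the GFX6/GFX9 checks show each exact division by 4096 and 1024 selected as a shift by 12 and 10. The scalar equivalence, as a sketch:

  define i32 @sdiv_exact_pow2(i32 %x) {
    ; sdiv exact by 4096 is an arithmetic shift right by 12 (log2(4096) = 12),
    ; since exactness rules out any remainder to round toward zero.
    %r = sdiv exact i32 %x, 4096
    ret i32 %r
  }

  define i32 @sdiv_exact_pow2_as_shift(i32 %x) {
    %r = ashr exact i32 %x, 12
    ret i32 %r
  }
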
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
index 942f459..8ddaf24 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
@@ -808,7 +808,7 @@ define float @test_pown_fast_f32_nobuiltin(float %x, i32 %y) {
; CHECK-LABEL: define float @test_pown_fast_f32_nobuiltin
; CHECK-SAME: (float [[X:%.*]], i32 [[Y:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call fast float @_Z4pownfi(float [[X]], i32 [[Y]]) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call fast float @_Z4pownfi(float [[X]], i32 [[Y]]) #[[ATTR4:[0-9]+]]
; CHECK-NEXT: ret float [[CALL]]
;
entry:
@@ -820,11 +820,11 @@ define float @test_pown_fast_f32_strictfp(float %x, i32 %y) #1 {
; CHECK-LABEL: define float @test_pown_fast_f32_strictfp
; CHECK-SAME: (float [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]])
-; CHECK-NEXT: [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]])
-; CHECK-NEXT: [[POWNI2F:%.*]] = sitofp i32 [[Y]] to float
-; CHECK-NEXT: [[__YLOGX:%.*]] = fmul fast float [[__LOG2]], [[POWNI2F]]
-; CHECK-NEXT: [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]])
+; CHECK-NEXT: [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]]) #[[ATTR0]]
+; CHECK-NEXT: [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]]) #[[ATTR0]]
+; CHECK-NEXT: [[POWNI2F:%.*]] = call fast float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[Y]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[__YLOGX:%.*]] = call fast float @llvm.experimental.constrained.fmul.f32(float [[POWNI2F]], float [[__LOG2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]]) #[[ATTR0]]
; CHECK-NEXT: [[__YEVEN:%.*]] = shl i32 [[Y]], 31
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[X]] to i32
; CHECK-NEXT: [[__POW_SIGN:%.*]] = and i32 [[__YEVEN]], [[TMP0]]
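
Inside a strictfp function, the pown simplification may no longer emit raw sitofp and fmul instructions: rounding-mode and exception behavior must stay explicit, so the updated checks use the llvm.experimental.constrained.* forms with dynamic-rounding and strict-exception metadata, and every emitted call carries the caller's strictfp attribute. A minimal sketch of the constrained multiply, under the same metadata assumptions as the checks:

  define float @strict_mul(float %a, float %b) #0 {
    ; The constrained form keeps the FP environment observable to the optimizer.
    %r = call float @llvm.experimental.constrained.fmul.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
    ret float %r
  }

  declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata)

  attributes #0 = { strictfp }
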
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
index 2ffa647..2e64a34 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
@@ -896,7 +896,7 @@ define float @test_rootn_f32__y_neg2__strictfp(float %x) #1 {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2__strictfp(
; CHECK-SAME: float [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call float @_Z5rsqrtf(float [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call float @_Z5rsqrtf(float [[X]]) #[[ATTR0]]
; CHECK-NEXT: ret float [[__ROOTN2RSQRT]]
;
entry:
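
Same fix as in the pown test: a call created inside a strictfp function must itself be marked strictfp, so the rsqrt call that rootn(x, -2) folds to now carries the caller's attribute. A minimal sketch of the required callsite marking (caller name illustrative):

  define float @rsqrt_caller(float %x) #0 {
  entry:
    ; Calls emitted into a strictfp function must carry strictfp themselves.
    %r = call float @_Z5rsqrtf(float %x) #0
    ret float %r
  }

  declare float @_Z5rsqrtf(float)

  attributes #0 = { strictfp }
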
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
index af0eb23..3d4ae84d9 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
@@ -1025,33 +1025,33 @@ attributes #6 = { "enqueued-block" }
; AKF_HSA: attributes #[[ATTR8]] = { "amdgpu-calls" }
;.
; ATTRIBUTOR_HSA: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR12]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR13]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR14]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR15]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR12]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR13]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR14]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR15]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR16]] = { nounwind "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR17]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR17]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR18]] = { nounwind "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR19]] = { nounwind sanitize_address "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR20]] = { nounwind sanitize_address "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR21]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR22]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR19]] = { nounwind sanitize_address "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR20]] = { nounwind sanitize_address "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR21]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR22]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR23:[0-9]+]] = { nounwind sanitize_address "amdgpu-no-implicitarg-ptr" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR24:[0-9]+]] = { "amdgpu-waves-per-eu"="4,10" "enqueued-block" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR25]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "enqueued-block" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR25]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "enqueued-block" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR26]] = { "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR27]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR27]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR28]] = { nounwind }
; ATTRIBUTOR_HSA: attributes #[[ATTR29]] = { "enqueued-block" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
index 9a9c28a..43cdf85 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
@@ -643,19 +643,19 @@ attributes #1 = { nounwind }
; AKF_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-stack-objects" }
;.
; ATTRIBUTOR_HSA: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR12]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR13]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR12]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR13]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
; AKF_HSA: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 500}
;.
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
index 6c5e58c..547ff69 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
@@ -393,17 +393,18 @@ define amdgpu_kernel void @use_get_local_size_z(ptr addrspace(1) %ptr) #1 {
attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }
+;.
; AKF_CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
; AKF_CHECK: attributes #[[ATTR1]] = { nounwind }
;.
; ATTRIBUTOR_CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR1]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR1]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
index 1ebd864..2970495 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
@@ -477,7 +477,6 @@ define amdgpu_ps void @add_i32_varying(ptr addrspace(8) inreg %out, ptr addrspac
; GFX1032-NEXT: s_cbranch_execz .LBB1_3
; GFX1032-NEXT: ; %bb.2:
; GFX1032-NEXT: v_mov_b32_e32 v0, s11
-; GFX1032-NEXT: s_mov_b32 s10, s11
; GFX1032-NEXT: buffer_atomic_add v0, off, s[4:7], 0 glc
; GFX1032-NEXT: .LBB1_3:
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
@@ -615,7 +614,6 @@ define amdgpu_ps void @add_i32_varying(ptr addrspace(8) inreg %out, ptr addrspac
; GFX1132-NEXT: s_cbranch_execz .LBB1_3
; GFX1132-NEXT: ; %bb.2:
; GFX1132-NEXT: v_mov_b32_e32 v0, s11
-; GFX1132-NEXT: s_mov_b32 s10, s11
; GFX1132-NEXT: buffer_atomic_add_u32 v0, off, s[4:7], 0 glc
; GFX1132-NEXT: .LBB1_3:
; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s9
diff --git a/llvm/test/CodeGen/AMDGPU/av_spill_cross_bb_usage.mir b/llvm/test/CodeGen/AMDGPU/av_spill_cross_bb_usage.mir
index c1da29e..3228962 100644
--- a/llvm/test/CodeGen/AMDGPU/av_spill_cross_bb_usage.mir
+++ b/llvm/test/CodeGen/AMDGPU/av_spill_cross_bb_usage.mir
@@ -14,6 +14,8 @@
---
name: test_av_spill_cross_bb_usage
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 4 }
machineFunctionInfo:
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
new file mode 100644
index 0000000..7108f3d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
@@ -0,0 +1,357 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -march=amdgcn -mcpu=gfx940 < %s | FileCheck --check-prefixes=GCN %s
+
+; TODO: Add global-isel when it can support bf16
+
+define amdgpu_ps float @v_test_cvt_bf16_f32_v(bfloat %v) {
+; GCN-LABEL: v_test_cvt_bf16_f32_v:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = fpext bfloat %v to float
+ ret float %cvt
+}
+
+define amdgpu_ps float @v_test_cvt_bf16_f32_s(bfloat inreg %v) {
+; GCN-LABEL: v_test_cvt_bf16_f32_s:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_lshl_b32 s0, s0, 16
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = fpext bfloat %v to float
+ ret float %cvt
+}
+
+define amdgpu_ps float @v_test_cvt_v2f32_v2bf16_v(<2 x float> %src) {
+; GCN-LABEL: v_test_cvt_v2f32_v2bf16_v:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v2, v2, v0, s0
+; GCN-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GCN-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GCN-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GCN-NEXT: v_add3_u32 v2, v2, v1, s0
+; GCN-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GCN-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GCN-NEXT: s_mov_b32 s0, 0x7060302
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GCN-NEXT: v_perm_b32 v0, v1, v0, s0
+; GCN-NEXT: ; return to shader part epilog
+ %res = fptrunc <2 x float> %src to <2 x bfloat>
+ %cast = bitcast <2 x bfloat> %res to float
+ ret float %cast
+}
+
+define amdgpu_ps float @v_test_cvt_v2f32_v2bf16_s(<2 x float> inreg %src) {
+; GCN-LABEL: v_test_cvt_v2f32_v2bf16_s:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_bfe_u32 s2, s1, 0x10010
+; GCN-NEXT: s_add_i32 s2, s2, s1
+; GCN-NEXT: s_or_b32 s4, s1, 0x400000
+; GCN-NEXT: s_add_i32 s5, s2, 0x7fff
+; GCN-NEXT: v_cmp_u_f32_e64 s[2:3], s1, s1
+; GCN-NEXT: s_and_b64 s[2:3], s[2:3], exec
+; GCN-NEXT: s_cselect_b32 s2, s4, s5
+; GCN-NEXT: s_bfe_u32 s1, s0, 0x10010
+; GCN-NEXT: s_add_i32 s1, s1, s0
+; GCN-NEXT: s_or_b32 s3, s0, 0x400000
+; GCN-NEXT: s_add_i32 s4, s1, 0x7fff
+; GCN-NEXT: v_cmp_u_f32_e64 s[0:1], s0, s0
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], exec
+; GCN-NEXT: s_cselect_b32 s0, s3, s4
+; GCN-NEXT: s_pack_hh_b32_b16 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: ; return to shader part epilog
+ %res = fptrunc <2 x float> %src to <2 x bfloat>
+ %cast = bitcast <2 x bfloat> %res to float
+ ret float %cast
+}
+
+define amdgpu_ps float @v_test_cvt_f32_bf16_v(float %src) {
+; GCN-LABEL: v_test_cvt_f32_bf16_v:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v1, v1, v0, s0
+; GCN-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GCN-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: ; return to shader part epilog
+ %trunc = fptrunc float %src to bfloat
+ %ext = fpext bfloat %trunc to float
+ ret float %ext
+}
+
+define amdgpu_ps float @v_test_cvt_v2f64_v2bf16_v(<2 x double> %src) {
+; GCN-LABEL: v_test_cvt_v2f64_v2bf16_v:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GCN-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
+; GCN-NEXT: v_and_b32_e32 v7, 1, v6
+; GCN-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
+; GCN-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
+; GCN-NEXT: v_add_u32_e32 v4, v6, v4
+; GCN-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GCN-NEXT: s_brev_b32 s4, 1
+; GCN-NEXT: v_and_or_b32 v5, v1, s4, v4
+; GCN-NEXT: v_bfe_u32 v4, v4, 16, 1
+; GCN-NEXT: s_movk_i32 s5, 0x7fff
+; GCN-NEXT: v_add3_u32 v4, v4, v5, s5
+; GCN-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GCN-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; GCN-NEXT: v_cvt_f32_f64_e64 v5, |v[2:3]|
+; GCN-NEXT: v_cvt_f64_f32_e32 v[0:1], v5
+; GCN-NEXT: v_and_b32_e32 v6, 1, v5
+; GCN-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[2:3]|, v[0:1]
+; GCN-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[2:3]|, v[0:1]
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v6
+; GCN-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[2:3]
+; GCN-NEXT: v_add_u32_e32 v0, v5, v0
+; GCN-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc
+; GCN-NEXT: v_and_or_b32 v1, v3, s4, v0
+; GCN-NEXT: v_bfe_u32 v0, v0, 16, 1
+; GCN-NEXT: v_add3_u32 v0, v0, v1, s5
+; GCN-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; GCN-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[2:3]
+; GCN-NEXT: s_mov_b32 s0, 0x7060302
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT: v_perm_b32 v0, v0, v4, s0
+; GCN-NEXT: ; return to shader part epilog
+ %res = fptrunc <2 x double> %src to <2 x bfloat>
+ %cast = bitcast <2 x bfloat> %res to float
+ ret float %cast
+}
+
+define amdgpu_ps float @fptrunc_f32_f32_to_v2bf16(float %a, float %b) {
+; GCN-LABEL: fptrunc_f32_f32_to_v2bf16:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v2, v2, v0, s0
+; GCN-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GCN-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GCN-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GCN-NEXT: v_add3_u32 v2, v2, v1, s0
+; GCN-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GCN-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GCN-NEXT: s_mov_b32 s0, 0x7060302
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GCN-NEXT: v_perm_b32 v0, v1, v0, s0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %a.cvt = fptrunc float %a to bfloat
+ %b.cvt = fptrunc float %b to bfloat
+ %v2.1 = insertelement <2 x bfloat> undef, bfloat %a.cvt, i32 0
+ %v2.2 = insertelement <2 x bfloat> %v2.1, bfloat %b.cvt, i32 1
+ %ret = bitcast <2 x bfloat> %v2.2 to float
+ ret float %ret
+}
+
+define amdgpu_ps float @fptrunc_f32_f32_to_v2bf16_mods(float %a, float %b) {
+; GCN-LABEL: fptrunc_f32_f32_to_v2bf16_mods:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; GCN-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v3, v3, v2, s0
+; GCN-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GCN-NEXT: v_cmp_u_f32_e64 vcc, -v0, -v0
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GCN-NEXT: v_and_b32_e32 v2, 0x7fffffff, v1
+; GCN-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GCN-NEXT: v_add3_u32 v3, v3, v2, s0
+; GCN-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GCN-NEXT: v_cmp_u_f32_e64 vcc, |v1|, |v1|
+; GCN-NEXT: s_mov_b32 s0, 0x7060302
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GCN-NEXT: v_perm_b32 v0, v1, v0, s0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %a.neg = fneg float %a
+ %a.cvt = fptrunc float %a.neg to bfloat
+ %b.abs = call float @llvm.fabs.f32(float %b)
+ %b.cvt = fptrunc float %b.abs to bfloat
+ %v2.1 = insertelement <2 x bfloat> undef, bfloat %a.cvt, i32 0
+ %v2.2 = insertelement <2 x bfloat> %v2.1, bfloat %b.cvt, i32 1
+ %ret = bitcast <2 x bfloat> %v2.2 to float
+ ret float %ret
+}
+
+define amdgpu_ps void @fptrunc_f32_to_bf16(float %a, ptr %out) {
+; GCN-LABEL: fptrunc_f32_to_bf16:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v3, v2
+; GCN-NEXT: v_mov_b32_e32 v2, v1
+; GCN-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v1, v1, v0, s0
+; GCN-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GCN-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
+; GCN-NEXT: flat_store_short_d16_hi v[2:3], v0 sc0 sc1
+; GCN-NEXT: s_endpgm
+entry:
+ %a.cvt = fptrunc float %a to bfloat
+ store bfloat %a.cvt, ptr %out
+ ret void
+}
+
+define amdgpu_ps void @fptrunc_f32_to_bf16_abs(float %a, ptr %out) {
+; GCN-LABEL: fptrunc_f32_to_bf16_abs:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v3, v2
+; GCN-NEXT: v_mov_b32_e32 v2, v1
+; GCN-NEXT: v_and_b32_e32 v1, 0x7fffffff, v0
+; GCN-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v4, v4, v1, s0
+; GCN-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; GCN-NEXT: v_cmp_u_f32_e64 vcc, |v0|, |v0|
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GCN-NEXT: flat_store_short_d16_hi v[2:3], v0 sc0 sc1
+; GCN-NEXT: s_endpgm
+entry:
+ %a.abs = call float @llvm.fabs.f32(float %a)
+ %a.cvt = fptrunc float %a.abs to bfloat
+ store bfloat %a.cvt, ptr %out
+ ret void
+}
+
+define amdgpu_ps void @fptrunc_f32_to_bf16_neg(float %a, ptr %out) {
+; GCN-LABEL: fptrunc_f32_to_bf16_neg:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v3, v2
+; GCN-NEXT: v_mov_b32_e32 v2, v1
+; GCN-NEXT: v_xor_b32_e32 v1, 0x80000000, v0
+; GCN-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v4, v4, v1, s0
+; GCN-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; GCN-NEXT: v_cmp_u_f32_e64 vcc, -v0, -v0
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GCN-NEXT: flat_store_short_d16_hi v[2:3], v0 sc0 sc1
+; GCN-NEXT: s_endpgm
+entry:
+ %a.neg = fneg float %a
+ %a.cvt = fptrunc float %a.neg to bfloat
+ store bfloat %a.cvt, ptr %out
+ ret void
+}
+
+define amdgpu_ps void @fptrunc_f64_to_bf16(double %a, ptr %out) {
+; GCN-LABEL: fptrunc_f64_to_bf16:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GCN-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
+; GCN-NEXT: v_and_b32_e32 v7, 1, v6
+; GCN-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
+; GCN-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
+; GCN-NEXT: v_add_u32_e32 v4, v6, v4
+; GCN-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GCN-NEXT: s_brev_b32 s0, 1
+; GCN-NEXT: v_and_or_b32 v5, v1, s0, v4
+; GCN-NEXT: v_bfe_u32 v4, v4, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v4, v4, v5, s0
+; GCN-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GCN-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GCN-NEXT: flat_store_short_d16_hi v[2:3], v0 sc0 sc1
+; GCN-NEXT: s_endpgm
+entry:
+ %a.cvt = fptrunc double %a to bfloat
+ store bfloat %a.cvt, ptr %out
+ ret void
+}
+
+define amdgpu_ps void @fptrunc_f64_to_bf16_neg(double %a, ptr %out) {
+; GCN-LABEL: fptrunc_f64_to_bf16_neg:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_cvt_f32_f64_e64 v7, |v[0:1]|
+; GCN-NEXT: v_cvt_f64_f32_e32 v[4:5], v7
+; GCN-NEXT: v_and_b32_e32 v8, 1, v7
+; GCN-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8
+; GCN-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
+; GCN-NEXT: v_add_u32_e32 v4, v7, v4
+; GCN-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GCN-NEXT: s_brev_b32 s4, 1
+; GCN-NEXT: v_xor_b32_e32 v6, 0x80000000, v1
+; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
+; GCN-NEXT: v_and_or_b32 v5, v6, s4, v4
+; GCN-NEXT: v_bfe_u32 v4, v4, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v4, v4, v5, s0
+; GCN-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GCN-NEXT: v_cmp_u_f64_e64 vcc, -v[0:1], -v[0:1]
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GCN-NEXT: flat_store_short_d16_hi v[2:3], v0 sc0 sc1
+; GCN-NEXT: s_endpgm
+entry:
+ %a.neg = fneg double %a
+ %a.cvt = fptrunc double %a.neg to bfloat
+ store bfloat %a.cvt, ptr %out
+ ret void
+}
+
+define amdgpu_ps void @fptrunc_f64_to_bf16_abs(double %a, ptr %out) {
+; GCN-LABEL: fptrunc_f64_to_bf16_abs:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_cvt_f32_f64_e64 v7, |v[0:1]|
+; GCN-NEXT: v_cvt_f64_f32_e32 v[4:5], v7
+; GCN-NEXT: v_and_b32_e32 v8, 1, v7
+; GCN-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8
+; GCN-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
+; GCN-NEXT: v_add_u32_e32 v4, v7, v4
+; GCN-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GCN-NEXT: v_and_b32_e32 v6, 0x7fffffff, v1
+; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
+; GCN-NEXT: s_brev_b32 s0, 1
+; GCN-NEXT: v_and_or_b32 v5, v6, s0, v4
+; GCN-NEXT: v_bfe_u32 v4, v4, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v4, v4, v5, s0
+; GCN-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GCN-NEXT: v_cmp_u_f64_e64 vcc, |v[0:1]|, |v[0:1]|
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GCN-NEXT: flat_store_short_d16_hi v[2:3], v0 sc0 sc1
+; GCN-NEXT: s_endpgm
+entry:
+ %a.abs = call double @llvm.fabs.f64(double %a)
+ %a.cvt = fptrunc double %a.abs to bfloat
+ store bfloat %a.cvt, ptr %out
+ ret void
+}
+
+declare float @llvm.fabs.f32(float)
+declare double @llvm.fabs.f64(double)
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index ebb77c1..9865883 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -16968,7 +16968,7 @@ define bfloat @v_fabs_bf16(bfloat %a) {
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e64 v0, 1.0, |v0|
+; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
;
@@ -16977,7 +16977,7 @@ define bfloat @v_fabs_bf16(bfloat %a) {
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, |v0|
+; GFX7-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -17163,9 +17163,9 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e64 v0, 1.0, |v0|
+; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v0, -1.0, v0
+; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
;
@@ -17174,9 +17174,9 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, |v0|
+; GFX7-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v0, -1.0, v0
+; GFX7-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -17280,8 +17280,6 @@ define bfloat @v_minnum_bf16(bfloat %a, bfloat %b) {
; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_min_f32_e32 v0, v0, v1
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -17293,8 +17291,6 @@ define bfloat @v_minnum_bf16(bfloat %a, bfloat %b) {
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
@@ -17375,10 +17371,6 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_min_f32_e32 v1, v1, v3
; GCN-NEXT: v_min_f32_e32 v0, v0, v2
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
@@ -17396,10 +17388,6 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_min_f32_e32 v1, v1, v3
; GFX7-NEXT: v_min_f32_e32 v0, v0, v2
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
@@ -17522,12 +17510,6 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_min_f32_e32 v2, v2, v5
; GCN-NEXT: v_min_f32_e32 v1, v1, v4
; GCN-NEXT: v_min_f32_e32 v0, v0, v3
@@ -17551,12 +17533,6 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_min_f32_e32 v2, v2, v5
; GFX7-NEXT: v_min_f32_e32 v1, v1, v4
; GFX7-NEXT: v_min_f32_e32 v0, v0, v3
@@ -17688,14 +17664,6 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_min_f32_e32 v3, v3, v7
; GCN-NEXT: v_min_f32_e32 v2, v2, v6
; GCN-NEXT: v_min_f32_e32 v1, v1, v5
@@ -17725,14 +17693,6 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_min_f32_e32 v3, v3, v7
; GFX7-NEXT: v_min_f32_e32 v2, v2, v6
; GFX7-NEXT: v_min_f32_e32 v1, v1, v5
@@ -17951,22 +17911,6 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_min_f32_e32 v7, v7, v15
; GCN-NEXT: v_min_f32_e32 v6, v6, v14
; GCN-NEXT: v_min_f32_e32 v5, v5, v13
@@ -18020,22 +17964,6 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_min_f32_e32 v7, v7, v15
; GFX7-NEXT: v_min_f32_e32 v6, v6, v14
; GFX7-NEXT: v_min_f32_e32 v5, v5, v13
@@ -18382,71 +18310,51 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
-; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GCN-NEXT: v_min_f32_e32 v14, v14, v30
; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
-; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: v_min_f32_e32 v13, v13, v29
; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
-; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: v_min_f32_e32 v12, v12, v28
; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
-; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: v_min_f32_e32 v11, v11, v27
; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
-; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: v_min_f32_e32 v10, v10, v26
; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: v_min_f32_e32 v9, v9, v25
; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
-; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: v_min_f32_e32 v8, v8, v24
; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: v_min_f32_e32 v7, v7, v23
; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: v_min_f32_e32 v6, v6, v22
; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: v_min_f32_e32 v5, v5, v21
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
@@ -18461,8 +18369,6 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GCN-NEXT: v_min_f32_e32 v4, v4, v20
; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32
; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
@@ -18474,21 +18380,10 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_min_f32_e32 v3, v3, v19
; GCN-NEXT: v_min_f32_e32 v2, v2, v18
; GCN-NEXT: v_min_f32_e32 v1, v1, v17
; GCN-NEXT: v_min_f32_e32 v0, v0, v16
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v20
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
@@ -18503,8 +18398,9 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v20
; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GCN-NEXT: v_min_f32_e32 v15, v15, v16
; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
@@ -18513,14 +18409,12 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-LABEL: v_minnum_v16bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GFX7-NEXT: v_min_f32_e32 v9, v9, v25
-; GFX7-NEXT: buffer_load_dword v25, off, s[0:3], s32
+; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
+; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: v_min_f32_e32 v6, v6, v22
+; GFX7-NEXT: buffer_load_dword v22, off, s[0:3], s32
; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
@@ -18531,13 +18425,13 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
+; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
+; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -18560,13 +18454,13 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
@@ -18579,48 +18473,14 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
-; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
-; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
-; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
-; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
-; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
-; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
-; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
-; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
-; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
-; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_min_f32_e32 v14, v14, v30
; GFX7-NEXT: v_min_f32_e32 v13, v13, v29
; GFX7-NEXT: v_min_f32_e32 v12, v12, v28
; GFX7-NEXT: v_min_f32_e32 v11, v11, v27
; GFX7-NEXT: v_min_f32_e32 v10, v10, v26
-; GFX7-NEXT: v_min_f32_e32 v15, v15, v25
+; GFX7-NEXT: v_min_f32_e32 v9, v9, v25
; GFX7-NEXT: v_min_f32_e32 v8, v8, v24
; GFX7-NEXT: v_min_f32_e32 v7, v7, v23
-; GFX7-NEXT: v_min_f32_e32 v6, v6, v22
; GFX7-NEXT: v_min_f32_e32 v5, v5, v21
; GFX7-NEXT: v_min_f32_e32 v4, v4, v20
; GFX7-NEXT: v_min_f32_e32 v3, v3, v19
@@ -18634,6 +18494,10 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX7-NEXT: v_min_f32_e32 v15, v15, v22
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
@@ -19267,287 +19131,223 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
-; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:124
; GCN-NEXT: v_min_f32_e32 v31, v31, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:120
; GCN-NEXT: v_min_f32_e32 v30, v30, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120
; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:116
; GCN-NEXT: v_min_f32_e32 v29, v29, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116
; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:112
; GCN-NEXT: v_min_f32_e32 v28, v28, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112
; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:108
; GCN-NEXT: v_min_f32_e32 v27, v27, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108
; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:104
; GCN-NEXT: v_min_f32_e32 v26, v26, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104
; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:100
; GCN-NEXT: v_min_f32_e32 v25, v25, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100
; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:96
; GCN-NEXT: v_min_f32_e32 v24, v24, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96
; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92
; GCN-NEXT: v_min_f32_e32 v23, v23, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92
; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:88
; GCN-NEXT: v_min_f32_e32 v22, v22, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88
; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:84
; GCN-NEXT: v_min_f32_e32 v21, v21, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84
; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:80
; GCN-NEXT: v_min_f32_e32 v20, v20, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80
; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:76
; GCN-NEXT: v_min_f32_e32 v19, v19, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76
; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:72
; GCN-NEXT: v_min_f32_e32 v18, v18, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:68
; GCN-NEXT: v_min_f32_e32 v17, v17, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:64
; GCN-NEXT: v_min_f32_e32 v16, v16, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60
; GCN-NEXT: v_min_f32_e32 v15, v15, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:56
; GCN-NEXT: v_min_f32_e32 v14, v14, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56
; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:52
; GCN-NEXT: v_min_f32_e32 v13, v13, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52
; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48
; GCN-NEXT: v_min_f32_e32 v12, v12, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48
; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:44
; GCN-NEXT: v_min_f32_e32 v11, v11, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40
; GCN-NEXT: v_min_f32_e32 v10, v10, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40
; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36
; GCN-NEXT: v_min_f32_e32 v9, v9, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36
; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32
; GCN-NEXT: v_min_f32_e32 v8, v8, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32
; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:28
; GCN-NEXT: v_min_f32_e32 v7, v7, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28
; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:24
; GCN-NEXT: v_min_f32_e32 v6, v6, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24
; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
; GCN-NEXT: v_min_f32_e32 v5, v5, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20
; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16
; GCN-NEXT: v_min_f32_e32 v4, v4, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16
; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12
; GCN-NEXT: v_min_f32_e32 v3, v3, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12
; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; GCN-NEXT: v_min_f32_e32 v2, v2, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8
; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4
; GCN-NEXT: v_min_f32_e32 v1, v1, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GCN-NEXT: v_min_f32_e32 v0, v0, v32
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
@@ -19590,322 +19390,258 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128
; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: s_waitcnt vmcnt(1)
-; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
-; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
-; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
-; GFX7-NEXT: v_min_f32_e32 v31, v31, v32
-; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: v_min_f32_e32 v31, v31, v32
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
; GFX7-NEXT: v_min_f32_e32 v30, v30, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120
; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v29, v29, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116
; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v28, v28, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112
; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v27, v27, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108
; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v26, v26, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104
; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v25, v25, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100
; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v24, v24, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96
; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v23, v23, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92
; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v22, v22, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88
; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v21, v21, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84
; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v20, v20, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80
; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v19, v19, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76
; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v18, v18, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v17, v17, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v16, v16, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v15, v15, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v14, v14, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56
; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v13, v13, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52
; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v12, v12, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48
; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v11, v11, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v10, v10, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40
; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v9, v9, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36
; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v8, v8, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v7, v7, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v6, v6, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24
; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v5, v5, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v4, v4, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v3, v3, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v2, v2, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v1, v1, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v0, v0, v32
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
@@ -21097,8 +20833,6 @@ define bfloat @v_maxnum_bf16(bfloat %a, bfloat %b) {
; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_max_f32_e32 v0, v0, v1
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -21110,8 +20844,6 @@ define bfloat @v_maxnum_bf16(bfloat %a, bfloat %b) {
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
@@ -21192,10 +20924,6 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_max_f32_e32 v1, v1, v3
; GCN-NEXT: v_max_f32_e32 v0, v0, v2
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
@@ -21213,10 +20941,6 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_max_f32_e32 v1, v1, v3
; GFX7-NEXT: v_max_f32_e32 v0, v0, v2
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
@@ -21339,12 +21063,6 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_max_f32_e32 v2, v2, v5
; GCN-NEXT: v_max_f32_e32 v1, v1, v4
; GCN-NEXT: v_max_f32_e32 v0, v0, v3
@@ -21368,12 +21086,6 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_max_f32_e32 v2, v2, v5
; GFX7-NEXT: v_max_f32_e32 v1, v1, v4
; GFX7-NEXT: v_max_f32_e32 v0, v0, v3
@@ -21505,14 +21217,6 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_max_f32_e32 v3, v3, v7
; GCN-NEXT: v_max_f32_e32 v2, v2, v6
; GCN-NEXT: v_max_f32_e32 v1, v1, v5
@@ -21542,14 +21246,6 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_max_f32_e32 v3, v3, v7
; GFX7-NEXT: v_max_f32_e32 v2, v2, v6
; GFX7-NEXT: v_max_f32_e32 v1, v1, v5
@@ -21768,22 +21464,6 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_max_f32_e32 v7, v7, v15
; GCN-NEXT: v_max_f32_e32 v6, v6, v14
; GCN-NEXT: v_max_f32_e32 v5, v5, v13
@@ -21837,22 +21517,6 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_max_f32_e32 v7, v7, v15
; GFX7-NEXT: v_max_f32_e32 v6, v6, v14
; GFX7-NEXT: v_max_f32_e32 v5, v5, v13
@@ -22199,71 +21863,51 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
-; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GCN-NEXT: v_max_f32_e32 v14, v14, v30
; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
-; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: v_max_f32_e32 v13, v13, v29
; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
-; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: v_max_f32_e32 v12, v12, v28
; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
-; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: v_max_f32_e32 v11, v11, v27
; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
-; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: v_max_f32_e32 v10, v10, v26
; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: v_max_f32_e32 v9, v9, v25
; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
-; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: v_max_f32_e32 v8, v8, v24
; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: v_max_f32_e32 v7, v7, v23
; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: v_max_f32_e32 v6, v6, v22
; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: v_max_f32_e32 v5, v5, v21
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
@@ -22278,8 +21922,6 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GCN-NEXT: v_max_f32_e32 v4, v4, v20
; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32
; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
@@ -22291,21 +21933,10 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_max_f32_e32 v3, v3, v19
; GCN-NEXT: v_max_f32_e32 v2, v2, v18
; GCN-NEXT: v_max_f32_e32 v1, v1, v17
; GCN-NEXT: v_max_f32_e32 v0, v0, v16
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v20
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
@@ -22320,8 +21951,9 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v20
; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GCN-NEXT: v_max_f32_e32 v15, v15, v16
; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
@@ -22330,14 +21962,12 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-LABEL: v_maxnum_v16bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GFX7-NEXT: v_max_f32_e32 v9, v9, v25
-; GFX7-NEXT: buffer_load_dword v25, off, s[0:3], s32
+; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
+; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: v_max_f32_e32 v6, v6, v22
+; GFX7-NEXT: buffer_load_dword v22, off, s[0:3], s32
; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
@@ -22348,13 +21978,13 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
+; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
+; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -22377,13 +22007,13 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
@@ -22396,48 +22026,14 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
-; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
-; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
-; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
-; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
-; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
-; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
-; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
-; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
-; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
-; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_max_f32_e32 v14, v14, v30
; GFX7-NEXT: v_max_f32_e32 v13, v13, v29
; GFX7-NEXT: v_max_f32_e32 v12, v12, v28
; GFX7-NEXT: v_max_f32_e32 v11, v11, v27
; GFX7-NEXT: v_max_f32_e32 v10, v10, v26
-; GFX7-NEXT: v_max_f32_e32 v15, v15, v25
+; GFX7-NEXT: v_max_f32_e32 v9, v9, v25
; GFX7-NEXT: v_max_f32_e32 v8, v8, v24
; GFX7-NEXT: v_max_f32_e32 v7, v7, v23
-; GFX7-NEXT: v_max_f32_e32 v6, v6, v22
; GFX7-NEXT: v_max_f32_e32 v5, v5, v21
; GFX7-NEXT: v_max_f32_e32 v4, v4, v20
; GFX7-NEXT: v_max_f32_e32 v3, v3, v19
@@ -22451,6 +22047,10 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX7-NEXT: v_max_f32_e32 v15, v15, v22
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
@@ -23084,287 +22684,223 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
-; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:124
; GCN-NEXT: v_max_f32_e32 v31, v31, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:120
; GCN-NEXT: v_max_f32_e32 v30, v30, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120
; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:116
; GCN-NEXT: v_max_f32_e32 v29, v29, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116
; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:112
; GCN-NEXT: v_max_f32_e32 v28, v28, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112
; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:108
; GCN-NEXT: v_max_f32_e32 v27, v27, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108
; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:104
; GCN-NEXT: v_max_f32_e32 v26, v26, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104
; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:100
; GCN-NEXT: v_max_f32_e32 v25, v25, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100
; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:96
; GCN-NEXT: v_max_f32_e32 v24, v24, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96
; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92
; GCN-NEXT: v_max_f32_e32 v23, v23, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92
; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:88
; GCN-NEXT: v_max_f32_e32 v22, v22, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88
; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:84
; GCN-NEXT: v_max_f32_e32 v21, v21, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84
; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:80
; GCN-NEXT: v_max_f32_e32 v20, v20, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80
; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:76
; GCN-NEXT: v_max_f32_e32 v19, v19, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76
; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:72
; GCN-NEXT: v_max_f32_e32 v18, v18, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:68
; GCN-NEXT: v_max_f32_e32 v17, v17, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:64
; GCN-NEXT: v_max_f32_e32 v16, v16, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60
; GCN-NEXT: v_max_f32_e32 v15, v15, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:56
; GCN-NEXT: v_max_f32_e32 v14, v14, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56
; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:52
; GCN-NEXT: v_max_f32_e32 v13, v13, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52
; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48
; GCN-NEXT: v_max_f32_e32 v12, v12, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48
; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:44
; GCN-NEXT: v_max_f32_e32 v11, v11, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40
; GCN-NEXT: v_max_f32_e32 v10, v10, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40
; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36
; GCN-NEXT: v_max_f32_e32 v9, v9, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36
; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32
; GCN-NEXT: v_max_f32_e32 v8, v8, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32
; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:28
; GCN-NEXT: v_max_f32_e32 v7, v7, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28
; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:24
; GCN-NEXT: v_max_f32_e32 v6, v6, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24
; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
; GCN-NEXT: v_max_f32_e32 v5, v5, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20
; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16
; GCN-NEXT: v_max_f32_e32 v4, v4, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16
; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12
; GCN-NEXT: v_max_f32_e32 v3, v3, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12
; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; GCN-NEXT: v_max_f32_e32 v2, v2, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8
; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4
; GCN-NEXT: v_max_f32_e32 v1, v1, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GCN-NEXT: v_max_f32_e32 v0, v0, v32
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
@@ -23407,322 +22943,258 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128
; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: s_waitcnt vmcnt(1)
-; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
-; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
-; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
-; GFX7-NEXT: v_max_f32_e32 v31, v31, v32
-; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: v_max_f32_e32 v31, v31, v32
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
; GFX7-NEXT: v_max_f32_e32 v30, v30, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120
; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v29, v29, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116
; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v28, v28, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112
; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v27, v27, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108
; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v26, v26, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104
; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v25, v25, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100
; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v24, v24, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96
; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v23, v23, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92
; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v22, v22, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88
; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v21, v21, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84
; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v20, v20, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80
; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v19, v19, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76
; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v18, v18, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v17, v17, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v16, v16, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v15, v15, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v14, v14, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56
; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v13, v13, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52
; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v12, v12, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48
; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v11, v11, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v10, v10, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40
; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v9, v9, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36
; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v8, v8, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v7, v7, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v6, v6, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24
; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v5, v5, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v4, v4, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v3, v3, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v2, v2, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v1, v1, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v0, v0, v32
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
@@ -25176,7 +24648,6 @@ define { bfloat, i16 } @v_frexp_bf16_i16(bfloat %a) {
; GCN-NEXT: v_frexp_exp_i32_f32_e32 v2, v0
; GCN-NEXT: v_cmp_lt_f32_e64 vcc, |v0|, s4
; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v2, vcc
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -26818,11 +26289,17 @@ define bfloat @v_canonicalize_bf16(bfloat %a) {
; GCN-LABEL: v_canonicalize_bf16:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_canonicalize_bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_canonicalize_bf16:
diff --git a/llvm/test/CodeGen/AMDGPU/clamp.ll b/llvm/test/CodeGen/AMDGPU/clamp.ll
index dfadd8d..9472845 100644
--- a/llvm/test/CodeGen/AMDGPU/clamp.ll
+++ b/llvm/test/CodeGen/AMDGPU/clamp.ll
@@ -2996,18 +2996,16 @@ define amdgpu_kernel void @v_clamp_v2f16_undef_elt(ptr addrspace(1) %out, ptr ad
; GFX6-NEXT: v_mov_b32_e32 v4, 0x7fc00000
; GFX6-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v2
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v2
-; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX6-NEXT: v_max_f32_e32 v3, 0x7fc00000, v3
-; GFX6-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX6-NEXT: v_med3_f32 v2, v2, 0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: v_min_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_max_f32_e32 v2, 0x7fc00000, v2
+; GFX6-NEXT: v_med3_f32 v3, v3, 0, v4
; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX6-NEXT: v_min_f32_e32 v2, 1.0, v2
+; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
; GFX6-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; GFX6-NEXT: s_endpgm
;
@@ -3095,16 +3093,15 @@ define amdgpu_kernel void @v_clamp_v2f16_not_zero(ptr addrspace(1) %out, ptr add
; GFX6-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
; GFX6-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v2
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_cvt_f32_f16_e64 v2, v2 clamp
-; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX6-NEXT: v_max_f32_e32 v3, 2.0, v3
-; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: v_min_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX6-NEXT: v_cvt_f32_f16_e64 v3, v3 clamp
+; GFX6-NEXT: v_max_f32_e32 v2, 2.0, v2
; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX6-NEXT: v_min_f32_e32 v2, 1.0, v2
+; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
; GFX6-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; GFX6-NEXT: s_endpgm
;
@@ -3198,9 +3195,8 @@ define amdgpu_kernel void @v_clamp_v2f16_not_one(ptr addrspace(1) %out, ptr addr
; GFX6-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v2
; GFX6-NEXT: v_cvt_f32_f16_e64 v3, v3 clamp
-; GFX6-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v2
; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
; GFX6-NEXT: v_med3_f32 v2, v2, 0, 0
; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
@@ -3760,19 +3756,17 @@ define amdgpu_kernel void @v_clamp_v2f16_undef_limit_elts0(ptr addrspace(1) %out
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
; GFX6-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
-; GFX6-NEXT: s_mov_b32 s2, 0x7fc00000
; GFX6-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX6-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v2
-; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX6-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX6-NEXT: v_med3_f32 v3, v3, s2, 1.0
+; GFX6-NEXT: v_max_f32_e32 v3, 0x7fc00000, v3
+; GFX6-NEXT: v_min_f32_e32 v3, 1.0, v3
; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
; GFX6-NEXT: v_med3_f32 v2, v2, 0, v4
; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
; GFX6-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
@@ -3863,18 +3857,16 @@ define amdgpu_kernel void @v_clamp_v2f16_undef_limit_elts1(ptr addrspace(1) %out
; GFX6-NEXT: v_mov_b32_e32 v4, 0x7fc00000
; GFX6-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v2
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v2
-; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX6-NEXT: v_max_f32_e32 v3, 0x7fc00000, v3
-; GFX6-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX6-NEXT: v_med3_f32 v2, v2, 0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: v_min_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_max_f32_e32 v2, 0x7fc00000, v2
+; GFX6-NEXT: v_med3_f32 v3, v3, 0, v4
; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX6-NEXT: v_min_f32_e32 v2, 1.0, v2
+; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
; GFX6-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; GFX6-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll b/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
index 2ed6d7f..1c8725f 100644
--- a/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
+++ b/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
@@ -1,10 +1,12 @@
; RUN: llc --amdgpu-disable-structurizer -stop-after=amdgpu-isel -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,ISEL %s
; RUN: llc --amdgpu-disable-structurizer -stop-after=dead-mi-elimination -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,DEADMI %s
+; RUN: llc --amdgpu-disable-structurizer -global-isel -stop-after=irtranslator -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck %s --check-prefixes=CHECK,GISEL

; CHECK-LABEL: name: basic_call
-; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ENTRY
-; ISEL: {{.*}} SI_CALL_ISEL {{.*}}, @foo, [[TOKEN]], csr_amdgpu, {{.*}}
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
+; ISEL: {{.*}} SI_CALL_ISEL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
; DEADMI: {{.*}} SI_CALL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
+; GISEL: {{.*}} G_SI_CALL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
define i32 @basic_call(i32 %src) #0 {
%t = call token @llvm.experimental.convergence.entry()
%r = call i32 @foo(i32 %src) [ "convergencectrl"(token %t) ]
@@ -12,10 +14,11 @@ define i32 @basic_call(i32 %src) #0 {
}

; CHECK-LABEL: name: basic_intrinsic
-; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
; ISEL: CONVERGENCECTRL_GLUE [[TOKEN]]
; DEADMI-NOT: CONVERGENCECTRL_GLUE
-; CHECK: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[TOKEN]]
define i32 @basic_intrinsic(i32 %src) #0 {
%t = call token @llvm.experimental.convergence.anchor()
%r = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
@@ -30,12 +33,13 @@ define i32 @uncontrolled_call(i32 %src) #0 {
}

; CHECK-LABEL: name: basic_branch
-; CHECK: bb.0.entry:
-; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
-; CHECK: bb.1.then:
+; CHECK: bb.[[#]].entry:
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
+; CHECK: bb.[[#]].then:
; ISEL: CONVERGENCECTRL_GLUE [[TOKEN]]
; DEADMI-NOT: CONVERGENCECTRL_GLUE
-; CHECK: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[TOKEN]]
define i32 @basic_branch(i32 %src, i1 %cond) #0 {
entry:
%t = call token @llvm.experimental.convergence.anchor()
@@ -52,12 +56,13 @@ else:
}

; CHECK-LABEL: name: basic_loop
-; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
-; CHECK: bb.1.loop:
-; CHECK: [[LOOP:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_LOOP [[TOKEN]]
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
+; CHECK: bb.[[#]].loop:
+; CHECK: [[LOOP:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_LOOP [[TOKEN]]
; ISEL: CONVERGENCECTRL_GLUE [[LOOP]]
; DEADMI-NOT: CONVERGENCECTRL_GLUE
-; CHECK: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[LOOP]]
+; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[LOOP]]
+; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[LOOP]]
define i32 @basic_loop(i32 %src, i1 %cond) #0 {
%t1 = call token @llvm.experimental.convergence.anchor()
br label %loop
@@ -71,6 +76,32 @@ end:
ret i32 %r
}

+; CHECK-LABEL: name: nested
+; CHECK: [[ENTRY:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
+; CHECK: [[ANCHOR:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
+; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[ANCHOR]]
+; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[ANCHOR]]
+; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[ENTRY]]
+; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[ENTRY]]
+define i32 @nested(i32 %src) #0 {
+ %t1 = call token @llvm.experimental.convergence.entry()
+ %t2 = call token @llvm.experimental.convergence.anchor()
+ %r2 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t2) ]
+ %r1 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t1) ]
+ %sum = add i32 %r1, %r2
+ ret i32 %sum
+}
+
+; CHECK-LABEL: name: tail_call_void_func_void
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
+; CHECK: {{.*}} SI_TCRETURN {{.*}}, @external_void_func_void, 0, csr_amdgpu, {{.*}}implicit [[TOKEN]]
+define void @tail_call_void_func_void() #0 {
+ %t1 = call token @llvm.experimental.convergence.entry()
+ tail call void @external_void_func_void() [ "convergencectrl"(token %t1) ]
+ ret void
+}
+
+declare hidden void @external_void_func_void() #0
declare i32 @foo(i32 %x) #0
declare i32 @llvm.amdgcn.readfirstlane(i32) #0

diff --git a/llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir b/llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir
index 895185c..577d38e 100644
--- a/llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir
+++ b/llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir
@@ -333,7 +333,7 @@
ret void
}

- attributes #0 = { "amdgpu-waves-per-eu"="4,4" }
+ attributes #0 = { "amdgpu-waves-per-eu"="4,4" "amdgpu-no-agpr" }
...
---
diff --git a/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll b/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
index 0c03419..386f9cd 100644
--- a/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
@@ -35,6 +35,6 @@ define amdgpu_kernel void @test_direct_indirect_call() {
ret void
}
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR1]] = { "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll
index 2f3d5d9..cf99b5d 100644
--- a/llvm/test/CodeGen/AMDGPU/div_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll
@@ -1,10 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9,GFX9-SDAG %s
-; RUN: llc -O0 -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-O0,GFX9-SDAG-O0 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -O0 -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-O0 %s

-; FIXME: GlobalISel missing the power-of-2 cases in legalization. https://github.com/llvm/llvm-project/issues/80671
-; xUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9,GFX9 %s
-; xUN: llc -O0 -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-O0,GFX9-O0 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-G %s
+; RUN: llc -O0 -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-G-O0 %s

define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-LABEL: v_sdiv_i128_vv:
@@ -1223,6 +1222,1158 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-LABEL: v_sdiv_i128_vv:
+; GFX9-G: ; %bb.0: ; %_udiv-special-cases
+; GFX9-G-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v16, 31, v3
+; GFX9-G-NEXT: v_xor_b32_e32 v0, v16, v0
+; GFX9-G-NEXT: v_xor_b32_e32 v1, v16, v1
+; GFX9-G-NEXT: v_sub_co_u32_e32 v10, vcc, v0, v16
+; GFX9-G-NEXT: v_xor_b32_e32 v2, v16, v2
+; GFX9-G-NEXT: v_subb_co_u32_e32 v11, vcc, v1, v16, vcc
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v17, 31, v7
+; GFX9-G-NEXT: v_xor_b32_e32 v3, v16, v3
+; GFX9-G-NEXT: v_subb_co_u32_e32 v12, vcc, v2, v16, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v13, vcc, v3, v16, vcc
+; GFX9-G-NEXT: v_xor_b32_e32 v0, v17, v4
+; GFX9-G-NEXT: v_xor_b32_e32 v1, v17, v5
+; GFX9-G-NEXT: v_sub_co_u32_e32 v18, vcc, v0, v17
+; GFX9-G-NEXT: v_xor_b32_e32 v2, v17, v6
+; GFX9-G-NEXT: v_subb_co_u32_e32 v19, vcc, v1, v17, vcc
+; GFX9-G-NEXT: v_xor_b32_e32 v3, v17, v7
+; GFX9-G-NEXT: v_subb_co_u32_e32 v4, vcc, v2, v17, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v17, vcc
+; GFX9-G-NEXT: v_or_b32_e32 v0, v18, v4
+; GFX9-G-NEXT: v_or_b32_e32 v1, v19, v5
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GFX9-G-NEXT: v_or_b32_e32 v0, v10, v12
+; GFX9-G-NEXT: v_or_b32_e32 v1, v11, v13
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; GFX9-G-NEXT: v_ffbh_u32_e32 v1, v18
+; GFX9-G-NEXT: v_ffbh_u32_e32 v0, v19
+; GFX9-G-NEXT: v_add_u32_e32 v1, 32, v1
+; GFX9-G-NEXT: v_ffbh_u32_e32 v2, v4
+; GFX9-G-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX9-G-NEXT: v_ffbh_u32_e32 v1, v5
+; GFX9-G-NEXT: v_add_u32_e32 v2, 32, v2
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[4:5]
+; GFX9-G-NEXT: v_add_u32_e32 v0, 64, v0
+; GFX9-G-NEXT: v_min_u32_e32 v1, v1, v2
+; GFX9-G-NEXT: v_ffbh_u32_e32 v2, v10
+; GFX9-G-NEXT: v_cndmask_b32_e64 v0, v1, v0, s[6:7]
+; GFX9-G-NEXT: v_ffbh_u32_e32 v1, v11
+; GFX9-G-NEXT: v_add_u32_e32 v2, 32, v2
+; GFX9-G-NEXT: v_ffbh_u32_e32 v3, v12
+; GFX9-G-NEXT: v_min_u32_e32 v1, v1, v2
+; GFX9-G-NEXT: v_ffbh_u32_e32 v2, v13
+; GFX9-G-NEXT: v_add_u32_e32 v3, 32, v3
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[12:13]
+; GFX9-G-NEXT: v_add_u32_e32 v1, 64, v1
+; GFX9-G-NEXT: v_min_u32_e32 v2, v2, v3
+; GFX9-G-NEXT: v_cndmask_b32_e64 v1, v2, v1, s[6:7]
+; GFX9-G-NEXT: v_sub_co_u32_e64 v0, s[6:7], v0, v1
+; GFX9-G-NEXT: v_subb_co_u32_e64 v1, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v6, 0x7f
+; GFX9-G-NEXT: v_subb_co_u32_e64 v2, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-G-NEXT: v_subb_co_u32_e64 v3, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_cmp_gt_u64_e64 s[6:7], v[0:1], v[6:7]
+; GFX9-G-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[6:7]
+; GFX9-G-NEXT: v_cmp_lt_u64_e64 s[6:7], 0, v[2:3]
+; GFX9-G-NEXT: v_or_b32_e32 v15, v1, v3
+; GFX9-G-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[6:7]
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[2:3]
+; GFX9-G-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-NEXT: v_cndmask_b32_e64 v6, v7, v6, s[6:7]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[4:5]
+; GFX9-G-NEXT: v_or_b32_e32 v20, v7, v6
+; GFX9-G-NEXT: v_xor_b32_e32 v6, 0x7f, v0
+; GFX9-G-NEXT: v_or_b32_e32 v14, v6, v2
+; GFX9-G-NEXT: v_and_b32_e32 v6, 1, v20
+; GFX9-G-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-G-NEXT: v_cndmask_b32_e64 v6, v10, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v7, v11, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, v12, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, v13, 0, vcc
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
+; GFX9-G-NEXT: v_or_b32_e32 v14, v20, v14
+; GFX9-G-NEXT: v_and_b32_e32 v14, 1, v14
+; GFX9-G-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-G-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GFX9-G-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GFX9-G-NEXT: s_cbranch_execz .LBB0_6
+; GFX9-G-NEXT: ; %bb.1: ; %udiv-bb1
+; GFX9-G-NEXT: v_add_co_u32_e32 v20, vcc, 1, v0
+; GFX9-G-NEXT: v_addc_co_u32_e32 v21, vcc, 0, v1, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v22, vcc, 0, v2, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v23, vcc, 0, v3, vcc
+; GFX9-G-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GFX9-G-NEXT: v_sub_co_u32_e32 v8, vcc, 0x7f, v0
+; GFX9-G-NEXT: v_sub_u32_e32 v0, 64, v8
+; GFX9-G-NEXT: v_lshrrev_b64 v[0:1], v0, v[10:11]
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], v8, v[12:13]
+; GFX9-G-NEXT: v_subrev_u32_e32 v9, 64, v8
+; GFX9-G-NEXT: v_lshlrev_b64 v[6:7], v8, v[10:11]
+; GFX9-G-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], v9, v[10:11]
+; GFX9-G-NEXT: v_cmp_gt_u32_e32 vcc, 64, v8
+; GFX9-G-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v7, 0, v7, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-G-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
+; GFX9-G-NEXT: v_cndmask_b32_e32 v8, v0, v12, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v9, v1, v13, vcc
+; GFX9-G-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GFX9-G-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-NEXT: v_mov_b32_e32 v2, s10
+; GFX9-G-NEXT: v_mov_b32_e32 v3, s11
+; GFX9-G-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GFX9-G-NEXT: s_xor_b64 s[12:13], exec, s[8:9]
+; GFX9-G-NEXT: s_cbranch_execz .LBB0_5
+; GFX9-G-NEXT: ; %bb.2: ; %udiv-preheader
+; GFX9-G-NEXT: v_sub_u32_e32 v2, 64, v20
+; GFX9-G-NEXT: v_lshrrev_b64 v[0:1], v20, v[10:11]
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], v2, v[12:13]
+; GFX9-G-NEXT: v_subrev_u32_e32 v24, 64, v20
+; GFX9-G-NEXT: v_lshrrev_b64 v[14:15], v20, v[12:13]
+; GFX9-G-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX9-G-NEXT: v_lshrrev_b64 v[0:1], v24, v[12:13]
+; GFX9-G-NEXT: v_cmp_gt_u32_e32 vcc, 64, v20
+; GFX9-G-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v14, 0, v14, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v15, 0, v15, vcc
+; GFX9-G-NEXT: v_add_co_u32_e32 v24, vcc, -1, v18
+; GFX9-G-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v20
+; GFX9-G-NEXT: v_addc_co_u32_e32 v25, vcc, -1, v19, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v12, v0, v10, s[4:5]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v13, v1, v11, s[4:5]
+; GFX9-G-NEXT: v_addc_co_u32_e32 v26, vcc, -1, v4, vcc
+; GFX9-G-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GFX9-G-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-NEXT: v_addc_co_u32_e32 v27, vcc, -1, v5, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v11, 0
+; GFX9-G-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-NEXT: v_mov_b32_e32 v2, s10
+; GFX9-G-NEXT: v_mov_b32_e32 v3, s11
+; GFX9-G-NEXT: .LBB0_3: ; %udiv-do-while
+; GFX9-G-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], 1, v[6:7]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v10, 31, v7
+; GFX9-G-NEXT: v_or_b32_e32 v6, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v7, v1, v3
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], 1, v[12:13]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v12, 31, v9
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 1, v[14:15]
+; GFX9-G-NEXT: v_or_b32_e32 v2, v2, v12
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v14, 31, v13
+; GFX9-G-NEXT: v_sub_co_u32_e32 v12, vcc, v24, v2
+; GFX9-G-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX9-G-NEXT: v_subb_co_u32_e32 v12, vcc, v25, v3, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v12, vcc, v26, v0, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v12, vcc, v27, v1, vcc
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v28, 31, v12
+; GFX9-G-NEXT: v_and_b32_e32 v12, v28, v18
+; GFX9-G-NEXT: v_sub_co_u32_e32 v12, vcc, v2, v12
+; GFX9-G-NEXT: v_and_b32_e32 v2, v28, v19
+; GFX9-G-NEXT: v_subb_co_u32_e32 v13, vcc, v3, v2, vcc
+; GFX9-G-NEXT: v_and_b32_e32 v2, v28, v4
+; GFX9-G-NEXT: v_subb_co_u32_e32 v14, vcc, v0, v2, vcc
+; GFX9-G-NEXT: v_and_b32_e32 v0, v28, v5
+; GFX9-G-NEXT: v_subb_co_u32_e32 v15, vcc, v1, v0, vcc
+; GFX9-G-NEXT: v_add_co_u32_e32 v20, vcc, -1, v20
+; GFX9-G-NEXT: v_addc_co_u32_e32 v21, vcc, -1, v21, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v22, vcc, -1, v22, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v23, vcc, -1, v23, vcc
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-G-NEXT: v_or_b32_e32 v0, v20, v22
+; GFX9-G-NEXT: v_or_b32_e32 v1, v21, v23
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GFX9-G-NEXT: v_or_b32_e32 v8, v8, v10
+; GFX9-G-NEXT: v_and_b32_e32 v10, 1, v28
+; GFX9-G-NEXT: v_mov_b32_e32 v0, v10
+; GFX9-G-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX9-G-NEXT: v_mov_b32_e32 v1, v11
+; GFX9-G-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX9-G-NEXT: s_cbranch_execnz .LBB0_3
+; GFX9-G-NEXT: ; %bb.4: ; %Flow
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX9-G-NEXT: .LBB0_5: ; %Flow2
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[12:13]
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], 1, v[6:7]
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v4, 31, v7
+; GFX9-G-NEXT: v_or_b32_e32 v8, v8, v4
+; GFX9-G-NEXT: v_or_b32_e32 v6, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v7, v1, v3
+; GFX9-G-NEXT: .LBB0_6: ; %Flow3
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-G-NEXT: v_xor_b32_e32 v3, v17, v16
+; GFX9-G-NEXT: v_xor_b32_e32 v0, v6, v3
+; GFX9-G-NEXT: v_xor_b32_e32 v1, v7, v3
+; GFX9-G-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v3
+; GFX9-G-NEXT: v_xor_b32_e32 v2, v8, v3
+; GFX9-G-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-G-NEXT: v_xor_b32_e32 v4, v9, v3
+; GFX9-G-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v3, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v3, vcc, v4, v3, vcc
+; GFX9-G-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-O0-LABEL: v_sdiv_i128_vv:
+; GFX9-G-O0: ; %bb.0: ; %_udiv-special-cases
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: ; implicit-def: $vgpr8 : SGPR spill to VGPR lane
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v0
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9_vgpr10_vgpr11 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v3
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v7
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14_vgpr15_vgpr16 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v1
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_mov_b64 s[12:13], 0x7f
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s6
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v12, v3, v8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr1 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v10, v1, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr13_vgpr14 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v16
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s6
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v11, v3, v8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr1 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v9, v1, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v1, v12, v1
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v4, v12, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v6
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v3, v10, v3
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v2, v10, v2
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v1, s[6:7], v1, v12
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v7, s[6:7], v4, v12, s[6:7]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v6, s[6:7], v3, v10, s[6:7]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v5, s[6:7], v2, v10, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v5, v11, v5
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v8, v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v14
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v7, v9, v7
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v6, v9, v6
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v5, s[6:7], v5, v11
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v15, s[6:7], v8, v11, s[6:7]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v14, s[6:7], v7, v9, s[6:7]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v13, s[6:7], v6, v9, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6_vgpr7_vgpr8 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v13
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v13, v11, v12
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v11, v11, v12
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v11, v9, v10
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v9, v9, v10
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v9, v12
+; GFX9-G-O0-NEXT: v_or_b32_e64 v11, v10, v11
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[6:7], v[9:10], v[11:12]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v9, v12
+; GFX9-G-O0-NEXT: v_or_b32_e64 v11, v10, v11
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], v[11:12]
+; GFX9-G-O0-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], v[5:6]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v11
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v6, v6, v7
+; GFX9-G-O0-NEXT: v_min_u32_e64 v5, v5, v6
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 64
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-G-O0-NEXT: v_add_u32_e64 v6, v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v7, v8
+; GFX9-G-O0-NEXT: v_min_u32_e64 v5, v5, v7
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s16, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], v[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v12
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v7, v8
+; GFX9-G-O0-NEXT: v_min_u32_e64 v6, v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s10
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v8, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v8, v8, v9
+; GFX9-G-O0-NEXT: v_min_u32_e64 v6, v6, v8
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s15, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s11, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s14, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v6, s[8:9], v5, v6
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s16
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v7, s[8:9], v5, v7, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s14
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v9, s[8:9], v5, v8, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s10
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v5, v8, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s4
+; GFX9-G-O0-NEXT: v_cmp_gt_u64_e64 s[10:11], v[12:13], v[14:15]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[12:13], v[14:15]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, s13
+; GFX9-G-O0-NEXT: v_cmp_gt_u64_e64 s[12:13], v[10:11], v[12:13]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v10, v5, v10, s[12:13]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[10:11]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v10, v5, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[6:7]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s7, 0x7f
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v6, v6, s7
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v7, v7, s6
+; GFX9-G-O0-NEXT: v_or_b32_e64 v6, v6, v9
+; GFX9-G-O0-NEXT: v_or_b32_e64 v8, v7, v8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[6:7], v[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v3
+; GFX9-G-O0-NEXT: v_and_b32_e32 v1, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v1, v1, v4, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_and_b32_e32 v3, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr1_vgpr2 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-G-O0-NEXT: v_and_b32_e32 v5, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v5
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], -1
+; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], exec
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s4, 0
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s5, 1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execz .LBB0_3
+; GFX9-G-O0-NEXT: s_branch .LBB0_8
+; GFX9-G-O0-NEXT: .LBB0_1: ; %Flow
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v0, 2
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v0, 3
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: ; %bb.2: ; %Flow
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(4)
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_5
+; GFX9-G-O0-NEXT: .LBB0_3: ; %Flow2
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v4, 0
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v4, 1
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_9
+; GFX9-G-O0-NEXT: .LBB0_4: ; %udiv-loop-exit
+; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[10:11], v0, v[2:3]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[4:5]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr2 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v6, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v11
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v7
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v1, v5
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-G-O0-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v5
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_3
+; GFX9-G-O0-NEXT: .LBB0_5: ; %Flow1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v8, 4
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v8, 5
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_4
+; GFX9-G-O0-NEXT: .LBB0_6: ; %udiv-do-while
+; GFX9-G-O0-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s6, v16, 6
+; GFX9-G-O0-NEXT: v_readlane_b32 s7, v16, 7
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(16)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[21:22], v2, v[0:1]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[4:5], v2, v[3:4]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v2, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v0, v1
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v15
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v22
+; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v2, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[23:24], v0, v[2:3]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[12:13]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr2 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v14, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(8)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v29, v31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v30, v32
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v33
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v34
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v29
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v30
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v23
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v24
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v15
+; GFX9-G-O0-NEXT: v_or_b32_e64 v13, v1, v13
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v22
+; GFX9-G-O0-NEXT: v_or3_b32 v12, v12, v14, v15
+; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v13
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v11, s[8:9], v11, v4
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v10, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v8, v7, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v6, v5, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s8
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v8, v6, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s8
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v6, v6, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 1
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_and_b32_e64 v12, v8, s9
+; GFX9-G-O0-NEXT: v_and_b32_e64 v10, v8, s8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, s4
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12_vgpr13 killed $vgpr12_vgpr13 def $vgpr12_vgpr13_vgpr14_vgpr15 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v25
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v24, v26
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v27
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v28
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v23
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v24
+; GFX9-G-O0-NEXT: v_and_b32_e64 v11, v8, v11
+; GFX9-G-O0-NEXT: v_and_b32_e64 v10, v8, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v22
+; GFX9-G-O0-NEXT: v_and_b32_e64 v8, v6, v8
+; GFX9-G-O0-NEXT: v_and_b32_e64 v6, v6, v21
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v4, s[8:9], v4, v11
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v9, s[8:9], v7, v8, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v5, v6, s[8:9]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5_vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v20
+; GFX9-G-O0-NEXT: s_mov_b32 s8, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s12, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s11, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, -1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, s8
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v17, s[8:9], v11, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s12
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v18, s[8:9], v10, v11, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, s11
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v20, s[8:9], v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, s10
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v19, s[8:9], v8, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v20
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v19
+; GFX9-G-O0-NEXT: v_or_b32_e64 v17, v17, v20
+; GFX9-G-O0-NEXT: v_or_b32_e64 v19, v18, v19
+; GFX9-G-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[17:18], v[19:20]
+; GFX9-G-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v0
+; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v12
+; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s6, 2
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s7, 3
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s6, 6
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s7, 7
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execnz .LBB0_6
+; GFX9-G-O0-NEXT: s_branch .LBB0_1
+; GFX9-G-O0-NEXT: .LBB0_7: ; %udiv-preheader
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 64
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s4
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v4, v13, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v5, v5, v13
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s4
+; GFX9-G-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v13, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s6
+; GFX9-G-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v13, v6
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[6:7], v13, v[21:22]
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[26:27], v13, v[15:16]
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[24:25], v5, v[21:22]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v26
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v27
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v24
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v25
+; GFX9-G-O0-NEXT: v_or_b32_e64 v14, v14, v23
+; GFX9-G-O0-NEXT: v_or_b32_e64 v13, v5, v13
+; GFX9-G-O0-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[21:22], v4, v[21:22]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v22
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[4:5]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v13, s[4:5]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v16
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v13, v5, v13, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[4:5]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4_vgpr5 killed $vgpr4_vgpr5 def $vgpr4_vgpr5_vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v20
+; GFX9-G-O0-NEXT: s_mov_b32 s4, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s7, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s6, -1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, s4
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v16, s[4:5], v16, v17
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, s10
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v15, s[4:5], v15, v16, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s7
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v14, s[4:5], v14, v15, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s6
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v13, s[4:5], v13, v14, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[8:9]
+; GFX9-G-O0-NEXT: v_writelane_b32 v12, s8, 6
+; GFX9-G-O0-NEXT: v_writelane_b32 v12, s9, 7
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s4
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_6
+; GFX9-G-O0-NEXT: .LBB0_8: ; %udiv-bb1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(3)
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v5, s[6:7], v2, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(1)
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v6, s[6:7], v4, v6, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s9
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v8, s[6:7], v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v7, s[6:7], v1, v3, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v7
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0x7f
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v4, s[6:7], v1, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s7, 64
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v3, v4, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v9, v1, v4
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_cmp_lt_u32_e64 s[8:9], v4, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v4, v1
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[1:2], v4, v[13:14]
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[18:19], v9, v[13:14]
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[16:17], v4, v[11:12]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v17
+; GFX9-G-O0-NEXT: v_or_b32_e64 v10, v10, v15
+; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v4, v9
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[13:14], v3, v[13:14]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[8:9]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v14
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v12
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v3
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr1_vgpr2 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[8:9], s[4:5]
+; GFX9-G-O0-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v8
+; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v6, v7
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s4
+; GFX9-G-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[5:6], v[7:8]
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s11
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], exec
+; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s6, 4
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s7, 5
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execz .LBB0_5
+; GFX9-G-O0-NEXT: s_branch .LBB0_7
+; GFX9-G-O0-NEXT: .LBB0_9: ; %udiv-end
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v0, v0, v8
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v1, v1, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v10
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v2, v2, v6
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v3, v3, v5
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v0, s[4:5], v0, v8
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v1, s[4:5], v1, v7, s[4:5]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v2, s[4:5], v2, v6, s[4:5]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v3, s[4:5], v3, v5, s[4:5]
+; GFX9-G-O0-NEXT: ; kill: killed $vgpr4
+; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_nop 0
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_setpc_b64 s[30:31]
%div = sdiv i128 %lhs, %rhs
ret i128 %div
}
@@ -2306,6 +3457,1043 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-LABEL: v_udiv_i128_vv:
+; GFX9-G: ; %bb.0: ; %_udiv-special-cases
+; GFX9-G-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-NEXT: v_or_b32_e32 v8, v4, v6
+; GFX9-G-NEXT: v_or_b32_e32 v9, v5, v7
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX9-G-NEXT: v_or_b32_e32 v8, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v9, v1, v3
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[8:9]
+; GFX9-G-NEXT: v_ffbh_u32_e32 v9, v4
+; GFX9-G-NEXT: v_ffbh_u32_e32 v8, v5
+; GFX9-G-NEXT: v_add_u32_e32 v9, 32, v9
+; GFX9-G-NEXT: v_ffbh_u32_e32 v10, v6
+; GFX9-G-NEXT: v_min_u32_e32 v8, v8, v9
+; GFX9-G-NEXT: v_ffbh_u32_e32 v9, v7
+; GFX9-G-NEXT: v_add_u32_e32 v10, 32, v10
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[6:7]
+; GFX9-G-NEXT: v_add_u32_e32 v8, 64, v8
+; GFX9-G-NEXT: v_min_u32_e32 v9, v9, v10
+; GFX9-G-NEXT: v_ffbh_u32_e32 v10, v0
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, v9, v8, s[6:7]
+; GFX9-G-NEXT: v_ffbh_u32_e32 v9, v1
+; GFX9-G-NEXT: v_add_u32_e32 v10, 32, v10
+; GFX9-G-NEXT: v_ffbh_u32_e32 v11, v2
+; GFX9-G-NEXT: v_min_u32_e32 v9, v9, v10
+; GFX9-G-NEXT: v_ffbh_u32_e32 v10, v3
+; GFX9-G-NEXT: v_add_u32_e32 v11, 32, v11
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[2:3]
+; GFX9-G-NEXT: v_add_u32_e32 v9, 64, v9
+; GFX9-G-NEXT: v_min_u32_e32 v10, v10, v11
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, v10, v9, s[6:7]
+; GFX9-G-NEXT: v_sub_co_u32_e64 v12, s[6:7], v8, v9
+; GFX9-G-NEXT: v_subb_co_u32_e64 v13, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v8, 0x7f
+; GFX9-G-NEXT: v_subb_co_u32_e64 v14, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v9, 0
+; GFX9-G-NEXT: v_subb_co_u32_e64 v15, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_cmp_gt_u64_e64 s[6:7], v[12:13], v[8:9]
+; GFX9-G-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, 0, 1, s[6:7]
+; GFX9-G-NEXT: v_cmp_lt_u64_e64 s[6:7], 0, v[14:15]
+; GFX9-G-NEXT: v_or_b32_e32 v17, v13, v15
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[6:7]
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
+; GFX9-G-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, v9, v8, s[6:7]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[4:5]
+; GFX9-G-NEXT: v_or_b32_e32 v18, v9, v8
+; GFX9-G-NEXT: v_xor_b32_e32 v8, 0x7f, v12
+; GFX9-G-NEXT: v_or_b32_e32 v16, v8, v14
+; GFX9-G-NEXT: v_and_b32_e32 v8, 1, v18
+; GFX9-G-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-G-NEXT: v_cndmask_b32_e64 v10, v0, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v11, v1, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, v2, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, v3, 0, vcc
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
+; GFX9-G-NEXT: v_or_b32_e32 v16, v18, v16
+; GFX9-G-NEXT: v_and_b32_e32 v16, 1, v16
+; GFX9-G-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-G-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GFX9-G-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GFX9-G-NEXT: s_cbranch_execz .LBB1_6
+; GFX9-G-NEXT: ; %bb.1: ; %udiv-bb1
+; GFX9-G-NEXT: v_add_co_u32_e32 v18, vcc, 1, v12
+; GFX9-G-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v13, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v20, vcc, 0, v14, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v21, vcc, 0, v15, vcc
+; GFX9-G-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GFX9-G-NEXT: v_sub_co_u32_e32 v16, vcc, 0x7f, v12
+; GFX9-G-NEXT: v_sub_u32_e32 v8, 64, v16
+; GFX9-G-NEXT: v_lshrrev_b64 v[8:9], v8, v[0:1]
+; GFX9-G-NEXT: v_lshlrev_b64 v[10:11], v16, v[2:3]
+; GFX9-G-NEXT: v_subrev_u32_e32 v14, 64, v16
+; GFX9-G-NEXT: v_lshlrev_b64 v[12:13], v16, v[0:1]
+; GFX9-G-NEXT: v_or_b32_e32 v10, v8, v10
+; GFX9-G-NEXT: v_or_b32_e32 v11, v9, v11
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], v14, v[0:1]
+; GFX9-G-NEXT: v_cmp_gt_u32_e32 vcc, 64, v16
+; GFX9-G-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GFX9-G-NEXT: v_cndmask_b32_e32 v14, 0, v12, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v15, 0, v13, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; GFX9-G-NEXT: v_cmp_eq_u32_e32 vcc, 0, v16
+; GFX9-G-NEXT: v_mov_b32_e32 v13, s11
+; GFX9-G-NEXT: v_cndmask_b32_e32 v8, v8, v2, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v9, v9, v3, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v11, s9
+; GFX9-G-NEXT: v_mov_b32_e32 v10, s8
+; GFX9-G-NEXT: v_mov_b32_e32 v12, s10
+; GFX9-G-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GFX9-G-NEXT: s_xor_b64 s[12:13], exec, s[8:9]
+; GFX9-G-NEXT: s_cbranch_execz .LBB1_5
+; GFX9-G-NEXT: ; %bb.2: ; %udiv-preheader
+; GFX9-G-NEXT: v_sub_u32_e32 v12, 64, v18
+; GFX9-G-NEXT: v_subrev_u32_e32 v22, 64, v18
+; GFX9-G-NEXT: v_lshrrev_b64 v[10:11], v18, v[0:1]
+; GFX9-G-NEXT: v_lshlrev_b64 v[12:13], v12, v[2:3]
+; GFX9-G-NEXT: v_lshrrev_b64 v[16:17], v18, v[2:3]
+; GFX9-G-NEXT: v_lshrrev_b64 v[2:3], v22, v[2:3]
+; GFX9-G-NEXT: v_or_b32_e32 v10, v10, v12
+; GFX9-G-NEXT: v_or_b32_e32 v11, v11, v13
+; GFX9-G-NEXT: v_cmp_gt_u32_e32 vcc, 64, v18
+; GFX9-G-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v16, 0, v16, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v17, 0, v17, vcc
+; GFX9-G-NEXT: v_add_co_u32_e32 v22, vcc, -1, v4
+; GFX9-G-NEXT: v_addc_co_u32_e32 v23, vcc, -1, v5, vcc
+; GFX9-G-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GFX9-G-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v18
+; GFX9-G-NEXT: v_addc_co_u32_e32 v24, vcc, -1, v6, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v13, s11
+; GFX9-G-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v3, v3, v1, s[4:5]
+; GFX9-G-NEXT: v_addc_co_u32_e32 v25, vcc, -1, v7, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-G-NEXT: v_mov_b32_e32 v11, s9
+; GFX9-G-NEXT: v_mov_b32_e32 v10, s8
+; GFX9-G-NEXT: v_mov_b32_e32 v12, s10
+; GFX9-G-NEXT: .LBB1_3: ; %udiv-do-while
+; GFX9-G-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-G-NEXT: v_lshlrev_b64 v[12:13], 1, v[14:15]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v0, 31, v15
+; GFX9-G-NEXT: v_or_b32_e32 v14, v10, v12
+; GFX9-G-NEXT: v_or_b32_e32 v15, v11, v13
+; GFX9-G-NEXT: v_lshlrev_b64 v[12:13], 1, v[16:17]
+; GFX9-G-NEXT: v_lshlrev_b64 v[10:11], 1, v[2:3]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 31, v3
+; GFX9-G-NEXT: v_or_b32_e32 v12, v12, v2
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 31, v9
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-G-NEXT: v_or_b32_e32 v2, v10, v2
+; GFX9-G-NEXT: v_or_b32_e32 v8, v8, v0
+; GFX9-G-NEXT: v_sub_co_u32_e32 v0, vcc, v22, v2
+; GFX9-G-NEXT: v_subb_co_u32_e32 v0, vcc, v23, v11, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v0, vcc, v24, v12, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v0, vcc, v25, v13, vcc
+; GFX9-G-NEXT: v_add_co_u32_e64 v18, s[4:5], -1, v18
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v3, 31, v0
+; GFX9-G-NEXT: v_addc_co_u32_e64 v19, s[4:5], -1, v19, s[4:5]
+; GFX9-G-NEXT: v_and_b32_e32 v10, v3, v4
+; GFX9-G-NEXT: v_addc_co_u32_e64 v20, s[4:5], -1, v20, s[4:5]
+; GFX9-G-NEXT: v_and_b32_e32 v16, v3, v5
+; GFX9-G-NEXT: v_sub_co_u32_e32 v2, vcc, v2, v10
+; GFX9-G-NEXT: v_addc_co_u32_e64 v21, s[4:5], -1, v21, s[4:5]
+; GFX9-G-NEXT: v_and_b32_e32 v0, 1, v3
+; GFX9-G-NEXT: v_and_b32_e32 v17, v3, v6
+; GFX9-G-NEXT: v_and_b32_e32 v26, v3, v7
+; GFX9-G-NEXT: v_subb_co_u32_e32 v3, vcc, v11, v16, vcc
+; GFX9-G-NEXT: v_or_b32_e32 v10, v18, v20
+; GFX9-G-NEXT: v_or_b32_e32 v11, v19, v21
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
+; GFX9-G-NEXT: v_subb_co_u32_e32 v16, vcc, v12, v17, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-G-NEXT: v_subb_co_u32_e32 v17, vcc, v13, v26, vcc
+; GFX9-G-NEXT: s_or_b64 s[8:9], s[4:5], s[8:9]
+; GFX9-G-NEXT: v_mov_b32_e32 v10, v0
+; GFX9-G-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX9-G-NEXT: s_cbranch_execnz .LBB1_3
+; GFX9-G-NEXT: ; %bb.4: ; %Flow
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX9-G-NEXT: .LBB1_5: ; %Flow2
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[12:13]
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 1, v[14:15]
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 31, v15
+; GFX9-G-NEXT: v_or_b32_e32 v8, v8, v2
+; GFX9-G-NEXT: v_or_b32_e32 v10, v10, v0
+; GFX9-G-NEXT: v_or_b32_e32 v11, v11, v1
+; GFX9-G-NEXT: .LBB1_6: ; %Flow3
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v0, v10
+; GFX9-G-NEXT: v_mov_b32_e32 v1, v11
+; GFX9-G-NEXT: v_mov_b32_e32 v2, v8
+; GFX9-G-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-O0-LABEL: v_udiv_i128_vv:
+; GFX9-G-O0: ; %bb.0: ; %_udiv-special-cases
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: ; implicit-def: $vgpr8 : SGPR spill to VGPR lane
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v0
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9_vgpr10_vgpr11 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v3
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v4
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_nop 0
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v5
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v7
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6_vgpr7_vgpr8 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v9
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v9, v12
+; GFX9-G-O0-NEXT: v_or_b32_e64 v11, v10, v11
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[6:7], v[9:10], v[11:12]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v9, v12
+; GFX9-G-O0-NEXT: v_or_b32_e64 v11, v10, v11
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], v[11:12]
+; GFX9-G-O0-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], v[5:6]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v11
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v6, v6, v7
+; GFX9-G-O0-NEXT: v_min_u32_e64 v5, v5, v6
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 64
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-G-O0-NEXT: v_add_u32_e64 v6, v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v7, v8
+; GFX9-G-O0-NEXT: v_min_u32_e64 v5, v5, v7
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s14, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], v[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v12
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v7, v8
+; GFX9-G-O0-NEXT: v_min_u32_e64 v6, v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s10
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v8, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v8, v8, v9
+; GFX9-G-O0-NEXT: v_min_u32_e64 v6, v6, v8
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s13, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s11, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s12, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v6, s[8:9], v5, v6
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s14
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v7, s[8:9], v5, v7, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s12
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v9, s[8:9], v5, v8, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s10
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v5, v8, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[12:13], 0x7f
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s4
+; GFX9-G-O0-NEXT: v_cmp_gt_u64_e64 s[10:11], v[12:13], v[14:15]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[12:13], v[14:15]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, s13
+; GFX9-G-O0-NEXT: v_cmp_gt_u64_e64 s[12:13], v[10:11], v[12:13]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v10, v5, v10, s[12:13]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[10:11]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v10, v5, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[6:7]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s7, 0x7f
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v6, v6, s7
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v7, v7, s6
+; GFX9-G-O0-NEXT: v_or_b32_e64 v6, v6, v9
+; GFX9-G-O0-NEXT: v_or_b32_e64 v8, v7, v8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[6:7], v[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v3
+; GFX9-G-O0-NEXT: v_and_b32_e32 v1, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v1, v1, v4, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_and_b32_e32 v3, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr1_vgpr2 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-G-O0-NEXT: v_and_b32_e32 v5, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v5
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], -1
+; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], exec
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s4, 0
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s5, 1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execz .LBB1_3
+; GFX9-G-O0-NEXT: s_branch .LBB1_8
+; GFX9-G-O0-NEXT: .LBB1_1: ; %Flow
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v0, 2
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v0, 3
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: ; %bb.2: ; %Flow
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(4)
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_5
+; GFX9-G-O0-NEXT: .LBB1_3: ; %Flow2
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v4, 0
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v4, 1
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_9
+; GFX9-G-O0-NEXT: .LBB1_4: ; %udiv-loop-exit
+; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[10:11], v0, v[2:3]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[4:5]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr2 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v6, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v11
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v7
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v1, v5
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-G-O0-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v5
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_3
+; GFX9-G-O0-NEXT: .LBB1_5: ; %Flow1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v8, 4
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v8, 5
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_4
+; GFX9-G-O0-NEXT: .LBB1_6: ; %udiv-do-while
+; GFX9-G-O0-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s6, v16, 6
+; GFX9-G-O0-NEXT: v_readlane_b32 s7, v16, 7
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(16)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[21:22], v2, v[0:1]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[4:5], v2, v[3:4]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v2, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v0, v1
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v15
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v22
+; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v2, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[23:24], v0, v[2:3]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[12:13]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr2 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v14, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(8)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v29, v31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v30, v32
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v33
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v34
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v29
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v30
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v23
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v24
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v15
+; GFX9-G-O0-NEXT: v_or_b32_e64 v13, v1, v13
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v22
+; GFX9-G-O0-NEXT: v_or3_b32 v12, v12, v14, v15
+; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v13
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v11, s[8:9], v11, v4
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v10, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v8, v7, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v6, v5, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s8
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v8, v6, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s8
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v6, v6, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 1
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_and_b32_e64 v12, v8, s9
+; GFX9-G-O0-NEXT: v_and_b32_e64 v10, v8, s8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, s4
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12_vgpr13 killed $vgpr12_vgpr13 def $vgpr12_vgpr13_vgpr14_vgpr15 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v25
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v24, v26
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v27
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v28
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v23
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v24
+; GFX9-G-O0-NEXT: v_and_b32_e64 v11, v8, v11
+; GFX9-G-O0-NEXT: v_and_b32_e64 v10, v8, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v22
+; GFX9-G-O0-NEXT: v_and_b32_e64 v8, v6, v8
+; GFX9-G-O0-NEXT: v_and_b32_e64 v6, v6, v21
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v4, s[8:9], v4, v11
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v9, s[8:9], v7, v8, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v5, v6, s[8:9]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5_vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v20
+; GFX9-G-O0-NEXT: s_mov_b32 s8, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s12, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s11, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, -1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, s8
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v17, s[8:9], v11, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s12
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v18, s[8:9], v10, v11, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, s11
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v20, s[8:9], v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, s10
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v19, s[8:9], v8, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v20
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v19
+; GFX9-G-O0-NEXT: v_or_b32_e64 v17, v17, v20
+; GFX9-G-O0-NEXT: v_or_b32_e64 v19, v18, v19
+; GFX9-G-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[17:18], v[19:20]
+; GFX9-G-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v0
+; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v12
+; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s6, 2
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s7, 3
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s6, 6
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s7, 7
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execnz .LBB1_6
+; GFX9-G-O0-NEXT: s_branch .LBB1_1
+; GFX9-G-O0-NEXT: .LBB1_7: ; %udiv-preheader
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 64
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s4
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v4, v13, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v5, v5, v13
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s4
+; GFX9-G-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v13, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s6
+; GFX9-G-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v13, v6
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[6:7], v13, v[21:22]
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[26:27], v13, v[15:16]
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[24:25], v5, v[21:22]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v26
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v27
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v24
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v25
+; GFX9-G-O0-NEXT: v_or_b32_e64 v14, v14, v23
+; GFX9-G-O0-NEXT: v_or_b32_e64 v13, v5, v13
+; GFX9-G-O0-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[21:22], v4, v[21:22]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v22
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[4:5]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v13, s[4:5]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v16
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v13, v5, v13, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[4:5]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4_vgpr5 killed $vgpr4_vgpr5 def $vgpr4_vgpr5_vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v20
+; GFX9-G-O0-NEXT: s_mov_b32 s4, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s7, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s6, -1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, s4
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v16, s[4:5], v16, v17
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, s10
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v15, s[4:5], v15, v16, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s7
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v14, s[4:5], v14, v15, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s6
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v13, s[4:5], v13, v14, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[8:9]
+; GFX9-G-O0-NEXT: v_writelane_b32 v12, s8, 6
+; GFX9-G-O0-NEXT: v_writelane_b32 v12, s9, 7
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s4
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_6
+; GFX9-G-O0-NEXT: .LBB1_8: ; %udiv-bb1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(3)
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v5, s[6:7], v2, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(1)
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v6, s[6:7], v4, v6, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s9
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v8, s[6:7], v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v7, s[6:7], v1, v3, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v7
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0x7f
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v4, s[6:7], v1, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s7, 64
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v3, v4, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v9, v1, v4
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_cmp_lt_u32_e64 s[8:9], v4, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v4, v1
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[1:2], v4, v[13:14]
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[18:19], v9, v[13:14]
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[16:17], v4, v[11:12]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v17
+; GFX9-G-O0-NEXT: v_or_b32_e64 v10, v10, v15
+; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v4, v9
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[13:14], v3, v[13:14]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[8:9]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v14
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v12
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v3
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr1_vgpr2 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[8:9], s[4:5]
+; GFX9-G-O0-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v8
+; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v6, v7
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s4
+; GFX9-G-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[5:6], v[7:8]
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s11
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], exec
+; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s6, 4
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s7, 5
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execz .LBB1_5
+; GFX9-G-O0-NEXT: s_branch .LBB1_7
+; GFX9-G-O0-NEXT: .LBB1_9: ; %udiv-end
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v8
+; GFX9-G-O0-NEXT: ; kill: killed $vgpr4
+; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_nop 0
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_setpc_b64 s[30:31]
%div = udiv i128 %lhs, %rhs
ret i128 %div
}
@@ -2388,6 +4576,66 @@ define i128 @v_sdiv_i128_v_pow2k(i128 %lhs) {
; GFX9-O0-NEXT: v_lshrrev_b64 v[3:4], s4, v[3:4]
; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr3_vgpr4 killed $exec
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-LABEL: v_sdiv_i128_v_pow2k:
+; GFX9-G: ; %bb.0:
+; GFX9-G-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v4, 31, v3
+; GFX9-G-NEXT: v_mov_b32_e32 v5, v4
+; GFX9-G-NEXT: v_lshrrev_b64 v[4:5], 31, v[4:5]
+; GFX9-G-NEXT: v_add_co_u32_e32 v0, vcc, v0, v4
+; GFX9-G-NEXT: v_addc_co_u32_e32 v4, vcc, v1, v5, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v3, vcc
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v3, 1, v4
+; GFX9-G-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v2, 1, v2
+; GFX9-G-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-O0-LABEL: v_sdiv_i128_v_pow2k:
+; GFX9-G-O0: ; %bb.0:
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v0
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v0, v0, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v0
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[6:7], v0, v[5:6]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v7
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s5, 0
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v4, s[6:7], v4, v5
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v1, s[6:7], v1, v0, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v5, s[6:7], v2, v0, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v4, s[6:7], v3, v0, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-G-O0-NEXT: s_mov_b32 s5, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v0, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s5, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[5:6], v2, v[5:6]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v6
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v1, v1, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v3, v2, v4
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v2, v2, v4
+; GFX9-G-O0-NEXT: s_setpc_b64 s[30:31]
%div = sdiv i128 %lhs, 8589934592
ret i128 %div
}
@@ -2434,10 +4682,42 @@ define i128 @v_udiv_i128_v_pow2k(i128 %lhs) {
; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v3, 0
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-LABEL: v_udiv_i128_v_pow2k:
+; GFX9-G: ; %bb.0:
+; GFX9-G-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-NEXT: v_mov_b32_e32 v4, v1
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 31, v[2:3]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 1, v4
+; GFX9-G-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 1, v3
+; GFX9-G-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-G-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-O0-LABEL: v_udiv_i128_v_pow2k:
+; GFX9-G-O0: ; %bb.0:
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v0, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[5:6], v2, v[4:5]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v6
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v4
+; GFX9-G-O0-NEXT: v_or_b32_e64 v1, v1, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v2, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s4
+; GFX9-G-O0-NEXT: s_setpc_b64 s[30:31]
%div = udiv i128 %lhs, 8589934592
ret i128 %div
}
-
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX9-SDAG: {{.*}}
-; GFX9-SDAG-O0: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
index 46e2632..16a03ba 100644
--- a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
@@ -1,25 +1,3248 @@
-; RUN: not --crash llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -o - %s 2>&1 | FileCheck -check-prefix=SDAG-ERR %s
-; RUN: not --crash llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -o - %s 2>&1 | FileCheck -check-prefix=GISEL-ERR %s
-
-; SDAG-ERR: LLVM ERROR: unsupported libcall legalization
-; GISEL-ERR: LLVM ERROR: unable to legalize instruction: %{{[0-9]+}}:_(s128) = G_SDIV %{{[0-9]+}}:_, %{{[0-9]+}}:_ (in function: v_sdiv_v2i128_vv)
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -o - %s | FileCheck -check-prefix=SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -o - %s | FileCheck -check-prefix=GISEL %s
define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
+; SDAG-LABEL: v_sdiv_v2i128_vv:
+; SDAG: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_ashrrev_i32_e32 v24, 31, v3
+; SDAG-NEXT: v_ashrrev_i32_e32 v25, 31, v11
+; SDAG-NEXT: v_mov_b32_e32 v16, 0
+; SDAG-NEXT: s_mov_b64 s[10:11], 0x7f
+; SDAG-NEXT: v_mov_b32_e32 v26, v24
+; SDAG-NEXT: v_mov_b32_e32 v27, v25
+; SDAG-NEXT: v_xor_b32_e32 v17, v24, v3
+; SDAG-NEXT: v_xor_b32_e32 v18, v24, v2
+; SDAG-NEXT: v_xor_b32_e32 v1, v24, v1
+; SDAG-NEXT: v_xor_b32_e32 v0, v24, v0
+; SDAG-NEXT: v_xor_b32_e32 v19, v25, v11
+; SDAG-NEXT: v_xor_b32_e32 v20, v25, v10
+; SDAG-NEXT: v_xor_b32_e32 v9, v25, v9
+; SDAG-NEXT: v_xor_b32_e32 v8, v25, v8
+; SDAG-NEXT: v_sub_i32_e32 v2, vcc, v0, v24
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v1, v24, vcc
+; SDAG-NEXT: v_ffbh_u32_e32 v0, v2
+; SDAG-NEXT: v_subb_u32_e32 v10, vcc, v18, v24, vcc
+; SDAG-NEXT: v_add_i32_e64 v1, s[4:5], 32, v0
+; SDAG-NEXT: v_ffbh_u32_e32 v18, v3
+; SDAG-NEXT: v_subb_u32_e32 v11, vcc, v17, v24, vcc
+; SDAG-NEXT: v_or_b32_e32 v0, v2, v10
+; SDAG-NEXT: v_ffbh_u32_e32 v17, v10
+; SDAG-NEXT: v_min_u32_e32 v18, v1, v18
+; SDAG-NEXT: v_sub_i32_e32 v28, vcc, v8, v25
+; SDAG-NEXT: v_or_b32_e32 v1, v3, v11
+; SDAG-NEXT: v_add_i32_e64 v8, s[4:5], 32, v17
+; SDAG-NEXT: v_ffbh_u32_e32 v17, v11
+; SDAG-NEXT: v_add_i32_e64 v18, s[4:5], 64, v18
+; SDAG-NEXT: v_addc_u32_e64 v21, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_subb_u32_e32 v29, vcc, v9, v25, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; SDAG-NEXT: v_ffbh_u32_e32 v1, v28
+; SDAG-NEXT: v_min_u32_e32 v8, v8, v17
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[10:11]
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v21, 0, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v0, vcc, v20, v25, vcc
+; SDAG-NEXT: v_add_i32_e64 v9, s[8:9], 32, v1
+; SDAG-NEXT: v_ffbh_u32_e32 v20, v29
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v18, v8, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v19, v25, vcc
+; SDAG-NEXT: v_or_b32_e32 v8, v28, v0
+; SDAG-NEXT: v_ffbh_u32_e32 v19, v0
+; SDAG-NEXT: v_min_u32_e32 v20, v9, v20
+; SDAG-NEXT: v_or_b32_e32 v9, v29, v1
+; SDAG-NEXT: v_add_i32_e32 v19, vcc, 32, v19
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v1
+; SDAG-NEXT: v_add_i32_e32 v20, vcc, 64, v20
+; SDAG-NEXT: v_addc_u32_e64 v22, s[6:7], 0, 0, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; SDAG-NEXT: v_min_u32_e32 v8, v19, v21
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v22, 0, s[6:7]
+; SDAG-NEXT: s_or_b64 s[8:9], vcc, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v20, v8, s[6:7]
+; SDAG-NEXT: v_sub_i32_e32 v8, vcc, v8, v18
+; SDAG-NEXT: v_subb_u32_e32 v9, vcc, v9, v17, vcc
+; SDAG-NEXT: v_xor_b32_e32 v17, 0x7f, v8
+; SDAG-NEXT: v_subbrev_u32_e32 v18, vcc, 0, v16, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[10:11], v[8:9]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v19, vcc, 0, v16, vcc
+; SDAG-NEXT: v_or_b32_e32 v16, v17, v18
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_cndmask_b32_e64 v21, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v9, v19
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_cndmask_b32_e32 v20, v21, v20, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[16:17]
+; SDAG-NEXT: v_and_b32_e32 v16, 1, v20
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v16
+; SDAG-NEXT: s_or_b64 s[4:5], s[8:9], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v11, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v10, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v21, v3, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v2, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB0_6
+; SDAG-NEXT: ; %bb.1: ; %udiv-bb15
+; SDAG-NEXT: v_add_i32_e32 v30, vcc, 1, v8
+; SDAG-NEXT: v_sub_i32_e64 v20, s[4:5], 63, v8
+; SDAG-NEXT: v_mov_b32_e32 v16, 0
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: v_addc_u32_e32 v31, vcc, 0, v9, vcc
+; SDAG-NEXT: v_lshl_b64 v[20:21], v[2:3], v20
+; SDAG-NEXT: v_addc_u32_e32 v32, vcc, 0, v18, vcc
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, 0, v19, vcc
+; SDAG-NEXT: v_or_b32_e32 v18, v30, v32
+; SDAG-NEXT: v_sub_i32_e32 v34, vcc, 0x7f, v8
+; SDAG-NEXT: v_or_b32_e32 v19, v31, v33
+; SDAG-NEXT: v_lshl_b64 v[8:9], v[10:11], v34
+; SDAG-NEXT: v_sub_i32_e32 v35, vcc, 64, v34
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[2:3], v34
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_lshr_b64 v[18:19], v[2:3], v35
+; SDAG-NEXT: v_or_b32_e32 v9, v9, v19
+; SDAG-NEXT: v_or_b32_e32 v8, v8, v18
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v34
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v21, v9, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v20, v8, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v21, 0, v23, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, 0, v22, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v34
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v9, v11, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v8, v10, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v18, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB0_5
+; SDAG-NEXT: ; %bb.2: ; %udiv-preheader4
+; SDAG-NEXT: v_lshr_b64 v[16:17], v[2:3], v30
+; SDAG-NEXT: v_sub_i32_e32 v35, vcc, 64, v30
+; SDAG-NEXT: v_subrev_i32_e32 v36, vcc, 64, v30
+; SDAG-NEXT: v_lshr_b64 v[37:38], v[10:11], v30
+; SDAG-NEXT: v_add_i32_e32 v34, vcc, -1, v28
+; SDAG-NEXT: s_mov_b64 s[10:11], 0
+; SDAG-NEXT: v_mov_b32_e32 v22, 0
+; SDAG-NEXT: v_mov_b32_e32 v23, 0
+; SDAG-NEXT: v_mov_b32_e32 v18, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: v_lshl_b64 v[48:49], v[10:11], v35
+; SDAG-NEXT: v_lshr_b64 v[10:11], v[10:11], v36
+; SDAG-NEXT: v_addc_u32_e32 v35, vcc, -1, v29, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v17, v49
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v48
+; SDAG-NEXT: v_addc_u32_e32 v36, vcc, -1, v0, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v30
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v11, v17, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v10, v16, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v11, 0, v38, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, 0, v37, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v37, vcc, -1, v1, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v17, v3, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v2, v16, v2, vcc
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: .LBB0_3: ; %udiv-do-while3
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v16, 31, v3
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v38, 31, v9
+; SDAG-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v39, 31, v21
+; SDAG-NEXT: v_lshl_b64 v[20:21], v[20:21], 1
+; SDAG-NEXT: v_or_b32_e32 v10, v10, v16
+; SDAG-NEXT: v_or_b32_e32 v2, v2, v38
+; SDAG-NEXT: v_or_b32_e32 v8, v8, v39
+; SDAG-NEXT: v_or_b32_e32 v9, v19, v9
+; SDAG-NEXT: v_sub_i32_e32 v16, vcc, v34, v2
+; SDAG-NEXT: v_or_b32_e32 v8, v18, v8
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v35, v3, vcc
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v36, v10, vcc
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v37, v11, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v38, 31, v16
+; SDAG-NEXT: v_and_b32_e32 v39, v38, v28
+; SDAG-NEXT: v_and_b32_e32 v48, v38, v29
+; SDAG-NEXT: v_and_b32_e32 v49, v38, v0
+; SDAG-NEXT: v_and_b32_e32 v16, 1, v38
+; SDAG-NEXT: v_and_b32_e32 v38, v38, v1
+; SDAG-NEXT: v_sub_i32_e32 v2, vcc, v2, v39
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v3, v48, vcc
+; SDAG-NEXT: v_subb_u32_e32 v10, vcc, v10, v49, vcc
+; SDAG-NEXT: v_subb_u32_e32 v11, vcc, v11, v38, vcc
+; SDAG-NEXT: v_add_i32_e32 v30, vcc, -1, v30
+; SDAG-NEXT: v_addc_u32_e32 v31, vcc, -1, v31, vcc
+; SDAG-NEXT: v_addc_u32_e32 v32, vcc, -1, v32, vcc
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, -1, v33, vcc
+; SDAG-NEXT: v_or_b32_e32 v38, v30, v32
+; SDAG-NEXT: v_or_b32_e32 v39, v31, v33
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[38:39]
+; SDAG-NEXT: v_or_b32_e32 v21, v23, v21
+; SDAG-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; SDAG-NEXT: v_or_b32_e32 v20, v22, v20
+; SDAG-NEXT: v_mov_b32_e32 v23, v17
+; SDAG-NEXT: v_mov_b32_e32 v22, v16
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; SDAG-NEXT: s_cbranch_execnz .LBB0_3
+; SDAG-NEXT: ; %bb.4: ; %Flow13
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: .LBB0_5: ; %Flow14
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_lshl_b64 v[0:1], v[8:9], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v8, 31, v21
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[20:21], 1
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v8
+; SDAG-NEXT: v_or_b32_e32 v20, v19, v1
+; SDAG-NEXT: v_or_b32_e32 v21, v17, v3
+; SDAG-NEXT: v_or_b32_e32 v17, v18, v0
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v2
+; SDAG-NEXT: .LBB0_6: ; %Flow16
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_ashrrev_i32_e32 v18, 31, v7
+; SDAG-NEXT: v_ashrrev_i32_e32 v19, 31, v15
+; SDAG-NEXT: v_mov_b32_e32 v9, 0
+; SDAG-NEXT: s_mov_b64 s[10:11], 0x7f
+; SDAG-NEXT: v_mov_b32_e32 v22, v18
+; SDAG-NEXT: v_mov_b32_e32 v23, v19
+; SDAG-NEXT: v_xor_b32_e32 v0, v18, v7
+; SDAG-NEXT: v_xor_b32_e32 v1, v18, v6
+; SDAG-NEXT: v_xor_b32_e32 v3, v18, v5
+; SDAG-NEXT: v_xor_b32_e32 v2, v18, v4
+; SDAG-NEXT: v_xor_b32_e32 v6, v19, v15
+; SDAG-NEXT: v_xor_b32_e32 v7, v19, v14
+; SDAG-NEXT: v_xor_b32_e32 v8, v19, v13
+; SDAG-NEXT: v_xor_b32_e32 v10, v19, v12
+; SDAG-NEXT: v_sub_i32_e32 v2, vcc, v2, v18
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v3, v18, vcc
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v2
+; SDAG-NEXT: v_subb_u32_e32 v4, vcc, v1, v18, vcc
+; SDAG-NEXT: v_add_i32_e64 v1, s[4:5], 32, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v11, v3
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v0, v18, vcc
+; SDAG-NEXT: v_or_b32_e32 v0, v2, v4
+; SDAG-NEXT: v_ffbh_u32_e32 v12, v4
+; SDAG-NEXT: v_min_u32_e32 v11, v1, v11
+; SDAG-NEXT: v_sub_i32_e32 v28, vcc, v10, v19
+; SDAG-NEXT: v_or_b32_e32 v1, v3, v5
+; SDAG-NEXT: v_add_i32_e64 v10, s[4:5], 32, v12
+; SDAG-NEXT: v_ffbh_u32_e32 v12, v5
+; SDAG-NEXT: v_add_i32_e64 v11, s[4:5], 64, v11
+; SDAG-NEXT: v_addc_u32_e64 v13, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_subb_u32_e32 v29, vcc, v8, v19, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; SDAG-NEXT: v_ffbh_u32_e32 v1, v28
+; SDAG-NEXT: v_min_u32_e32 v8, v10, v12
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, v13, 0, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v0, vcc, v7, v19, vcc
+; SDAG-NEXT: v_add_i32_e64 v7, s[8:9], 32, v1
+; SDAG-NEXT: v_ffbh_u32_e32 v12, v29
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v11, v8, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v6, v19, vcc
+; SDAG-NEXT: v_or_b32_e32 v6, v28, v0
+; SDAG-NEXT: v_ffbh_u32_e32 v11, v0
+; SDAG-NEXT: v_min_u32_e32 v12, v7, v12
+; SDAG-NEXT: v_or_b32_e32 v7, v29, v1
+; SDAG-NEXT: v_add_i32_e32 v11, vcc, 32, v11
+; SDAG-NEXT: v_ffbh_u32_e32 v13, v1
+; SDAG-NEXT: v_add_i32_e32 v12, vcc, 64, v12
+; SDAG-NEXT: v_addc_u32_e64 v14, s[6:7], 0, 0, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; SDAG-NEXT: v_min_u32_e32 v6, v11, v13
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v7, v14, 0, s[6:7]
+; SDAG-NEXT: s_or_b64 s[8:9], vcc, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v6, v12, v6, s[6:7]
+; SDAG-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; SDAG-NEXT: v_subb_u32_e32 v7, vcc, v7, v10, vcc
+; SDAG-NEXT: v_xor_b32_e32 v10, 0x7f, v6
+; SDAG-NEXT: v_subbrev_u32_e32 v8, vcc, 0, v9, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[10:11], v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v9, vcc, 0, v9, vcc
+; SDAG-NEXT: v_or_b32_e32 v10, v10, v8
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; SDAG-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v11, v7, v9
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; SDAG-NEXT: v_cndmask_b32_e32 v12, v13, v12, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; SDAG-NEXT: v_and_b32_e32 v10, 1, v12
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v10
+; SDAG-NEXT: s_or_b64 s[4:5], s[8:9], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v13, v5, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v11, v4, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v14, v3, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, v2, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB0_12
+; SDAG-NEXT: ; %bb.7: ; %udiv-bb1
+; SDAG-NEXT: v_add_i32_e32 v30, vcc, 1, v6
+; SDAG-NEXT: v_sub_i32_e64 v12, s[4:5], 63, v6
+; SDAG-NEXT: v_mov_b32_e32 v10, 0
+; SDAG-NEXT: v_mov_b32_e32 v11, 0
+; SDAG-NEXT: v_addc_u32_e32 v31, vcc, 0, v7, vcc
+; SDAG-NEXT: v_lshl_b64 v[12:13], v[2:3], v12
+; SDAG-NEXT: v_addc_u32_e32 v32, vcc, 0, v8, vcc
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, 0, v9, vcc
+; SDAG-NEXT: v_or_b32_e32 v7, v30, v32
+; SDAG-NEXT: v_sub_i32_e32 v9, vcc, 0x7f, v6
+; SDAG-NEXT: v_or_b32_e32 v8, v31, v33
+; SDAG-NEXT: v_lshl_b64 v[14:15], v[4:5], v9
+; SDAG-NEXT: v_sub_i32_e32 v6, vcc, 64, v9
+; SDAG-NEXT: v_lshl_b64 v[34:35], v[2:3], v9
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; SDAG-NEXT: v_lshr_b64 v[6:7], v[2:3], v6
+; SDAG-NEXT: v_or_b32_e32 v7, v15, v7
+; SDAG-NEXT: v_or_b32_e32 v6, v14, v6
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v9
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v13, v7, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, v12, v6, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v7, 0, v35, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v6, 0, v34, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v9
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v8, v5, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v12, v4, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v12, 0
+; SDAG-NEXT: v_mov_b32_e32 v13, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB0_11
+; SDAG-NEXT: ; %bb.8: ; %udiv-preheader
+; SDAG-NEXT: v_lshr_b64 v[10:11], v[2:3], v30
+; SDAG-NEXT: v_sub_i32_e32 v35, vcc, 64, v30
+; SDAG-NEXT: v_subrev_i32_e32 v36, vcc, 64, v30
+; SDAG-NEXT: v_lshr_b64 v[37:38], v[4:5], v30
+; SDAG-NEXT: v_add_i32_e32 v34, vcc, -1, v28
+; SDAG-NEXT: s_mov_b64 s[10:11], 0
+; SDAG-NEXT: v_mov_b32_e32 v14, 0
+; SDAG-NEXT: v_mov_b32_e32 v15, 0
+; SDAG-NEXT: v_mov_b32_e32 v12, 0
+; SDAG-NEXT: v_mov_b32_e32 v13, 0
+; SDAG-NEXT: v_lshl_b64 v[48:49], v[4:5], v35
+; SDAG-NEXT: v_lshr_b64 v[4:5], v[4:5], v36
+; SDAG-NEXT: v_addc_u32_e32 v35, vcc, -1, v29, vcc
+; SDAG-NEXT: v_or_b32_e32 v11, v11, v49
+; SDAG-NEXT: v_or_b32_e32 v10, v10, v48
+; SDAG-NEXT: v_addc_u32_e32 v36, vcc, -1, v0, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v30
+; SDAG-NEXT: v_cndmask_b32_e64 v11, v5, v11, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, v4, v10, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v5, 0, v38, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v4, 0, v37, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v37, vcc, -1, v1, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; SDAG-NEXT: v_mov_b32_e32 v11, 0
+; SDAG-NEXT: .LBB0_9: ; %udiv-do-while
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v10, 31, v3
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v38, 31, v9
+; SDAG-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v39, 31, v7
+; SDAG-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
+; SDAG-NEXT: v_or_b32_e32 v4, v4, v10
+; SDAG-NEXT: v_or_b32_e32 v2, v2, v38
+; SDAG-NEXT: v_or_b32_e32 v8, v8, v39
+; SDAG-NEXT: v_or_b32_e32 v9, v13, v9
+; SDAG-NEXT: v_or_b32_e32 v7, v15, v7
+; SDAG-NEXT: v_or_b32_e32 v8, v12, v8
+; SDAG-NEXT: v_sub_i32_e32 v10, vcc, v34, v2
+; SDAG-NEXT: v_subb_u32_e32 v10, vcc, v35, v3, vcc
+; SDAG-NEXT: v_subb_u32_e32 v10, vcc, v36, v4, vcc
+; SDAG-NEXT: v_subb_u32_e32 v10, vcc, v37, v5, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v15, 31, v10
+; SDAG-NEXT: v_and_b32_e32 v10, 1, v15
+; SDAG-NEXT: v_and_b32_e32 v38, v15, v1
+; SDAG-NEXT: v_and_b32_e32 v39, v15, v0
+; SDAG-NEXT: v_and_b32_e32 v48, v15, v29
+; SDAG-NEXT: v_and_b32_e32 v15, v15, v28
+; SDAG-NEXT: v_sub_i32_e32 v2, vcc, v2, v15
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v3, v48, vcc
+; SDAG-NEXT: v_subb_u32_e32 v4, vcc, v4, v39, vcc
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v5, v38, vcc
+; SDAG-NEXT: v_add_i32_e32 v30, vcc, -1, v30
+; SDAG-NEXT: v_addc_u32_e32 v31, vcc, -1, v31, vcc
+; SDAG-NEXT: v_addc_u32_e32 v32, vcc, -1, v32, vcc
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, -1, v33, vcc
+; SDAG-NEXT: v_or_b32_e32 v39, v31, v33
+; SDAG-NEXT: v_or_b32_e32 v38, v30, v32
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[38:39]
+; SDAG-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; SDAG-NEXT: v_or_b32_e32 v6, v14, v6
+; SDAG-NEXT: v_mov_b32_e32 v15, v11
+; SDAG-NEXT: v_mov_b32_e32 v14, v10
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; SDAG-NEXT: s_cbranch_execnz .LBB0_9
+; SDAG-NEXT: ; %bb.10: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: .LBB0_11: ; %Flow11
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_lshl_b64 v[0:1], v[8:9], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v4, 31, v7
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[6:7], 1
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v4
+; SDAG-NEXT: v_or_b32_e32 v13, v13, v1
+; SDAG-NEXT: v_or_b32_e32 v14, v11, v3
+; SDAG-NEXT: v_or_b32_e32 v11, v12, v0
+; SDAG-NEXT: v_or_b32_e32 v10, v10, v2
+; SDAG-NEXT: .LBB0_12: ; %Flow12
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_xor_b32_e32 v3, v27, v26
+; SDAG-NEXT: v_xor_b32_e32 v2, v25, v24
+; SDAG-NEXT: v_xor_b32_e32 v7, v23, v22
+; SDAG-NEXT: v_xor_b32_e32 v6, v19, v18
+; SDAG-NEXT: v_xor_b32_e32 v4, v20, v3
+; SDAG-NEXT: v_xor_b32_e32 v5, v17, v2
+; SDAG-NEXT: v_xor_b32_e32 v1, v21, v3
+; SDAG-NEXT: v_xor_b32_e32 v0, v16, v2
+; SDAG-NEXT: v_xor_b32_e32 v8, v13, v7
+; SDAG-NEXT: v_xor_b32_e32 v9, v11, v6
+; SDAG-NEXT: v_xor_b32_e32 v11, v14, v7
+; SDAG-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; SDAG-NEXT: v_subb_u32_e32 v2, vcc, v5, v2, vcc
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v4, v3, vcc
+; SDAG-NEXT: v_xor_b32_e32 v4, v10, v6
+; SDAG-NEXT: v_sub_i32_e32 v4, vcc, v4, v6
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v11, v7, vcc
+; SDAG-NEXT: v_subb_u32_e32 v6, vcc, v9, v6, vcc
+; SDAG-NEXT: v_subb_u32_e32 v7, vcc, v8, v7, vcc
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: v_sdiv_v2i128_vv:
+; GISEL: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_ashrrev_i32_e32 v24, 31, v3
+; GISEL-NEXT: v_ashrrev_i32_e32 v25, 31, v11
+; GISEL-NEXT: v_mov_b32_e32 v20, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v21, 0
+; GISEL-NEXT: v_xor_b32_e32 v0, v24, v0
+; GISEL-NEXT: v_xor_b32_e32 v1, v24, v1
+; GISEL-NEXT: v_xor_b32_e32 v2, v24, v2
+; GISEL-NEXT: v_xor_b32_e32 v3, v24, v3
+; GISEL-NEXT: v_xor_b32_e32 v8, v25, v8
+; GISEL-NEXT: v_xor_b32_e32 v9, v25, v9
+; GISEL-NEXT: v_xor_b32_e32 v10, v25, v10
+; GISEL-NEXT: v_xor_b32_e32 v11, v25, v11
+; GISEL-NEXT: v_sub_i32_e32 v16, vcc, v0, v24
+; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v1, v24, vcc
+; GISEL-NEXT: v_sub_i32_e64 v26, s[4:5], v8, v25
+; GISEL-NEXT: v_subb_u32_e64 v27, s[4:5], v9, v25, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v18, vcc, v2, v24, vcc
+; GISEL-NEXT: v_subb_u32_e32 v19, vcc, v3, v24, vcc
+; GISEL-NEXT: v_subb_u32_e64 v10, vcc, v10, v25, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v11, vcc, v11, v25, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v8, v27
+; GISEL-NEXT: v_ffbh_u32_e32 v9, v26
+; GISEL-NEXT: v_ffbh_u32_e32 v22, v17
+; GISEL-NEXT: v_ffbh_u32_e32 v23, v16
+; GISEL-NEXT: v_or_b32_e32 v0, v26, v10
+; GISEL-NEXT: v_or_b32_e32 v1, v27, v11
+; GISEL-NEXT: v_or_b32_e32 v2, v16, v18
+; GISEL-NEXT: v_or_b32_e32 v3, v17, v19
+; GISEL-NEXT: v_add_i32_e32 v9, vcc, 32, v9
+; GISEL-NEXT: v_ffbh_u32_e32 v28, v11
+; GISEL-NEXT: v_ffbh_u32_e32 v29, v10
+; GISEL-NEXT: v_add_i32_e32 v23, vcc, 32, v23
+; GISEL-NEXT: v_ffbh_u32_e32 v30, v19
+; GISEL-NEXT: v_ffbh_u32_e32 v31, v18
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[2:3]
+; GISEL-NEXT: v_min_u32_e32 v0, v8, v9
+; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v29
+; GISEL-NEXT: v_min_u32_e32 v2, v22, v23
+; GISEL-NEXT: v_add_i32_e64 v3, s[6:7], 32, v31
+; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 64, v0
+; GISEL-NEXT: v_min_u32_e32 v1, v28, v1
+; GISEL-NEXT: v_add_i32_e64 v2, s[6:7], 64, v2
+; GISEL-NEXT: v_min_u32_e32 v3, v30, v3
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v22, 0, 1, s[4:5]
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v2, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[20:21]
+; GISEL-NEXT: v_cndmask_b32_e64 v20, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v8, 0x7f, v0
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e64 v21, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v8, v8, v2
+; GISEL-NEXT: v_or_b32_e32 v9, v1, v3
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e32 v20, v21, v20, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v9, v22, v20
+; GISEL-NEXT: v_and_b32_e32 v20, 1, v9
+; GISEL-NEXT: v_or_b32_e32 v8, v9, v8
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v20
+; GISEL-NEXT: v_cndmask_b32_e64 v20, v16, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v22, 1, v8
+; GISEL-NEXT: v_cndmask_b32_e64 v21, v17, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v8, v18, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v9, v19, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB0_6
+; GISEL-NEXT: ; %bb.1: ; %udiv-bb15
+; GISEL-NEXT: v_add_i32_e32 v28, vcc, 1, v0
+; GISEL-NEXT: v_addc_u32_e64 v29, s[4:5], 0, v1, vcc
+; GISEL-NEXT: v_sub_i32_e32 v32, vcc, 0x7f, v0
+; GISEL-NEXT: v_addc_u32_e64 v30, vcc, 0, v2, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v31, vcc, 0, v3, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v20, s[4:5], 64, v32
+; GISEL-NEXT: v_sub_i32_e64 v8, s[4:5], 64, v32
+; GISEL-NEXT: v_lshl_b64 v[0:1], v[16:17], v32
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[18:19], v32
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[8:9], v[16:17], v8
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[16:17], v20
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v32
+; GISEL-NEXT: v_cndmask_b32_e32 v20, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v21, 0, v1, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v8, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v9, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v22, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v23, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v32
+; GISEL-NEXT: v_cndmask_b32_e32 v8, v0, v18, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v1, v19, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v0, s8
+; GISEL-NEXT: v_mov_b32_e32 v1, s9
+; GISEL-NEXT: v_mov_b32_e32 v2, s10
+; GISEL-NEXT: v_mov_b32_e32 v3, s11
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[14:15], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB0_5
+; GISEL-NEXT: ; %bb.2: ; %udiv-preheader4
+; GISEL-NEXT: v_subrev_i32_e32 v34, vcc, 64, v28
+; GISEL-NEXT: v_sub_i32_e32 v22, vcc, 64, v28
+; GISEL-NEXT: v_lshr_b64 v[0:1], v[18:19], v28
+; GISEL-NEXT: v_lshr_b64 v[2:3], v[16:17], v28
+; GISEL-NEXT: v_add_i32_e32 v32, vcc, -1, v26
+; GISEL-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v28
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v28
+; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v27, vcc
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[18:19], v22
+; GISEL-NEXT: v_lshr_b64 v[36:37], v[18:19], v34
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, v0, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v19, 0, v1, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v34, vcc, -1, v10, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v2, v22
+; GISEL-NEXT: v_or_b32_e32 v1, v3, v23
+; GISEL-NEXT: v_addc_u32_e32 v35, vcc, -1, v11, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v0, v36, v0, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v1, v37, v1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v22, v0, v16, s[6:7]
+; GISEL-NEXT: v_cndmask_b32_e64 v23, v1, v17, s[6:7]
+; GISEL-NEXT: v_mov_b32_e32 v17, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, s8
+; GISEL-NEXT: v_mov_b32_e32 v1, s9
+; GISEL-NEXT: v_mov_b32_e32 v2, s10
+; GISEL-NEXT: v_mov_b32_e32 v3, s11
+; GISEL-NEXT: .LBB0_3: ; %udiv-do-while3
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshrrev_b32_e32 v16, 31, v21
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[20:21], 1
+; GISEL-NEXT: v_lshl_b64 v[36:37], v[22:23], 1
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[18:19], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v22, 31, v23
+; GISEL-NEXT: v_lshrrev_b32_e32 v23, 31, v9
+; GISEL-NEXT: v_add_i32_e32 v28, vcc, -1, v28
+; GISEL-NEXT: v_addc_u32_e32 v29, vcc, -1, v29, vcc
+; GISEL-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GISEL-NEXT: v_or_b32_e32 v20, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v21, v1, v3
+; GISEL-NEXT: v_or_b32_e32 v2, v18, v22
+; GISEL-NEXT: v_or_b32_e32 v3, v36, v23
+; GISEL-NEXT: v_addc_u32_e32 v30, vcc, -1, v30, vcc
+; GISEL-NEXT: v_addc_u32_e32 v31, vcc, -1, v31, vcc
+; GISEL-NEXT: v_or_b32_e32 v8, v8, v16
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v32, v3
+; GISEL-NEXT: v_subb_u32_e32 v0, vcc, v33, v37, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v28, v30
+; GISEL-NEXT: v_or_b32_e32 v1, v29, v31
+; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v34, v2, vcc
+; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v35, v19, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v16
+; GISEL-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GISEL-NEXT: v_and_b32_e32 v1, v0, v26
+; GISEL-NEXT: v_and_b32_e32 v18, v0, v27
+; GISEL-NEXT: v_and_b32_e32 v16, 1, v0
+; GISEL-NEXT: v_and_b32_e32 v36, v0, v10
+; GISEL-NEXT: v_and_b32_e32 v0, v0, v11
+; GISEL-NEXT: v_sub_i32_e32 v22, vcc, v3, v1
+; GISEL-NEXT: v_subb_u32_e32 v23, vcc, v37, v18, vcc
+; GISEL-NEXT: v_subb_u32_e32 v18, vcc, v2, v36, vcc
+; GISEL-NEXT: v_subb_u32_e32 v19, vcc, v19, v0, vcc
+; GISEL-NEXT: v_mov_b32_e32 v0, v16
+; GISEL-NEXT: v_mov_b32_e32 v1, v17
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GISEL-NEXT: s_cbranch_execnz .LBB0_3
+; GISEL-NEXT: ; %bb.4: ; %Flow13
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: .LBB0_5: ; %Flow14
+; GISEL-NEXT: s_or_b64 exec, exec, s[14:15]
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[20:21], 1
+; GISEL-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v10, 31, v21
+; GISEL-NEXT: v_or_b32_e32 v8, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v20, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v21, v1, v3
+; GISEL-NEXT: .LBB0_6: ; %Flow16
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_ashrrev_i32_e32 v18, 31, v7
+; GISEL-NEXT: v_ashrrev_i32_e32 v19, 31, v15
+; GISEL-NEXT: v_mov_b32_e32 v10, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v11, 0
+; GISEL-NEXT: v_xor_b32_e32 v0, v18, v4
+; GISEL-NEXT: v_xor_b32_e32 v1, v18, v5
+; GISEL-NEXT: v_xor_b32_e32 v2, v18, v6
+; GISEL-NEXT: v_xor_b32_e32 v3, v18, v7
+; GISEL-NEXT: v_xor_b32_e32 v4, v19, v12
+; GISEL-NEXT: v_xor_b32_e32 v5, v19, v13
+; GISEL-NEXT: v_xor_b32_e32 v14, v19, v14
+; GISEL-NEXT: v_xor_b32_e32 v15, v19, v15
+; GISEL-NEXT: v_sub_i32_e32 v6, vcc, v0, v18
+; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v1, v18, vcc
+; GISEL-NEXT: v_sub_i32_e64 v22, s[4:5], v4, v19
+; GISEL-NEXT: v_subb_u32_e64 v23, s[4:5], v5, v19, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v12, vcc, v2, v18, vcc
+; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v3, v18, vcc
+; GISEL-NEXT: v_subb_u32_e64 v4, vcc, v14, v19, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v15, v19, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v14, v23
+; GISEL-NEXT: v_ffbh_u32_e32 v15, v22
+; GISEL-NEXT: v_ffbh_u32_e32 v16, v7
+; GISEL-NEXT: v_ffbh_u32_e32 v17, v6
+; GISEL-NEXT: v_or_b32_e32 v0, v22, v4
+; GISEL-NEXT: v_or_b32_e32 v1, v23, v5
+; GISEL-NEXT: v_or_b32_e32 v2, v6, v12
+; GISEL-NEXT: v_or_b32_e32 v3, v7, v13
+; GISEL-NEXT: v_add_i32_e32 v15, vcc, 32, v15
+; GISEL-NEXT: v_ffbh_u32_e32 v26, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v27, v4
+; GISEL-NEXT: v_add_i32_e32 v17, vcc, 32, v17
+; GISEL-NEXT: v_ffbh_u32_e32 v28, v13
+; GISEL-NEXT: v_ffbh_u32_e32 v29, v12
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[2:3]
+; GISEL-NEXT: v_min_u32_e32 v0, v14, v15
+; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v27
+; GISEL-NEXT: v_min_u32_e32 v2, v16, v17
+; GISEL-NEXT: v_add_i32_e64 v3, s[6:7], 32, v29
+; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 64, v0
+; GISEL-NEXT: v_min_u32_e32 v1, v26, v1
+; GISEL-NEXT: v_add_i32_e64 v2, s[6:7], 64, v2
+; GISEL-NEXT: v_min_u32_e32 v3, v28, v3
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v14, 0, 1, s[4:5]
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[12:13]
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v2, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[10:11]
+; GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v10, 0x7f, v0
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v10, v10, v2
+; GISEL-NEXT: v_or_b32_e32 v11, v1, v3
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e32 v15, v16, v15, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v11, v14, v15
+; GISEL-NEXT: v_and_b32_e32 v14, 1, v11
+; GISEL-NEXT: v_or_b32_e32 v10, v11, v10
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GISEL-NEXT: v_cndmask_b32_e64 v14, v6, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v16, 1, v10
+; GISEL-NEXT: v_cndmask_b32_e64 v15, v7, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v10, v12, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v11, v13, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB0_12
+; GISEL-NEXT: ; %bb.7: ; %udiv-bb1
+; GISEL-NEXT: v_add_i32_e32 v26, vcc, 1, v0
+; GISEL-NEXT: v_addc_u32_e64 v27, s[4:5], 0, v1, vcc
+; GISEL-NEXT: v_sub_i32_e32 v30, vcc, 0x7f, v0
+; GISEL-NEXT: v_addc_u32_e64 v28, vcc, 0, v2, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v29, vcc, 0, v3, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v14, s[4:5], 64, v30
+; GISEL-NEXT: v_sub_i32_e64 v10, s[4:5], 64, v30
+; GISEL-NEXT: v_lshl_b64 v[0:1], v[6:7], v30
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[12:13], v30
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[10:11], v[6:7], v10
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[6:7], v14
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v30
+; GISEL-NEXT: v_cndmask_b32_e32 v14, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v15, 0, v1, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v10, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v11, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30
+; GISEL-NEXT: v_cndmask_b32_e32 v10, v0, v12, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v11, v1, v13, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v0, s8
+; GISEL-NEXT: v_mov_b32_e32 v1, s9
+; GISEL-NEXT: v_mov_b32_e32 v2, s10
+; GISEL-NEXT: v_mov_b32_e32 v3, s11
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[8:9], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB0_11
+; GISEL-NEXT: ; %bb.8: ; %udiv-preheader
+; GISEL-NEXT: v_subrev_i32_e32 v32, vcc, 64, v26
+; GISEL-NEXT: v_sub_i32_e32 v16, vcc, 64, v26
+; GISEL-NEXT: v_lshr_b64 v[0:1], v[12:13], v26
+; GISEL-NEXT: v_lshr_b64 v[2:3], v[6:7], v26
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_add_i32_e32 v30, vcc, -1, v22
+; GISEL-NEXT: v_addc_u32_e32 v31, vcc, -1, v23, vcc
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[12:13], v16
+; GISEL-NEXT: v_lshr_b64 v[12:13], v[12:13], v32
+; GISEL-NEXT: v_addc_u32_e32 v32, vcc, -1, v4, vcc
+; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v5, vcc
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v16
+; GISEL-NEXT: v_or_b32_e32 v3, v3, v17
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v12, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v13, v3, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v16, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v17, 0, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v2, v6, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v13, v3, v7, vcc
+; GISEL-NEXT: v_mov_b32_e32 v7, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GISEL-NEXT: .LBB0_9: ; %udiv-do-while
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[12:13], 1
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v6, 31, v13
+; GISEL-NEXT: v_lshrrev_b32_e32 v34, 31, v11
+; GISEL-NEXT: v_lshl_b64 v[12:13], v[14:15], 1
+; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v14, 31, v15
+; GISEL-NEXT: v_add_i32_e32 v26, vcc, -1, v26
+; GISEL-NEXT: v_addc_u32_e32 v27, vcc, -1, v27, vcc
+; GISEL-NEXT: v_or_b32_e32 v16, v16, v6
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v34
+; GISEL-NEXT: v_or_b32_e32 v10, v10, v14
+; GISEL-NEXT: v_or_b32_e32 v14, v0, v12
+; GISEL-NEXT: v_or_b32_e32 v15, v1, v13
+; GISEL-NEXT: v_addc_u32_e32 v28, vcc, -1, v28, vcc
+; GISEL-NEXT: v_addc_u32_e32 v29, vcc, -1, v29, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v30, v2
+; GISEL-NEXT: v_subb_u32_e32 v0, vcc, v31, v3, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v26, v28
+; GISEL-NEXT: v_or_b32_e32 v1, v27, v29
+; GISEL-NEXT: v_subb_u32_e32 v6, vcc, v32, v16, vcc
+; GISEL-NEXT: v_subb_u32_e32 v6, vcc, v33, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v6
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v6, 1, v0
+; GISEL-NEXT: v_and_b32_e32 v12, v0, v22
+; GISEL-NEXT: v_and_b32_e32 v13, v0, v23
+; GISEL-NEXT: v_and_b32_e32 v34, v0, v4
+; GISEL-NEXT: v_and_b32_e32 v35, v0, v5
+; GISEL-NEXT: v_mov_b32_e32 v0, v6
+; GISEL-NEXT: v_mov_b32_e32 v1, v7
+; GISEL-NEXT: v_sub_i32_e32 v12, vcc, v2, v12
+; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v3, v13, vcc
+; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v16, v34, vcc
+; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v17, v35, vcc
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execnz .LBB0_9
+; GISEL-NEXT: ; %bb.10: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB0_11: ; %Flow11
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[14:15], 1
+; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v4, 31, v15
+; GISEL-NEXT: v_or_b32_e32 v10, v10, v4
+; GISEL-NEXT: v_or_b32_e32 v14, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v15, v1, v3
+; GISEL-NEXT: .LBB0_12: ; %Flow12
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: v_xor_b32_e32 v3, v25, v24
+; GISEL-NEXT: v_xor_b32_e32 v7, v19, v18
+; GISEL-NEXT: v_xor_b32_e32 v0, v20, v3
+; GISEL-NEXT: v_xor_b32_e32 v1, v21, v3
+; GISEL-NEXT: v_xor_b32_e32 v2, v8, v3
+; GISEL-NEXT: v_xor_b32_e32 v6, v9, v3
+; GISEL-NEXT: v_xor_b32_e32 v4, v14, v7
+; GISEL-NEXT: v_xor_b32_e32 v5, v15, v7
+; GISEL-NEXT: v_xor_b32_e32 v8, v10, v7
+; GISEL-NEXT: v_xor_b32_e32 v9, v11, v7
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v3
+; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GISEL-NEXT: v_sub_i32_e64 v4, s[4:5], v4, v7
+; GISEL-NEXT: v_subb_u32_e64 v5, s[4:5], v5, v7, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v2, vcc, v2, v3, vcc
+; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v6, v3, vcc
+; GISEL-NEXT: v_subb_u32_e64 v6, vcc, v8, v7, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v9, v7, vcc
+; GISEL-NEXT: s_setpc_b64 s[30:31]
%shl = sdiv <2 x i128> %lhs, %rhs
ret <2 x i128> %shl
}
define <2 x i128> @v_udiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
+; SDAG-LABEL: v_udiv_v2i128_vv:
+; SDAG: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_or_b32_e32 v17, v9, v11
+; SDAG-NEXT: v_or_b32_e32 v16, v8, v10
+; SDAG-NEXT: v_or_b32_e32 v19, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v18, v0, v2
+; SDAG-NEXT: v_ffbh_u32_e32 v20, v10
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v11
+; SDAG-NEXT: v_ffbh_u32_e32 v22, v8
+; SDAG-NEXT: v_ffbh_u32_e32 v23, v9
+; SDAG-NEXT: v_ffbh_u32_e32 v24, v2
+; SDAG-NEXT: v_ffbh_u32_e32 v25, v3
+; SDAG-NEXT: v_ffbh_u32_e32 v26, v0
+; SDAG-NEXT: v_ffbh_u32_e32 v27, v1
+; SDAG-NEXT: v_mov_b32_e32 v28, 0
+; SDAG-NEXT: s_mov_b64 s[8:9], 0x7f
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[18:19]
+; SDAG-NEXT: v_add_i32_e64 v16, s[6:7], 32, v20
+; SDAG-NEXT: v_add_i32_e64 v17, s[6:7], 32, v22
+; SDAG-NEXT: v_add_i32_e64 v18, s[6:7], 32, v24
+; SDAG-NEXT: v_add_i32_e64 v19, s[6:7], 32, v26
+; SDAG-NEXT: s_or_b64 s[6:7], vcc, s[4:5]
+; SDAG-NEXT: v_min_u32_e32 v16, v16, v21
+; SDAG-NEXT: v_min_u32_e32 v17, v17, v23
+; SDAG-NEXT: v_min_u32_e32 v18, v18, v25
+; SDAG-NEXT: v_min_u32_e32 v19, v19, v27
+; SDAG-NEXT: v_add_i32_e32 v17, vcc, 64, v17
+; SDAG-NEXT: v_addc_u32_e64 v20, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_add_i32_e32 v19, vcc, 64, v19
+; SDAG-NEXT: v_addc_u32_e64 v21, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v20, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v16, v17, v16, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v21, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v18, v19, v18, vcc
+; SDAG-NEXT: v_sub_i32_e32 v23, vcc, v16, v18
+; SDAG-NEXT: v_subb_u32_e32 v24, vcc, v20, v17, vcc
+; SDAG-NEXT: v_xor_b32_e32 v16, 0x7f, v23
+; SDAG-NEXT: v_subbrev_u32_e32 v25, vcc, 0, v28, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[8:9], v[23:24]
+; SDAG-NEXT: v_cndmask_b32_e64 v18, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v26, vcc, 0, v28, vcc
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v25
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[25:26]
+; SDAG-NEXT: v_cndmask_b32_e64 v19, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v24, v26
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[25:26]
+; SDAG-NEXT: v_cndmask_b32_e32 v18, v19, v18, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[16:17]
+; SDAG-NEXT: v_and_b32_e32 v16, 1, v18
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v16
+; SDAG-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v3, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v2, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v1, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v19, v0, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB1_6
+; SDAG-NEXT: ; %bb.1: ; %udiv-bb15
+; SDAG-NEXT: v_add_i32_e32 v18, vcc, 1, v23
+; SDAG-NEXT: v_sub_i32_e64 v16, s[4:5], 63, v23
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: v_mov_b32_e32 v22, 0
+; SDAG-NEXT: v_addc_u32_e32 v27, vcc, 0, v24, vcc
+; SDAG-NEXT: v_lshl_b64 v[16:17], v[0:1], v16
+; SDAG-NEXT: v_addc_u32_e32 v28, vcc, 0, v25, vcc
+; SDAG-NEXT: v_addc_u32_e32 v29, vcc, 0, v26, vcc
+; SDAG-NEXT: v_or_b32_e32 v19, v18, v28
+; SDAG-NEXT: v_sub_i32_e32 v30, vcc, 0x7f, v23
+; SDAG-NEXT: v_or_b32_e32 v20, v27, v29
+; SDAG-NEXT: v_lshl_b64 v[23:24], v[2:3], v30
+; SDAG-NEXT: v_sub_i32_e32 v31, vcc, 64, v30
+; SDAG-NEXT: v_lshl_b64 v[25:26], v[0:1], v30
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[19:20]
+; SDAG-NEXT: v_lshr_b64 v[19:20], v[0:1], v31
+; SDAG-NEXT: v_or_b32_e32 v20, v24, v20
+; SDAG-NEXT: v_or_b32_e32 v19, v23, v19
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v30
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v17, v20, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v16, v19, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v24, 0, v26, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v23, 0, v25, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v30
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v17, v3, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v16, v2, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: v_mov_b32_e32 v20, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB1_5
+; SDAG-NEXT: ; %bb.2: ; %udiv-preheader4
+; SDAG-NEXT: v_lshr_b64 v[21:22], v[0:1], v18
+; SDAG-NEXT: v_sub_i32_e32 v31, vcc, 64, v18
+; SDAG-NEXT: v_subrev_i32_e32 v36, vcc, 64, v18
+; SDAG-NEXT: v_lshr_b64 v[32:33], v[2:3], v18
+; SDAG-NEXT: v_add_i32_e32 v30, vcc, -1, v8
+; SDAG-NEXT: s_mov_b64 s[12:13], 0
+; SDAG-NEXT: v_mov_b32_e32 v25, 0
+; SDAG-NEXT: v_mov_b32_e32 v26, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: v_mov_b32_e32 v20, 0
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v18
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v18
+; SDAG-NEXT: v_lshl_b64 v[34:35], v[2:3], v31
+; SDAG-NEXT: v_lshr_b64 v[36:37], v[2:3], v36
+; SDAG-NEXT: v_addc_u32_e32 v31, vcc, -1, v9, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v3, 0, v33, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, v32, s[4:5]
+; SDAG-NEXT: v_or_b32_e32 v22, v22, v35
+; SDAG-NEXT: v_or_b32_e32 v21, v21, v34
+; SDAG-NEXT: v_addc_u32_e32 v32, vcc, -1, v10, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v22, v37, v22, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v21, v36, v21, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, -1, v11, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v22, v1, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v0, v21, v0, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v22, 0
+; SDAG-NEXT: .LBB1_3: ; %udiv-do-while3
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshrrev_b32_e32 v21, 31, v24
+; SDAG-NEXT: v_lshl_b64 v[23:24], v[23:24], 1
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v34, 31, v1
+; SDAG-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v35, 31, v17
+; SDAG-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; SDAG-NEXT: v_or_b32_e32 v24, v26, v24
+; SDAG-NEXT: v_or_b32_e32 v23, v25, v23
+; SDAG-NEXT: v_or_b32_e32 v2, v2, v34
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v35
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v21
+; SDAG-NEXT: v_sub_i32_e32 v21, vcc, v30, v0
+; SDAG-NEXT: v_subb_u32_e32 v21, vcc, v31, v1, vcc
+; SDAG-NEXT: v_subb_u32_e32 v21, vcc, v32, v2, vcc
+; SDAG-NEXT: v_subb_u32_e32 v21, vcc, v33, v3, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v21, 31, v21
+; SDAG-NEXT: v_and_b32_e32 v25, v21, v8
+; SDAG-NEXT: v_and_b32_e32 v26, v21, v9
+; SDAG-NEXT: v_and_b32_e32 v34, v21, v10
+; SDAG-NEXT: v_and_b32_e32 v35, v21, v11
+; SDAG-NEXT: v_and_b32_e32 v21, 1, v21
+; SDAG-NEXT: v_sub_i32_e32 v0, vcc, v0, v25
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v1, v26, vcc
+; SDAG-NEXT: v_subb_u32_e32 v2, vcc, v2, v34, vcc
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v3, v35, vcc
+; SDAG-NEXT: v_add_i32_e32 v18, vcc, -1, v18
+; SDAG-NEXT: v_addc_u32_e32 v27, vcc, -1, v27, vcc
+; SDAG-NEXT: v_addc_u32_e32 v28, vcc, -1, v28, vcc
+; SDAG-NEXT: v_addc_u32_e32 v29, vcc, -1, v29, vcc
+; SDAG-NEXT: v_or_b32_e32 v25, v18, v28
+; SDAG-NEXT: v_or_b32_e32 v26, v27, v29
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[25:26]
+; SDAG-NEXT: v_or_b32_e32 v17, v20, v17
+; SDAG-NEXT: s_or_b64 s[12:13], vcc, s[12:13]
+; SDAG-NEXT: v_or_b32_e32 v16, v19, v16
+; SDAG-NEXT: v_mov_b32_e32 v26, v22
+; SDAG-NEXT: v_mov_b32_e32 v25, v21
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[12:13]
+; SDAG-NEXT: s_cbranch_execnz .LBB1_3
+; SDAG-NEXT: ; %bb.4: ; %Flow13
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB1_5: ; %Flow14
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: v_lshl_b64 v[0:1], v[16:17], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v8, 31, v24
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[23:24], 1
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v8
+; SDAG-NEXT: v_or_b32_e32 v16, v20, v1
+; SDAG-NEXT: v_or_b32_e32 v18, v22, v3
+; SDAG-NEXT: v_or_b32_e32 v17, v19, v0
+; SDAG-NEXT: v_or_b32_e32 v19, v21, v2
+; SDAG-NEXT: .LBB1_6: ; %Flow16
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_or_b32_e32 v1, v13, v15
+; SDAG-NEXT: v_or_b32_e32 v0, v12, v14
+; SDAG-NEXT: v_or_b32_e32 v3, v5, v7
+; SDAG-NEXT: v_or_b32_e32 v2, v4, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v8, v14
+; SDAG-NEXT: v_ffbh_u32_e32 v9, v15
+; SDAG-NEXT: v_ffbh_u32_e32 v10, v12
+; SDAG-NEXT: v_ffbh_u32_e32 v11, v13
+; SDAG-NEXT: v_ffbh_u32_e32 v20, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v7
+; SDAG-NEXT: v_ffbh_u32_e32 v22, v4
+; SDAG-NEXT: v_ffbh_u32_e32 v23, v5
+; SDAG-NEXT: v_mov_b32_e32 v24, 0
+; SDAG-NEXT: s_mov_b64 s[8:9], 0x7f
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[2:3]
+; SDAG-NEXT: v_add_i32_e64 v0, s[6:7], 32, v8
+; SDAG-NEXT: v_add_i32_e64 v1, s[6:7], 32, v10
+; SDAG-NEXT: v_add_i32_e64 v2, s[6:7], 32, v20
+; SDAG-NEXT: v_add_i32_e64 v3, s[6:7], 32, v22
+; SDAG-NEXT: s_or_b64 s[6:7], vcc, s[4:5]
+; SDAG-NEXT: v_min_u32_e32 v0, v0, v9
+; SDAG-NEXT: v_min_u32_e32 v1, v1, v11
+; SDAG-NEXT: v_min_u32_e32 v2, v2, v21
+; SDAG-NEXT: v_min_u32_e32 v3, v3, v23
+; SDAG-NEXT: v_add_i32_e32 v1, vcc, 64, v1
+; SDAG-NEXT: v_addc_u32_e64 v8, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_add_i32_e32 v3, vcc, 64, v3
+; SDAG-NEXT: v_addc_u32_e64 v9, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[14:15]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v8, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v9, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; SDAG-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v8, v1, vcc
+; SDAG-NEXT: v_xor_b32_e32 v8, 0x7f, v0
+; SDAG-NEXT: v_subbrev_u32_e32 v2, vcc, 0, v24, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[8:9], v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v24, vcc
+; SDAG-NEXT: v_or_b32_e32 v8, v8, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; SDAG-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v9, v1, v3
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; SDAG-NEXT: v_cndmask_b32_e32 v10, v11, v10, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; SDAG-NEXT: v_and_b32_e32 v8, 1, v10
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v8
+; SDAG-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v7, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v6, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, v5, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v11, v4, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB1_12
+; SDAG-NEXT: ; %bb.7: ; %udiv-bb1
+; SDAG-NEXT: v_add_i32_e32 v8, vcc, 1, v0
+; SDAG-NEXT: v_sub_i32_e64 v9, s[4:5], 63, v0
+; SDAG-NEXT: v_mov_b32_e32 v20, 0
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: v_addc_u32_e32 v11, vcc, 0, v1, vcc
+; SDAG-NEXT: v_lshl_b64 v[9:10], v[4:5], v9
+; SDAG-NEXT: v_addc_u32_e32 v24, vcc, 0, v2, vcc
+; SDAG-NEXT: v_addc_u32_e32 v25, vcc, 0, v3, vcc
+; SDAG-NEXT: v_or_b32_e32 v1, v8, v24
+; SDAG-NEXT: v_sub_i32_e32 v3, vcc, 0x7f, v0
+; SDAG-NEXT: v_or_b32_e32 v2, v11, v25
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[6:7], v3
+; SDAG-NEXT: v_sub_i32_e32 v0, vcc, 64, v3
+; SDAG-NEXT: v_lshl_b64 v[26:27], v[4:5], v3
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[1:2]
+; SDAG-NEXT: v_lshr_b64 v[0:1], v[4:5], v0
+; SDAG-NEXT: v_or_b32_e32 v1, v23, v1
+; SDAG-NEXT: v_or_b32_e32 v0, v22, v0
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v3
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v10, v1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v9, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v1, 0, v27, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, v26, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
+; SDAG-NEXT: v_cndmask_b32_e64 v3, v2, v7, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v9, v6, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v9, 0
+; SDAG-NEXT: v_mov_b32_e32 v10, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB1_11
+; SDAG-NEXT: ; %bb.8: ; %udiv-preheader
+; SDAG-NEXT: v_lshr_b64 v[20:21], v[4:5], v8
+; SDAG-NEXT: v_sub_i32_e32 v27, vcc, 64, v8
+; SDAG-NEXT: v_subrev_i32_e32 v28, vcc, 64, v8
+; SDAG-NEXT: v_lshr_b64 v[29:30], v[6:7], v8
+; SDAG-NEXT: v_add_i32_e32 v26, vcc, -1, v12
+; SDAG-NEXT: s_mov_b64 s[10:11], 0
+; SDAG-NEXT: v_mov_b32_e32 v22, 0
+; SDAG-NEXT: v_mov_b32_e32 v23, 0
+; SDAG-NEXT: v_mov_b32_e32 v9, 0
+; SDAG-NEXT: v_mov_b32_e32 v10, 0
+; SDAG-NEXT: v_lshl_b64 v[31:32], v[6:7], v27
+; SDAG-NEXT: v_lshr_b64 v[6:7], v[6:7], v28
+; SDAG-NEXT: v_addc_u32_e32 v27, vcc, -1, v13, vcc
+; SDAG-NEXT: v_or_b32_e32 v21, v21, v32
+; SDAG-NEXT: v_or_b32_e32 v20, v20, v31
+; SDAG-NEXT: v_addc_u32_e32 v28, vcc, -1, v14, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v8
+; SDAG-NEXT: v_cndmask_b32_e64 v21, v7, v21, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v6, v20, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v7, 0, v30, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v6, 0, v29, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v29, vcc, -1, v15, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
+; SDAG-NEXT: v_cndmask_b32_e32 v5, v21, v5, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v4, v20, v4, vcc
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: .LBB1_9: ; %udiv-do-while
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v20, 31, v5
+; SDAG-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v30, 31, v3
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v31, 31, v1
+; SDAG-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
+; SDAG-NEXT: v_or_b32_e32 v6, v6, v20
+; SDAG-NEXT: v_or_b32_e32 v4, v4, v30
+; SDAG-NEXT: v_or_b32_e32 v2, v2, v31
+; SDAG-NEXT: v_or_b32_e32 v3, v10, v3
+; SDAG-NEXT: v_or_b32_e32 v1, v23, v1
+; SDAG-NEXT: v_or_b32_e32 v2, v9, v2
+; SDAG-NEXT: v_sub_i32_e32 v20, vcc, v26, v4
+; SDAG-NEXT: v_subb_u32_e32 v20, vcc, v27, v5, vcc
+; SDAG-NEXT: v_subb_u32_e32 v20, vcc, v28, v6, vcc
+; SDAG-NEXT: v_subb_u32_e32 v20, vcc, v29, v7, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v23, 31, v20
+; SDAG-NEXT: v_and_b32_e32 v20, 1, v23
+; SDAG-NEXT: v_and_b32_e32 v30, v23, v15
+; SDAG-NEXT: v_and_b32_e32 v31, v23, v14
+; SDAG-NEXT: v_and_b32_e32 v32, v23, v13
+; SDAG-NEXT: v_and_b32_e32 v23, v23, v12
+; SDAG-NEXT: v_sub_i32_e32 v4, vcc, v4, v23
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v5, v32, vcc
+; SDAG-NEXT: v_subb_u32_e32 v6, vcc, v6, v31, vcc
+; SDAG-NEXT: v_subb_u32_e32 v7, vcc, v7, v30, vcc
+; SDAG-NEXT: v_add_i32_e32 v8, vcc, -1, v8
+; SDAG-NEXT: v_addc_u32_e32 v11, vcc, -1, v11, vcc
+; SDAG-NEXT: v_addc_u32_e32 v24, vcc, -1, v24, vcc
+; SDAG-NEXT: v_addc_u32_e32 v25, vcc, -1, v25, vcc
+; SDAG-NEXT: v_or_b32_e32 v31, v11, v25
+; SDAG-NEXT: v_or_b32_e32 v30, v8, v24
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[30:31]
+; SDAG-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; SDAG-NEXT: v_or_b32_e32 v0, v22, v0
+; SDAG-NEXT: v_mov_b32_e32 v23, v21
+; SDAG-NEXT: v_mov_b32_e32 v22, v20
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; SDAG-NEXT: s_cbranch_execnz .LBB1_9
+; SDAG-NEXT: ; %bb.10: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: .LBB1_11: ; %Flow11
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v4, 31, v1
+; SDAG-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
+; SDAG-NEXT: v_or_b32_e32 v2, v2, v4
+; SDAG-NEXT: v_or_b32_e32 v8, v10, v3
+; SDAG-NEXT: v_or_b32_e32 v10, v21, v1
+; SDAG-NEXT: v_or_b32_e32 v9, v9, v2
+; SDAG-NEXT: v_or_b32_e32 v11, v20, v0
+; SDAG-NEXT: .LBB1_12: ; %Flow12
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, v19
+; SDAG-NEXT: v_mov_b32_e32 v1, v18
+; SDAG-NEXT: v_mov_b32_e32 v2, v17
+; SDAG-NEXT: v_mov_b32_e32 v3, v16
+; SDAG-NEXT: v_mov_b32_e32 v4, v11
+; SDAG-NEXT: v_mov_b32_e32 v5, v10
+; SDAG-NEXT: v_mov_b32_e32 v6, v9
+; SDAG-NEXT: v_mov_b32_e32 v7, v8
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: v_udiv_v2i128_vv:
+; GISEL: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_mov_b32_e32 v16, v2
+; GISEL-NEXT: v_mov_b32_e32 v17, v3
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_or_b32_e32 v2, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v3, v9, v11
+; GISEL-NEXT: v_or_b32_e32 v18, v0, v16
+; GISEL-NEXT: v_or_b32_e32 v19, v1, v17
+; GISEL-NEXT: v_ffbh_u32_e32 v20, v9
+; GISEL-NEXT: v_ffbh_u32_e32 v21, v8
+; GISEL-NEXT: v_ffbh_u32_e32 v22, v11
+; GISEL-NEXT: v_ffbh_u32_e32 v23, v10
+; GISEL-NEXT: v_ffbh_u32_e32 v26, v1
+; GISEL-NEXT: v_ffbh_u32_e32 v27, v0
+; GISEL-NEXT: v_ffbh_u32_e32 v28, v17
+; GISEL-NEXT: v_ffbh_u32_e32 v29, v16
+; GISEL-NEXT: v_mov_b32_e32 v24, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v25, 0
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[18:19]
+; GISEL-NEXT: v_add_i32_e64 v2, s[6:7], 32, v21
+; GISEL-NEXT: v_add_i32_e64 v3, s[6:7], 32, v23
+; GISEL-NEXT: v_add_i32_e64 v18, s[6:7], 32, v27
+; GISEL-NEXT: v_add_i32_e64 v19, s[6:7], 32, v29
+; GISEL-NEXT: v_min_u32_e32 v2, v20, v2
+; GISEL-NEXT: v_min_u32_e32 v3, v22, v3
+; GISEL-NEXT: v_min_u32_e32 v18, v26, v18
+; GISEL-NEXT: v_min_u32_e32 v19, v28, v19
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v26, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e32 v2, vcc, 64, v2
+; GISEL-NEXT: v_add_i32_e32 v18, vcc, 64, v18
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v19, v18, vcc
+; GISEL-NEXT: v_sub_i32_e32 v20, vcc, v2, v3
+; GISEL-NEXT: v_subb_u32_e64 v21, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v22, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v23, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[20:21], v[24:25]
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v2, 0x7f, v20
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[22:23]
+; GISEL-NEXT: v_cndmask_b32_e64 v19, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v22
+; GISEL-NEXT: v_or_b32_e32 v3, v21, v23
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[22:23]
+; GISEL-NEXT: v_cndmask_b32_e32 v18, v19, v18, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v3, v26, v18
+; GISEL-NEXT: v_and_b32_e32 v18, 1, v3
+; GISEL-NEXT: v_or_b32_e32 v2, v3, v2
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GISEL-NEXT: v_cndmask_b32_e64 v18, v0, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v24, 1, v2
+; GISEL-NEXT: v_cndmask_b32_e64 v19, v1, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v2, v16, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v3, v17, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v24
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB1_6
+; GISEL-NEXT: ; %bb.1: ; %udiv-bb15
+; GISEL-NEXT: v_add_i32_e32 v26, vcc, 1, v20
+; GISEL-NEXT: v_addc_u32_e64 v27, s[4:5], 0, v21, vcc
+; GISEL-NEXT: v_sub_i32_e32 v30, vcc, 0x7f, v20
+; GISEL-NEXT: v_addc_u32_e64 v28, vcc, 0, v22, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v29, vcc, 0, v23, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v22, s[4:5], 64, v30
+; GISEL-NEXT: v_sub_i32_e64 v20, s[4:5], 64, v30
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[0:1], v30
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[16:17], v30
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[20:21], v[0:1], v20
+; GISEL-NEXT: v_lshl_b64 v[24:25], v[0:1], v22
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v30
+; GISEL-NEXT: v_cndmask_b32_e32 v22, 0, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v23, 0, v3, vcc
+; GISEL-NEXT: v_or_b32_e32 v2, v20, v18
+; GISEL-NEXT: v_or_b32_e32 v3, v21, v19
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v24, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v25, v3, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v21, s11
+; GISEL-NEXT: v_mov_b32_e32 v20, s10
+; GISEL-NEXT: v_mov_b32_e32 v19, s9
+; GISEL-NEXT: v_mov_b32_e32 v18, s8
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[8:9], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB1_5
+; GISEL-NEXT: ; %bb.2: ; %udiv-preheader4
+; GISEL-NEXT: v_subrev_i32_e32 v32, vcc, 64, v26
+; GISEL-NEXT: v_sub_i32_e32 v24, vcc, 64, v26
+; GISEL-NEXT: v_lshr_b64 v[18:19], v[16:17], v26
+; GISEL-NEXT: v_lshr_b64 v[20:21], v[0:1], v26
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_add_i32_e32 v30, vcc, -1, v8
+; GISEL-NEXT: v_addc_u32_e32 v31, vcc, -1, v9, vcc
+; GISEL-NEXT: v_lshl_b64 v[24:25], v[16:17], v24
+; GISEL-NEXT: v_lshr_b64 v[16:17], v[16:17], v32
+; GISEL-NEXT: v_addc_u32_e32 v32, vcc, -1, v10, vcc
+; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v11, vcc
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_or_b32_e32 v20, v20, v24
+; GISEL-NEXT: v_or_b32_e32 v21, v21, v25
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v20, v16, v20, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v21, v17, v21, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v16, 0, v18, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v17, 0, v19, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v24, v20, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v25, v21, v1, vcc
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_mov_b32_e32 v21, s7
+; GISEL-NEXT: v_mov_b32_e32 v20, s6
+; GISEL-NEXT: v_mov_b32_e32 v19, s5
+; GISEL-NEXT: v_mov_b32_e32 v18, s4
+; GISEL-NEXT: .LBB1_3: ; %udiv-do-while3
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshrrev_b32_e32 v34, 31, v23
+; GISEL-NEXT: v_lshl_b64 v[20:21], v[22:23], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v0, 31, v25
+; GISEL-NEXT: v_lshl_b64 v[24:25], v[24:25], 1
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v35, 31, v3
+; GISEL-NEXT: v_add_i32_e32 v26, vcc, -1, v26
+; GISEL-NEXT: v_addc_u32_e32 v27, vcc, -1, v27, vcc
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; GISEL-NEXT: v_or_b32_e32 v22, v18, v20
+; GISEL-NEXT: v_or_b32_e32 v23, v19, v21
+; GISEL-NEXT: v_or_b32_e32 v16, v16, v0
+; GISEL-NEXT: v_or_b32_e32 v20, v24, v35
+; GISEL-NEXT: v_addc_u32_e32 v28, vcc, -1, v28, vcc
+; GISEL-NEXT: v_addc_u32_e32 v29, vcc, -1, v29, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v30, v20
+; GISEL-NEXT: v_subb_u32_e32 v0, vcc, v31, v25, vcc
+; GISEL-NEXT: v_or_b32_e32 v18, v26, v28
+; GISEL-NEXT: v_or_b32_e32 v19, v27, v29
+; GISEL-NEXT: v_subb_u32_e32 v0, vcc, v32, v16, vcc
+; GISEL-NEXT: v_subb_u32_e32 v0, vcc, v33, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v0
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v18, v0, v8
+; GISEL-NEXT: v_and_b32_e32 v19, v0, v9
+; GISEL-NEXT: v_and_b32_e32 v21, v0, v10
+; GISEL-NEXT: v_and_b32_e32 v35, v0, v11
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_sub_i32_e32 v24, vcc, v20, v18
+; GISEL-NEXT: v_subb_u32_e32 v25, vcc, v25, v19, vcc
+; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v16, v21, vcc
+; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v17, v35, vcc
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v34
+; GISEL-NEXT: v_mov_b32_e32 v19, v1
+; GISEL-NEXT: v_mov_b32_e32 v18, v0
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execnz .LBB1_3
+; GISEL-NEXT: ; %bb.4: ; %Flow13
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB1_5: ; %Flow14
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_b64 v[0:1], v[22:23], 1
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v8, 31, v23
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v8
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v0
+; GISEL-NEXT: v_or_b32_e32 v19, v19, v1
+; GISEL-NEXT: .LBB1_6: ; %Flow16
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_or_b32_e32 v0, v12, v14
+; GISEL-NEXT: v_or_b32_e32 v1, v13, v15
+; GISEL-NEXT: v_or_b32_e32 v8, v4, v6
+; GISEL-NEXT: v_or_b32_e32 v9, v5, v7
+; GISEL-NEXT: v_ffbh_u32_e32 v16, v13
+; GISEL-NEXT: v_ffbh_u32_e32 v17, v12
+; GISEL-NEXT: v_ffbh_u32_e32 v20, v15
+; GISEL-NEXT: v_ffbh_u32_e32 v21, v14
+; GISEL-NEXT: v_ffbh_u32_e32 v22, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v23, v4
+; GISEL-NEXT: v_ffbh_u32_e32 v24, v7
+; GISEL-NEXT: v_ffbh_u32_e32 v25, v6
+; GISEL-NEXT: v_mov_b32_e32 v10, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v11, 0
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[8:9]
+; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 32, v17
+; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v21
+; GISEL-NEXT: v_add_i32_e64 v8, s[6:7], 32, v23
+; GISEL-NEXT: v_add_i32_e64 v9, s[6:7], 32, v25
+; GISEL-NEXT: v_min_u32_e32 v0, v16, v0
+; GISEL-NEXT: v_min_u32_e32 v1, v20, v1
+; GISEL-NEXT: v_min_u32_e32 v8, v22, v8
+; GISEL-NEXT: v_min_u32_e32 v9, v24, v9
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v20, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, 64, v0
+; GISEL-NEXT: v_add_i32_e32 v8, vcc, 64, v8
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v9, v8, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v16, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v17, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[10:11]
+; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v8, 0x7f, v0
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[16:17]
+; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v8, v8, v16
+; GISEL-NEXT: v_or_b32_e32 v9, v1, v17
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GISEL-NEXT: v_cndmask_b32_e32 v10, v11, v10, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v9, v20, v10
+; GISEL-NEXT: v_and_b32_e32 v10, 1, v9
+; GISEL-NEXT: v_or_b32_e32 v8, v9, v8
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GISEL-NEXT: v_cndmask_b32_e64 v10, v4, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v20, 1, v8
+; GISEL-NEXT: v_cndmask_b32_e64 v11, v5, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v8, v6, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v9, v7, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v20
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB1_12
+; GISEL-NEXT: ; %bb.7: ; %udiv-bb1
+; GISEL-NEXT: v_add_i32_e32 v8, vcc, 1, v0
+; GISEL-NEXT: v_addc_u32_e64 v11, s[4:5], 0, v1, vcc
+; GISEL-NEXT: v_sub_i32_e32 v26, vcc, 0x7f, v0
+; GISEL-NEXT: v_addc_u32_e64 v24, vcc, 0, v16, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v25, vcc, 0, v17, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v9, s[4:5], 64, v26
+; GISEL-NEXT: v_sub_i32_e64 v10, s[4:5], 64, v26
+; GISEL-NEXT: v_lshl_b64 v[0:1], v[4:5], v26
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[6:7], v26
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[20:21], v[4:5], v10
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[4:5], v9
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v9, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v1, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v20, v16
+; GISEL-NEXT: v_or_b32_e32 v1, v21, v17
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v22, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v23, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v23, s11
+; GISEL-NEXT: v_mov_b32_e32 v22, s10
+; GISEL-NEXT: v_mov_b32_e32 v21, s9
+; GISEL-NEXT: v_mov_b32_e32 v20, s8
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[8:9], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB1_11
+; GISEL-NEXT: ; %bb.8: ; %udiv-preheader
+; GISEL-NEXT: v_subrev_i32_e32 v28, vcc, 64, v8
+; GISEL-NEXT: v_sub_i32_e32 v22, vcc, 64, v8
+; GISEL-NEXT: v_lshr_b64 v[16:17], v[6:7], v8
+; GISEL-NEXT: v_lshr_b64 v[20:21], v[4:5], v8
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_add_i32_e32 v26, vcc, -1, v12
+; GISEL-NEXT: v_addc_u32_e32 v27, vcc, -1, v13, vcc
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[6:7], v22
+; GISEL-NEXT: v_lshr_b64 v[6:7], v[6:7], v28
+; GISEL-NEXT: v_addc_u32_e32 v28, vcc, -1, v14, vcc
+; GISEL-NEXT: v_addc_u32_e32 v29, vcc, -1, v15, vcc
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_or_b32_e32 v20, v20, v22
+; GISEL-NEXT: v_or_b32_e32 v21, v21, v23
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v8
+; GISEL-NEXT: v_cndmask_b32_e32 v6, v6, v20, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v7, v7, v21, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v16, 0, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v17, 0, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
+; GISEL-NEXT: v_cndmask_b32_e32 v6, v6, v4, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v7, v7, v5, vcc
+; GISEL-NEXT: v_mov_b32_e32 v5, 0
+; GISEL-NEXT: v_mov_b32_e32 v23, s7
+; GISEL-NEXT: v_mov_b32_e32 v22, s6
+; GISEL-NEXT: v_mov_b32_e32 v21, s5
+; GISEL-NEXT: v_mov_b32_e32 v20, s4
+; GISEL-NEXT: .LBB1_9: ; %udiv-do-while
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[6:7], 1
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v4, 31, v7
+; GISEL-NEXT: v_lshrrev_b32_e32 v30, 31, v1
+; GISEL-NEXT: v_lshl_b64 v[6:7], v[9:10], 1
+; GISEL-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v9, 31, v10
+; GISEL-NEXT: v_add_i32_e32 v8, vcc, -1, v8
+; GISEL-NEXT: v_addc_u32_e32 v11, vcc, -1, v11, vcc
+; GISEL-NEXT: v_or_b32_e32 v16, v16, v4
+; GISEL-NEXT: v_or_b32_e32 v22, v22, v30
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v9
+; GISEL-NEXT: v_or_b32_e32 v9, v20, v6
+; GISEL-NEXT: v_or_b32_e32 v10, v21, v7
+; GISEL-NEXT: v_addc_u32_e32 v24, vcc, -1, v24, vcc
+; GISEL-NEXT: v_addc_u32_e32 v25, vcc, -1, v25, vcc
+; GISEL-NEXT: v_sub_i32_e32 v4, vcc, v26, v22
+; GISEL-NEXT: v_subb_u32_e32 v4, vcc, v27, v23, vcc
+; GISEL-NEXT: v_or_b32_e32 v6, v8, v24
+; GISEL-NEXT: v_or_b32_e32 v7, v11, v25
+; GISEL-NEXT: v_subb_u32_e32 v4, vcc, v28, v16, vcc
+; GISEL-NEXT: v_subb_u32_e32 v4, vcc, v29, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GISEL-NEXT: v_ashrrev_i32_e32 v6, 31, v4
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v4, 1, v6
+; GISEL-NEXT: v_and_b32_e32 v7, v6, v12
+; GISEL-NEXT: v_and_b32_e32 v30, v6, v13
+; GISEL-NEXT: v_and_b32_e32 v31, v6, v14
+; GISEL-NEXT: v_and_b32_e32 v32, v6, v15
+; GISEL-NEXT: v_mov_b32_e32 v21, v5
+; GISEL-NEXT: v_mov_b32_e32 v20, v4
+; GISEL-NEXT: v_sub_i32_e32 v6, vcc, v22, v7
+; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v23, v30, vcc
+; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v16, v31, vcc
+; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v17, v32, vcc
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execnz .LBB1_9
+; GISEL-NEXT: ; %bb.10: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB1_11: ; %Flow11
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_b64 v[4:5], v[9:10], 1
+; GISEL-NEXT: v_lshl_b64 v[8:9], v[0:1], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v0, 31, v10
+; GISEL-NEXT: v_or_b32_e32 v8, v8, v0
+; GISEL-NEXT: v_or_b32_e32 v10, v20, v4
+; GISEL-NEXT: v_or_b32_e32 v11, v21, v5
+; GISEL-NEXT: .LBB1_12: ; %Flow12
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: v_mov_b32_e32 v0, v18
+; GISEL-NEXT: v_mov_b32_e32 v1, v19
+; GISEL-NEXT: v_mov_b32_e32 v4, v10
+; GISEL-NEXT: v_mov_b32_e32 v5, v11
+; GISEL-NEXT: v_mov_b32_e32 v6, v8
+; GISEL-NEXT: v_mov_b32_e32 v7, v9
+; GISEL-NEXT: s_setpc_b64 s[30:31]
%shl = udiv <2 x i128> %lhs, %rhs
ret <2 x i128> %shl
}
define <2 x i128> @v_srem_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
+; SDAG-LABEL: v_srem_v2i128_vv:
+; SDAG: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
+; SDAG-NEXT: v_ashrrev_i32_e32 v28, 31, v3
+; SDAG-NEXT: v_ashrrev_i32_e32 v16, 31, v11
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: s_mov_b64 s[10:11], 0x7f
+; SDAG-NEXT: v_mov_b32_e32 v29, v28
+; SDAG-NEXT: v_xor_b32_e32 v18, v3, v28
+; SDAG-NEXT: v_xor_b32_e32 v19, v2, v28
+; SDAG-NEXT: v_xor_b32_e32 v1, v1, v28
+; SDAG-NEXT: v_xor_b32_e32 v0, v0, v28
+; SDAG-NEXT: v_xor_b32_e32 v11, v11, v16
+; SDAG-NEXT: v_xor_b32_e32 v10, v10, v16
+; SDAG-NEXT: v_xor_b32_e32 v20, v9, v16
+; SDAG-NEXT: v_xor_b32_e32 v9, v8, v16
+; SDAG-NEXT: v_sub_i32_e32 v2, vcc, v0, v28
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v1, v28, vcc
+; SDAG-NEXT: v_ffbh_u32_e32 v1, v2
+; SDAG-NEXT: v_subb_u32_e32 v0, vcc, v19, v28, vcc
+; SDAG-NEXT: v_add_i32_e64 v19, s[4:5], 32, v1
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v3
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v18, v28, vcc
+; SDAG-NEXT: v_or_b32_e32 v8, v2, v0
+; SDAG-NEXT: v_ffbh_u32_e32 v18, v0
+; SDAG-NEXT: v_min_u32_e32 v19, v19, v21
+; SDAG-NEXT: v_sub_i32_e32 v31, vcc, v9, v16
+; SDAG-NEXT: v_or_b32_e32 v9, v3, v1
+; SDAG-NEXT: v_add_i32_e64 v18, s[4:5], 32, v18
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v1
+; SDAG-NEXT: v_add_i32_e64 v19, s[4:5], 64, v19
+; SDAG-NEXT: v_addc_u32_e64 v22, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_subb_u32_e32 v30, vcc, v20, v16, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[8:9]
+; SDAG-NEXT: v_ffbh_u32_e32 v9, v31
+; SDAG-NEXT: v_min_u32_e32 v18, v18, v21
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v22, 0, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v8, vcc, v10, v16, vcc
+; SDAG-NEXT: v_add_i32_e64 v21, s[8:9], 32, v9
+; SDAG-NEXT: v_ffbh_u32_e32 v22, v30
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v19, v18, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v9, vcc, v11, v16, vcc
+; SDAG-NEXT: v_or_b32_e32 v10, v31, v8
+; SDAG-NEXT: v_ffbh_u32_e32 v16, v8
+; SDAG-NEXT: v_min_u32_e32 v19, v21, v22
+; SDAG-NEXT: v_or_b32_e32 v11, v30, v9
+; SDAG-NEXT: v_add_i32_e32 v16, vcc, 32, v16
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v9
+; SDAG-NEXT: v_add_i32_e32 v19, vcc, 64, v19
+; SDAG-NEXT: v_addc_u32_e64 v22, s[6:7], 0, 0, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; SDAG-NEXT: v_min_u32_e32 v10, v16, v21
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[8:9]
+; SDAG-NEXT: v_cndmask_b32_e64 v11, v22, 0, s[6:7]
+; SDAG-NEXT: s_or_b64 s[8:9], vcc, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, v19, v10, s[6:7]
+; SDAG-NEXT: v_sub_i32_e32 v10, vcc, v10, v18
+; SDAG-NEXT: v_subb_u32_e32 v11, vcc, v11, v20, vcc
+; SDAG-NEXT: v_xor_b32_e32 v16, 0x7f, v10
+; SDAG-NEXT: v_subbrev_u32_e32 v18, vcc, 0, v17, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[10:11], v[10:11]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v19, vcc, 0, v17, vcc
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v18
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_cndmask_b32_e64 v21, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v11, v19
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_cndmask_b32_e32 v20, v21, v20, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[16:17]
+; SDAG-NEXT: v_and_b32_e32 v16, 1, v20
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v16
+; SDAG-NEXT: s_or_b64 s[4:5], s[8:9], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v35, v1, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v32, v0, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v27, v3, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v33, v2, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB2_6
+; SDAG-NEXT: ; %bb.1: ; %udiv-bb15
+; SDAG-NEXT: v_add_i32_e32 v32, vcc, 1, v10
+; SDAG-NEXT: v_sub_i32_e64 v20, s[4:5], 63, v10
+; SDAG-NEXT: v_mov_b32_e32 v16, 0
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, 0, v11, vcc
+; SDAG-NEXT: v_lshl_b64 v[20:21], v[2:3], v20
+; SDAG-NEXT: v_addc_u32_e32 v34, vcc, 0, v18, vcc
+; SDAG-NEXT: v_addc_u32_e32 v35, vcc, 0, v19, vcc
+; SDAG-NEXT: v_or_b32_e32 v18, v32, v34
+; SDAG-NEXT: v_sub_i32_e32 v24, vcc, 0x7f, v10
+; SDAG-NEXT: v_or_b32_e32 v19, v33, v35
+; SDAG-NEXT: v_lshl_b64 v[10:11], v[0:1], v24
+; SDAG-NEXT: v_sub_i32_e32 v25, vcc, 64, v24
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[2:3], v24
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_lshr_b64 v[18:19], v[2:3], v25
+; SDAG-NEXT: v_or_b32_e32 v11, v11, v19
+; SDAG-NEXT: v_or_b32_e32 v10, v10, v18
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v24
+; SDAG-NEXT: v_cndmask_b32_e64 v11, v21, v11, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, v20, v10, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v21, 0, v23, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, 0, v22, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v24
+; SDAG-NEXT: v_cndmask_b32_e64 v11, v11, v1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, v10, v0, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v18, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB2_5
+; SDAG-NEXT: ; %bb.2: ; %udiv-preheader4
+; SDAG-NEXT: v_lshr_b64 v[16:17], v[2:3], v32
+; SDAG-NEXT: v_sub_i32_e32 v26, vcc, 64, v32
+; SDAG-NEXT: v_subrev_i32_e32 v37, vcc, 64, v32
+; SDAG-NEXT: v_lshr_b64 v[24:25], v[0:1], v32
+; SDAG-NEXT: v_add_i32_e32 v36, vcc, -1, v31
+; SDAG-NEXT: s_mov_b64 s[10:11], 0
+; SDAG-NEXT: v_mov_b32_e32 v22, 0
+; SDAG-NEXT: v_mov_b32_e32 v23, 0
+; SDAG-NEXT: v_mov_b32_e32 v18, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: v_lshl_b64 v[26:27], v[0:1], v26
+; SDAG-NEXT: v_lshr_b64 v[48:49], v[0:1], v37
+; SDAG-NEXT: v_addc_u32_e32 v37, vcc, -1, v30, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v17, v27
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v26
+; SDAG-NEXT: v_addc_u32_e32 v38, vcc, -1, v8, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v32
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v49, v17, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v48, v16, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v27, 0, v25, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v26, 0, v24, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v39, vcc, -1, v9, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v32
+; SDAG-NEXT: v_cndmask_b32_e32 v25, v17, v3, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v24, v16, v2, vcc
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: .LBB2_3: ; %udiv-do-while3
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshl_b64 v[26:27], v[26:27], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v16, 31, v25
+; SDAG-NEXT: v_lshl_b64 v[24:25], v[24:25], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v48, 31, v11
+; SDAG-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v49, 31, v21
+; SDAG-NEXT: v_lshl_b64 v[20:21], v[20:21], 1
+; SDAG-NEXT: v_or_b32_e32 v26, v26, v16
+; SDAG-NEXT: v_or_b32_e32 v24, v24, v48
+; SDAG-NEXT: v_or_b32_e32 v10, v10, v49
+; SDAG-NEXT: v_or_b32_e32 v11, v19, v11
+; SDAG-NEXT: v_sub_i32_e32 v16, vcc, v36, v24
+; SDAG-NEXT: v_or_b32_e32 v10, v18, v10
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v37, v25, vcc
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v38, v26, vcc
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v39, v27, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v16, 31, v16
+; SDAG-NEXT: v_and_b32_e32 v48, v16, v31
+; SDAG-NEXT: v_and_b32_e32 v49, v16, v30
+; SDAG-NEXT: v_and_b32_e32 v50, v16, v8
+; SDAG-NEXT: v_and_b32_e32 v51, v16, v9
+; SDAG-NEXT: v_and_b32_e32 v16, 1, v16
+; SDAG-NEXT: v_sub_i32_e32 v24, vcc, v24, v48
+; SDAG-NEXT: v_subb_u32_e32 v25, vcc, v25, v49, vcc
+; SDAG-NEXT: v_subb_u32_e32 v26, vcc, v26, v50, vcc
+; SDAG-NEXT: v_subb_u32_e32 v27, vcc, v27, v51, vcc
+; SDAG-NEXT: v_add_i32_e32 v32, vcc, -1, v32
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, -1, v33, vcc
+; SDAG-NEXT: v_addc_u32_e32 v34, vcc, -1, v34, vcc
+; SDAG-NEXT: v_addc_u32_e32 v35, vcc, -1, v35, vcc
+; SDAG-NEXT: v_or_b32_e32 v48, v32, v34
+; SDAG-NEXT: v_or_b32_e32 v49, v33, v35
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[48:49]
+; SDAG-NEXT: v_or_b32_e32 v21, v23, v21
+; SDAG-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; SDAG-NEXT: v_or_b32_e32 v20, v22, v20
+; SDAG-NEXT: v_mov_b32_e32 v23, v17
+; SDAG-NEXT: v_mov_b32_e32 v22, v16
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; SDAG-NEXT: s_cbranch_execnz .LBB2_3
+; SDAG-NEXT: ; %bb.4: ; %Flow13
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: .LBB2_5: ; %Flow14
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v22, 31, v21
+; SDAG-NEXT: v_lshl_b64 v[20:21], v[20:21], 1
+; SDAG-NEXT: v_or_b32_e32 v10, v10, v22
+; SDAG-NEXT: v_or_b32_e32 v35, v19, v11
+; SDAG-NEXT: v_or_b32_e32 v27, v17, v21
+; SDAG-NEXT: v_or_b32_e32 v32, v18, v10
+; SDAG-NEXT: v_or_b32_e32 v33, v16, v20
+; SDAG-NEXT: .LBB2_6: ; %Flow16
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_ashrrev_i32_e32 v26, 31, v7
+; SDAG-NEXT: v_ashrrev_i32_e32 v16, 31, v15
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: s_mov_b64 s[10:11], 0x7f
+; SDAG-NEXT: v_mov_b32_e32 v34, v26
+; SDAG-NEXT: v_xor_b32_e32 v10, v7, v26
+; SDAG-NEXT: v_xor_b32_e32 v11, v6, v26
+; SDAG-NEXT: v_xor_b32_e32 v5, v5, v26
+; SDAG-NEXT: v_xor_b32_e32 v4, v4, v26
+; SDAG-NEXT: v_xor_b32_e32 v15, v15, v16
+; SDAG-NEXT: v_xor_b32_e32 v14, v14, v16
+; SDAG-NEXT: v_xor_b32_e32 v13, v13, v16
+; SDAG-NEXT: v_xor_b32_e32 v12, v12, v16
+; SDAG-NEXT: v_sub_i32_e32 v6, vcc, v4, v26
+; SDAG-NEXT: v_subb_u32_e32 v7, vcc, v5, v26, vcc
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v6
+; SDAG-NEXT: v_subb_u32_e32 v4, vcc, v11, v26, vcc
+; SDAG-NEXT: v_add_i32_e64 v11, s[4:5], 32, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v18, v7
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v10, v26, vcc
+; SDAG-NEXT: v_or_b32_e32 v10, v6, v4
+; SDAG-NEXT: v_ffbh_u32_e32 v19, v4
+; SDAG-NEXT: v_min_u32_e32 v18, v11, v18
+; SDAG-NEXT: v_sub_i32_e32 v37, vcc, v12, v16
+; SDAG-NEXT: v_or_b32_e32 v11, v7, v5
+; SDAG-NEXT: v_add_i32_e64 v12, s[4:5], 32, v19
+; SDAG-NEXT: v_ffbh_u32_e32 v19, v5
+; SDAG-NEXT: v_add_i32_e64 v18, s[4:5], 64, v18
+; SDAG-NEXT: v_addc_u32_e64 v20, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_subb_u32_e32 v36, vcc, v13, v16, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
+; SDAG-NEXT: v_ffbh_u32_e32 v11, v37
+; SDAG-NEXT: v_min_u32_e32 v12, v12, v19
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v19, v20, 0, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v10, vcc, v14, v16, vcc
+; SDAG-NEXT: v_add_i32_e64 v13, s[8:9], 32, v11
+; SDAG-NEXT: v_ffbh_u32_e32 v14, v36
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v18, v12, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v11, vcc, v15, v16, vcc
+; SDAG-NEXT: v_or_b32_e32 v12, v37, v10
+; SDAG-NEXT: v_ffbh_u32_e32 v15, v10
+; SDAG-NEXT: v_min_u32_e32 v14, v13, v14
+; SDAG-NEXT: v_or_b32_e32 v13, v36, v11
+; SDAG-NEXT: v_add_i32_e32 v15, vcc, 32, v15
+; SDAG-NEXT: v_ffbh_u32_e32 v16, v11
+; SDAG-NEXT: v_add_i32_e32 v14, vcc, 64, v14
+; SDAG-NEXT: v_addc_u32_e64 v20, s[6:7], 0, 0, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[12:13]
+; SDAG-NEXT: v_min_u32_e32 v12, v15, v16
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[10:11]
+; SDAG-NEXT: v_cndmask_b32_e64 v13, v20, 0, s[6:7]
+; SDAG-NEXT: s_or_b64 s[8:9], vcc, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, v14, v12, s[6:7]
+; SDAG-NEXT: v_sub_i32_e32 v12, vcc, v12, v18
+; SDAG-NEXT: v_subb_u32_e32 v13, vcc, v13, v19, vcc
+; SDAG-NEXT: v_xor_b32_e32 v16, 0x7f, v12
+; SDAG-NEXT: v_subbrev_u32_e32 v14, vcc, 0, v17, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[10:11], v[12:13]
+; SDAG-NEXT: v_cndmask_b32_e64 v18, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v15, vcc, 0, v17, vcc
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v14
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[14:15]
+; SDAG-NEXT: v_cndmask_b32_e64 v19, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v13, v15
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; SDAG-NEXT: v_cndmask_b32_e32 v18, v19, v18, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[16:17]
+; SDAG-NEXT: v_and_b32_e32 v16, 1, v18
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v16
+; SDAG-NEXT: s_or_b64 s[4:5], s[8:9], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v19, v5, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v4, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v7, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v6, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB2_12
+; SDAG-NEXT: ; %bb.7: ; %udiv-bb1
+; SDAG-NEXT: v_add_i32_e32 v38, vcc, 1, v12
+; SDAG-NEXT: v_sub_i32_e64 v18, s[4:5], 63, v12
+; SDAG-NEXT: v_mov_b32_e32 v16, 0
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: v_addc_u32_e32 v39, vcc, 0, v13, vcc
+; SDAG-NEXT: v_lshl_b64 v[18:19], v[6:7], v18
+; SDAG-NEXT: v_addc_u32_e32 v48, vcc, 0, v14, vcc
+; SDAG-NEXT: v_addc_u32_e32 v49, vcc, 0, v15, vcc
+; SDAG-NEXT: v_or_b32_e32 v13, v38, v48
+; SDAG-NEXT: v_sub_i32_e32 v15, vcc, 0x7f, v12
+; SDAG-NEXT: v_or_b32_e32 v14, v39, v49
+; SDAG-NEXT: v_lshl_b64 v[20:21], v[4:5], v15
+; SDAG-NEXT: v_sub_i32_e32 v12, vcc, 64, v15
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[6:7], v15
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[13:14]
+; SDAG-NEXT: v_lshr_b64 v[12:13], v[6:7], v12
+; SDAG-NEXT: v_or_b32_e32 v13, v21, v13
+; SDAG-NEXT: v_or_b32_e32 v12, v20, v12
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v15
+; SDAG-NEXT: v_cndmask_b32_e64 v14, v19, v13, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v18, v12, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v13, 0, v23, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, v22, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v15
+; SDAG-NEXT: v_cndmask_b32_e64 v15, v14, v5, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v14, v18, v4, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v18, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB2_11
+; SDAG-NEXT: ; %bb.8: ; %udiv-preheader
+; SDAG-NEXT: v_lshr_b64 v[16:17], v[6:7], v38
+; SDAG-NEXT: v_sub_i32_e32 v24, vcc, 64, v38
+; SDAG-NEXT: v_subrev_i32_e32 v51, vcc, 64, v38
+; SDAG-NEXT: v_lshr_b64 v[22:23], v[4:5], v38
+; SDAG-NEXT: v_add_i32_e32 v50, vcc, -1, v37
+; SDAG-NEXT: s_mov_b64 s[10:11], 0
+; SDAG-NEXT: v_mov_b32_e32 v20, 0
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: v_mov_b32_e32 v18, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: v_lshl_b64 v[24:25], v[4:5], v24
+; SDAG-NEXT: v_lshr_b64 v[53:54], v[4:5], v51
+; SDAG-NEXT: v_addc_u32_e32 v51, vcc, -1, v36, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v17, v25
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v24
+; SDAG-NEXT: v_addc_u32_e32 v52, vcc, -1, v10, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v38
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v54, v17, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v53, v16, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v25, 0, v23, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v24, 0, v22, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v53, vcc, -1, v11, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v38
+; SDAG-NEXT: v_cndmask_b32_e32 v23, v17, v7, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v22, v16, v6, vcc
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: .LBB2_9: ; %udiv-do-while
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshl_b64 v[24:25], v[24:25], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v16, 31, v23
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[22:23], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v54, 31, v15
+; SDAG-NEXT: v_lshl_b64 v[14:15], v[14:15], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v55, 31, v13
+; SDAG-NEXT: v_lshl_b64 v[12:13], v[12:13], 1
+; SDAG-NEXT: v_or_b32_e32 v24, v24, v16
+; SDAG-NEXT: v_or_b32_e32 v22, v22, v54
+; SDAG-NEXT: v_or_b32_e32 v14, v14, v55
+; SDAG-NEXT: v_or_b32_e32 v15, v19, v15
+; SDAG-NEXT: v_or_b32_e32 v13, v21, v13
+; SDAG-NEXT: v_or_b32_e32 v14, v18, v14
+; SDAG-NEXT: v_sub_i32_e32 v16, vcc, v50, v22
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v51, v23, vcc
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v52, v24, vcc
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v53, v25, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v21, 31, v16
+; SDAG-NEXT: v_and_b32_e32 v16, 1, v21
+; SDAG-NEXT: v_and_b32_e32 v54, v21, v11
+; SDAG-NEXT: v_and_b32_e32 v55, v21, v10
+; SDAG-NEXT: v_and_b32_e32 v40, v21, v36
+; SDAG-NEXT: v_and_b32_e32 v21, v21, v37
+; SDAG-NEXT: v_sub_i32_e32 v22, vcc, v22, v21
+; SDAG-NEXT: v_subb_u32_e32 v23, vcc, v23, v40, vcc
+; SDAG-NEXT: v_subb_u32_e32 v24, vcc, v24, v55, vcc
+; SDAG-NEXT: v_subb_u32_e32 v25, vcc, v25, v54, vcc
+; SDAG-NEXT: v_add_i32_e32 v38, vcc, -1, v38
+; SDAG-NEXT: v_addc_u32_e32 v39, vcc, -1, v39, vcc
+; SDAG-NEXT: v_addc_u32_e32 v48, vcc, -1, v48, vcc
+; SDAG-NEXT: v_addc_u32_e32 v49, vcc, -1, v49, vcc
+; SDAG-NEXT: v_or_b32_e32 v55, v39, v49
+; SDAG-NEXT: v_or_b32_e32 v54, v38, v48
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[54:55]
+; SDAG-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; SDAG-NEXT: v_or_b32_e32 v12, v20, v12
+; SDAG-NEXT: v_mov_b32_e32 v21, v17
+; SDAG-NEXT: v_mov_b32_e32 v20, v16
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; SDAG-NEXT: s_cbranch_execnz .LBB2_9
+; SDAG-NEXT: ; %bb.10: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: .LBB2_11: ; %Flow11
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_lshl_b64 v[14:15], v[14:15], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v20, 31, v13
+; SDAG-NEXT: v_lshl_b64 v[12:13], v[12:13], 1
+; SDAG-NEXT: v_or_b32_e32 v14, v14, v20
+; SDAG-NEXT: v_or_b32_e32 v19, v19, v15
+; SDAG-NEXT: v_or_b32_e32 v17, v17, v13
+; SDAG-NEXT: v_or_b32_e32 v18, v18, v14
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v12
+; SDAG-NEXT: .LBB2_12: ; %Flow12
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mul_lo_u32 v14, v33, v9
+; SDAG-NEXT: v_mad_u64_u32 v[12:13], s[4:5], v33, v8, 0
+; SDAG-NEXT: v_mul_lo_u32 v24, v27, v8
+; SDAG-NEXT: v_mul_lo_u32 v25, v35, v31
+; SDAG-NEXT: v_mul_lo_u32 v35, v32, v30
+; SDAG-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v31, v33, 0
+; SDAG-NEXT: v_mov_b32_e32 v15, 0
+; SDAG-NEXT: v_mul_lo_u32 v38, v16, v11
+; SDAG-NEXT: v_mad_u64_u32 v[20:21], s[4:5], v16, v10, 0
+; SDAG-NEXT: v_mul_lo_u32 v39, v17, v10
+; SDAG-NEXT: v_mul_lo_u32 v19, v19, v37
+; SDAG-NEXT: v_mul_lo_u32 v48, v18, v36
+; SDAG-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v37, v16, 0
+; SDAG-NEXT: v_add_i32_e32 v13, vcc, v13, v14
+; SDAG-NEXT: v_mov_b32_e32 v14, v9
+; SDAG-NEXT: v_mad_u64_u32 v[22:23], s[4:5], v30, v33, v[14:15]
+; SDAG-NEXT: v_sub_i32_e32 v2, vcc, v2, v8
+; SDAG-NEXT: v_add_i32_e64 v14, s[4:5], v21, v38
+; SDAG-NEXT: v_add_i32_e64 v13, s[4:5], v13, v24
+; SDAG-NEXT: v_mov_b32_e32 v24, v23
+; SDAG-NEXT: v_mov_b32_e32 v23, v15
+; SDAG-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v31, v27, v[22:23]
+; SDAG-NEXT: v_xor_b32_e32 v33, v2, v28
+; SDAG-NEXT: v_add_i32_e64 v21, s[4:5], v14, v39
+; SDAG-NEXT: v_mov_b32_e32 v14, v11
+; SDAG-NEXT: v_mad_u64_u32 v[22:23], s[4:5], v36, v16, v[14:15]
+; SDAG-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v32, v31, v[12:13]
+; SDAG-NEXT: v_mov_b32_e32 v2, v9
+; SDAG-NEXT: v_add_i32_e64 v13, s[4:5], v24, v2
+; SDAG-NEXT: v_addc_u32_e64 v14, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v2, v8
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v3, v2, vcc
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v18, v37, v[20:21]
+; SDAG-NEXT: v_mov_b32_e32 v18, v23
+; SDAG-NEXT: v_mov_b32_e32 v23, v15
+; SDAG-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v37, v17, v[22:23]
+; SDAG-NEXT: v_add_i32_e64 v20, s[4:5], v25, v12
+; SDAG-NEXT: v_mad_u64_u32 v[12:13], s[4:5], v30, v27, v[13:14]
+; SDAG-NEXT: v_xor_b32_e32 v16, v16, v29
+; SDAG-NEXT: v_add_i32_e64 v3, s[4:5], v19, v3
+; SDAG-NEXT: v_add_i32_e64 v14, s[4:5], v18, v9
+; SDAG-NEXT: v_addc_u32_e64 v15, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v18, v8
+; SDAG-NEXT: v_add_i32_e64 v19, s[4:5], v35, v20
+; SDAG-NEXT: v_add_i32_e64 v3, s[4:5], v48, v3
+; SDAG-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v36, v17, v[14:15]
+; SDAG-NEXT: v_add_i32_e64 v11, s[4:5], v12, v11
+; SDAG-NEXT: v_addc_u32_e64 v12, s[4:5], v13, v19, s[4:5]
+; SDAG-NEXT: v_subb_u32_e32 v0, vcc, v0, v11, vcc
+; SDAG-NEXT: v_add_i32_e64 v8, s[4:5], v8, v2
+; SDAG-NEXT: v_addc_u32_e64 v9, s[4:5], v9, v3, s[4:5]
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; SDAG-NEXT: v_xor_b32_e32 v2, v0, v28
+; SDAG-NEXT: v_xor_b32_e32 v3, v1, v29
+; SDAG-NEXT: v_sub_i32_e32 v0, vcc, v33, v28
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v16, v29, vcc
+; SDAG-NEXT: v_subb_u32_e32 v2, vcc, v2, v28, vcc
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v3, v29, vcc
+; SDAG-NEXT: v_sub_i32_e32 v6, vcc, v6, v10
+; SDAG-NEXT: v_subb_u32_e32 v7, vcc, v7, v18, vcc
+; SDAG-NEXT: v_xor_b32_e32 v6, v6, v26
+; SDAG-NEXT: v_subb_u32_e32 v4, vcc, v4, v8, vcc
+; SDAG-NEXT: v_xor_b32_e32 v7, v7, v34
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v5, v9, vcc
+; SDAG-NEXT: v_xor_b32_e32 v8, v4, v26
+; SDAG-NEXT: v_xor_b32_e32 v9, v5, v34
+; SDAG-NEXT: v_sub_i32_e32 v4, vcc, v6, v26
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v7, v34, vcc
+; SDAG-NEXT: v_subb_u32_e32 v6, vcc, v8, v26, vcc
+; SDAG-NEXT: v_subb_u32_e32 v7, vcc, v9, v34, vcc
+; SDAG-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
+; SDAG-NEXT: s_waitcnt vmcnt(0)
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: v_srem_v2i128_vv:
+; GISEL: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_ashrrev_i32_e32 v28, 31, v3
+; GISEL-NEXT: v_ashrrev_i32_e32 v20, 31, v11
+; GISEL-NEXT: v_mov_b32_e32 v18, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v19, 0
+; GISEL-NEXT: v_xor_b32_e32 v0, v0, v28
+; GISEL-NEXT: v_xor_b32_e32 v1, v1, v28
+; GISEL-NEXT: v_xor_b32_e32 v2, v2, v28
+; GISEL-NEXT: v_xor_b32_e32 v3, v3, v28
+; GISEL-NEXT: v_xor_b32_e32 v8, v8, v20
+; GISEL-NEXT: v_xor_b32_e32 v9, v9, v20
+; GISEL-NEXT: v_xor_b32_e32 v10, v10, v20
+; GISEL-NEXT: v_xor_b32_e32 v11, v11, v20
+; GISEL-NEXT: v_sub_i32_e32 v16, vcc, v0, v28
+; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v1, v28, vcc
+; GISEL-NEXT: v_sub_i32_e64 v30, s[4:5], v8, v20
+; GISEL-NEXT: v_subb_u32_e64 v29, s[4:5], v9, v20, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v8, vcc, v2, v28, vcc
+; GISEL-NEXT: v_subb_u32_e32 v9, vcc, v3, v28, vcc
+; GISEL-NEXT: v_subb_u32_e64 v10, vcc, v10, v20, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v11, vcc, v11, v20, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v20, v29
+; GISEL-NEXT: v_ffbh_u32_e32 v21, v30
+; GISEL-NEXT: v_ffbh_u32_e32 v22, v17
+; GISEL-NEXT: v_ffbh_u32_e32 v23, v16
+; GISEL-NEXT: v_or_b32_e32 v0, v30, v10
+; GISEL-NEXT: v_or_b32_e32 v1, v29, v11
+; GISEL-NEXT: v_or_b32_e32 v2, v16, v8
+; GISEL-NEXT: v_or_b32_e32 v3, v17, v9
+; GISEL-NEXT: v_add_i32_e32 v21, vcc, 32, v21
+; GISEL-NEXT: v_ffbh_u32_e32 v24, v11
+; GISEL-NEXT: v_ffbh_u32_e32 v25, v10
+; GISEL-NEXT: v_add_i32_e32 v23, vcc, 32, v23
+; GISEL-NEXT: v_ffbh_u32_e32 v26, v9
+; GISEL-NEXT: v_ffbh_u32_e32 v27, v8
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[2:3]
+; GISEL-NEXT: v_min_u32_e32 v0, v20, v21
+; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v25
+; GISEL-NEXT: v_min_u32_e32 v2, v22, v23
+; GISEL-NEXT: v_add_i32_e64 v3, s[6:7], 32, v27
+; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 64, v0
+; GISEL-NEXT: v_min_u32_e32 v1, v24, v1
+; GISEL-NEXT: v_add_i32_e64 v2, s[6:7], 64, v2
+; GISEL-NEXT: v_min_u32_e32 v3, v26, v3
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v20, 0, 1, s[4:5]
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v2, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[18:19]
+; GISEL-NEXT: v_cndmask_b32_e64 v21, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v18, 0x7f, v0
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e64 v22, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v2
+; GISEL-NEXT: v_or_b32_e32 v19, v1, v3
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e32 v21, v22, v21, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v19, v20, v21
+; GISEL-NEXT: v_and_b32_e32 v20, 1, v19
+; GISEL-NEXT: v_or_b32_e32 v18, v19, v18
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v20
+; GISEL-NEXT: v_cndmask_b32_e64 v31, v16, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v20, 1, v18
+; GISEL-NEXT: v_cndmask_b32_e64 v32, v17, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v18, v8, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v19, v9, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v20
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB2_6
+; GISEL-NEXT: ; %bb.1: ; %udiv-bb15
+; GISEL-NEXT: v_add_i32_e32 v31, vcc, 1, v0
+; GISEL-NEXT: v_addc_u32_e64 v32, s[4:5], 0, v1, vcc
+; GISEL-NEXT: v_sub_i32_e32 v24, vcc, 0x7f, v0
+; GISEL-NEXT: v_addc_u32_e64 v33, vcc, 0, v2, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v34, vcc, 0, v3, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v20, s[4:5], 64, v24
+; GISEL-NEXT: v_sub_i32_e64 v18, s[4:5], 64, v24
+; GISEL-NEXT: v_lshl_b64 v[0:1], v[16:17], v24
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[8:9], v24
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[18:19], v[16:17], v18
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[16:17], v20
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v24
+; GISEL-NEXT: v_cndmask_b32_e32 v20, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v21, 0, v1, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v18, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v19, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v22, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v23, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v24
+; GISEL-NEXT: v_cndmask_b32_e32 v18, v0, v8, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v19, v1, v9, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v0, s8
+; GISEL-NEXT: v_mov_b32_e32 v1, s9
+; GISEL-NEXT: v_mov_b32_e32 v2, s10
+; GISEL-NEXT: v_mov_b32_e32 v3, s11
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[8:9], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB2_5
+; GISEL-NEXT: ; %bb.2: ; %udiv-preheader4
+; GISEL-NEXT: v_subrev_i32_e32 v24, vcc, 64, v31
+; GISEL-NEXT: v_sub_i32_e32 v22, vcc, 64, v31
+; GISEL-NEXT: v_lshr_b64 v[0:1], v[8:9], v31
+; GISEL-NEXT: v_lshr_b64 v[2:3], v[16:17], v31
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_add_i32_e32 v35, vcc, -1, v30
+; GISEL-NEXT: v_addc_u32_e32 v36, vcc, -1, v29, vcc
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[8:9], v22
+; GISEL-NEXT: v_lshr_b64 v[24:25], v[8:9], v24
+; GISEL-NEXT: v_addc_u32_e32 v37, vcc, -1, v10, vcc
+; GISEL-NEXT: v_addc_u32_e32 v38, vcc, -1, v11, vcc
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v22
+; GISEL-NEXT: v_or_b32_e32 v3, v3, v23
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v31
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v24, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v25, v3, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v26, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v27, 0, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v31
+; GISEL-NEXT: v_cndmask_b32_e32 v24, v2, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v25, v3, v17, vcc
+; GISEL-NEXT: v_mov_b32_e32 v23, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GISEL-NEXT: .LBB2_3: ; %udiv-do-while3
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[20:21], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v22, 31, v21
+; GISEL-NEXT: v_lshl_b64 v[48:49], v[24:25], 1
+; GISEL-NEXT: v_lshl_b64 v[26:27], v[26:27], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v24, 31, v25
+; GISEL-NEXT: v_lshrrev_b32_e32 v25, 31, v19
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[18:19], 1
+; GISEL-NEXT: v_add_i32_e32 v31, vcc, -1, v31
+; GISEL-NEXT: v_addc_u32_e32 v32, vcc, -1, v32, vcc
+; GISEL-NEXT: v_or_b32_e32 v20, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v21, v1, v3
+; GISEL-NEXT: v_or_b32_e32 v2, v26, v24
+; GISEL-NEXT: v_or_b32_e32 v3, v48, v25
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v22
+; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v33, vcc
+; GISEL-NEXT: v_addc_u32_e32 v34, vcc, -1, v34, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v35, v3
+; GISEL-NEXT: v_subb_u32_e32 v0, vcc, v36, v49, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v31, v33
+; GISEL-NEXT: v_or_b32_e32 v1, v32, v34
+; GISEL-NEXT: v_subb_u32_e32 v22, vcc, v37, v2, vcc
+; GISEL-NEXT: v_subb_u32_e32 v22, vcc, v38, v27, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v22
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v22, 1, v0
+; GISEL-NEXT: v_and_b32_e32 v1, v0, v30
+; GISEL-NEXT: v_and_b32_e32 v25, v0, v29
+; GISEL-NEXT: v_and_b32_e32 v26, v0, v10
+; GISEL-NEXT: v_and_b32_e32 v0, v0, v11
+; GISEL-NEXT: v_sub_i32_e32 v24, vcc, v3, v1
+; GISEL-NEXT: v_subb_u32_e32 v25, vcc, v49, v25, vcc
+; GISEL-NEXT: v_subb_u32_e32 v26, vcc, v2, v26, vcc
+; GISEL-NEXT: v_subb_u32_e32 v27, vcc, v27, v0, vcc
+; GISEL-NEXT: v_mov_b32_e32 v0, v22
+; GISEL-NEXT: v_mov_b32_e32 v1, v23
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execnz .LBB2_3
+; GISEL-NEXT: ; %bb.4: ; %Flow13
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB2_5: ; %Flow14
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[20:21], 1
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[18:19], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v20, 31, v21
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v20
+; GISEL-NEXT: v_or_b32_e32 v31, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v32, v1, v3
+; GISEL-NEXT: .LBB2_6: ; %Flow16
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_ashrrev_i32_e32 v33, 31, v7
+; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v15
+; GISEL-NEXT: v_mov_b32_e32 v2, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GISEL-NEXT: v_xor_b32_e32 v1, v4, v33
+; GISEL-NEXT: v_xor_b32_e32 v4, v5, v33
+; GISEL-NEXT: v_xor_b32_e32 v5, v6, v33
+; GISEL-NEXT: v_xor_b32_e32 v7, v7, v33
+; GISEL-NEXT: v_xor_b32_e32 v6, v12, v0
+; GISEL-NEXT: v_xor_b32_e32 v20, v13, v0
+; GISEL-NEXT: v_xor_b32_e32 v14, v14, v0
+; GISEL-NEXT: v_xor_b32_e32 v15, v15, v0
+; GISEL-NEXT: v_sub_i32_e32 v12, vcc, v1, v33
+; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v4, v33, vcc
+; GISEL-NEXT: v_sub_i32_e64 v35, s[4:5], v6, v0
+; GISEL-NEXT: v_subb_u32_e64 v34, s[4:5], v20, v0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v6, vcc, v5, v33, vcc
+; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v7, v33, vcc
+; GISEL-NEXT: v_subb_u32_e64 v4, vcc, v14, v0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v15, v0, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v20, v34
+; GISEL-NEXT: v_ffbh_u32_e32 v21, v35
+; GISEL-NEXT: v_ffbh_u32_e32 v22, v13
+; GISEL-NEXT: v_ffbh_u32_e32 v23, v12
+; GISEL-NEXT: v_or_b32_e32 v0, v35, v4
+; GISEL-NEXT: v_or_b32_e32 v1, v34, v5
+; GISEL-NEXT: v_or_b32_e32 v14, v12, v6
+; GISEL-NEXT: v_or_b32_e32 v15, v13, v7
+; GISEL-NEXT: v_add_i32_e32 v21, vcc, 32, v21
+; GISEL-NEXT: v_ffbh_u32_e32 v24, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v25, v4
+; GISEL-NEXT: v_add_i32_e32 v23, vcc, 32, v23
+; GISEL-NEXT: v_ffbh_u32_e32 v26, v7
+; GISEL-NEXT: v_ffbh_u32_e32 v27, v6
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[14:15]
+; GISEL-NEXT: v_min_u32_e32 v0, v20, v21
+; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v25
+; GISEL-NEXT: v_min_u32_e32 v14, v22, v23
+; GISEL-NEXT: v_add_i32_e64 v15, s[6:7], 32, v27
+; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 64, v0
+; GISEL-NEXT: v_min_u32_e32 v1, v24, v1
+; GISEL-NEXT: v_add_i32_e64 v14, s[6:7], 64, v14
+; GISEL-NEXT: v_min_u32_e32 v15, v26, v15
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v20, 0, 1, s[4:5]
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v15, v14, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v14, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v15, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e64 v21, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v2, 0x7f, v0
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[14:15]
+; GISEL-NEXT: v_cndmask_b32_e64 v22, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v14
+; GISEL-NEXT: v_or_b32_e32 v3, v1, v15
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GISEL-NEXT: v_cndmask_b32_e32 v21, v22, v21, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v3, v20, v21
+; GISEL-NEXT: v_and_b32_e32 v20, 1, v3
+; GISEL-NEXT: v_or_b32_e32 v2, v3, v2
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v20
+; GISEL-NEXT: v_cndmask_b32_e64 v20, v12, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v22, 1, v2
+; GISEL-NEXT: v_cndmask_b32_e64 v21, v13, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v2, v6, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v3, v7, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB2_12
+; GISEL-NEXT: ; %bb.7: ; %udiv-bb1
+; GISEL-NEXT: v_add_i32_e32 v36, vcc, 1, v0
+; GISEL-NEXT: v_addc_u32_e64 v37, s[4:5], 0, v1, vcc
+; GISEL-NEXT: v_sub_i32_e32 v24, vcc, 0x7f, v0
+; GISEL-NEXT: v_addc_u32_e64 v38, vcc, 0, v14, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v39, vcc, 0, v15, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v20, s[4:5], 64, v24
+; GISEL-NEXT: v_sub_i32_e64 v14, s[4:5], 64, v24
+; GISEL-NEXT: v_lshl_b64 v[0:1], v[12:13], v24
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[6:7], v24
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[14:15], v[12:13], v14
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[12:13], v20
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v24
+; GISEL-NEXT: v_cndmask_b32_e32 v20, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v21, 0, v1, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v14, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v15, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v22, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v23, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v24
+; GISEL-NEXT: v_cndmask_b32_e32 v14, v0, v6, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v15, v1, v7, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v0, s8
+; GISEL-NEXT: v_mov_b32_e32 v1, s9
+; GISEL-NEXT: v_mov_b32_e32 v2, s10
+; GISEL-NEXT: v_mov_b32_e32 v3, s11
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[8:9], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB2_11
+; GISEL-NEXT: ; %bb.8: ; %udiv-preheader
+; GISEL-NEXT: v_subrev_i32_e32 v24, vcc, 64, v36
+; GISEL-NEXT: v_sub_i32_e32 v22, vcc, 64, v36
+; GISEL-NEXT: v_lshr_b64 v[0:1], v[6:7], v36
+; GISEL-NEXT: v_lshr_b64 v[2:3], v[12:13], v36
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_add_i32_e32 v48, vcc, -1, v35
+; GISEL-NEXT: v_addc_u32_e32 v49, vcc, -1, v34, vcc
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[6:7], v22
+; GISEL-NEXT: v_lshr_b64 v[24:25], v[6:7], v24
+; GISEL-NEXT: v_addc_u32_e32 v50, vcc, -1, v4, vcc
+; GISEL-NEXT: v_addc_u32_e32 v51, vcc, -1, v5, vcc
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v22
+; GISEL-NEXT: v_or_b32_e32 v3, v3, v23
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v36
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v24, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v25, v3, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v26, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v27, 0, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v36
+; GISEL-NEXT: v_cndmask_b32_e32 v24, v2, v12, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v25, v3, v13, vcc
+; GISEL-NEXT: v_mov_b32_e32 v23, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GISEL-NEXT: .LBB2_9: ; %udiv-do-while
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[20:21], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v22, 31, v21
+; GISEL-NEXT: v_lshl_b64 v[52:53], v[24:25], 1
+; GISEL-NEXT: v_lshl_b64 v[26:27], v[26:27], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v24, 31, v25
+; GISEL-NEXT: v_lshrrev_b32_e32 v25, 31, v15
+; GISEL-NEXT: v_lshl_b64 v[14:15], v[14:15], 1
+; GISEL-NEXT: v_add_i32_e32 v36, vcc, -1, v36
+; GISEL-NEXT: v_addc_u32_e32 v37, vcc, -1, v37, vcc
+; GISEL-NEXT: v_or_b32_e32 v20, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v21, v1, v3
+; GISEL-NEXT: v_or_b32_e32 v2, v26, v24
+; GISEL-NEXT: v_or_b32_e32 v3, v52, v25
+; GISEL-NEXT: v_or_b32_e32 v14, v14, v22
+; GISEL-NEXT: v_addc_u32_e32 v38, vcc, -1, v38, vcc
+; GISEL-NEXT: v_addc_u32_e32 v39, vcc, -1, v39, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v48, v3
+; GISEL-NEXT: v_subb_u32_e32 v0, vcc, v49, v53, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v36, v38
+; GISEL-NEXT: v_or_b32_e32 v1, v37, v39
+; GISEL-NEXT: v_subb_u32_e32 v22, vcc, v50, v2, vcc
+; GISEL-NEXT: v_subb_u32_e32 v22, vcc, v51, v27, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v22
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v22, 1, v0
+; GISEL-NEXT: v_and_b32_e32 v1, v0, v35
+; GISEL-NEXT: v_and_b32_e32 v25, v0, v34
+; GISEL-NEXT: v_and_b32_e32 v26, v0, v4
+; GISEL-NEXT: v_and_b32_e32 v52, v0, v5
+; GISEL-NEXT: v_sub_i32_e32 v24, vcc, v3, v1
+; GISEL-NEXT: v_subb_u32_e32 v25, vcc, v53, v25, vcc
+; GISEL-NEXT: v_mov_b32_e32 v0, v22
+; GISEL-NEXT: v_mov_b32_e32 v1, v23
+; GISEL-NEXT: v_subb_u32_e32 v26, vcc, v2, v26, vcc
+; GISEL-NEXT: v_subb_u32_e32 v27, vcc, v27, v52, vcc
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execnz .LBB2_9
+; GISEL-NEXT: ; %bb.10: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB2_11: ; %Flow11
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[20:21], 1
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[14:15], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v14, 31, v21
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v14
+; GISEL-NEXT: v_or_b32_e32 v20, v0, v22
+; GISEL-NEXT: v_or_b32_e32 v21, v1, v23
+; GISEL-NEXT: .LBB2_12: ; %Flow12
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v30, v31, 0
+; GISEL-NEXT: v_mad_u64_u32 v[14:15], s[4:5], v30, v18, 0
+; GISEL-NEXT: v_mul_lo_u32 v24, v30, v19
+; GISEL-NEXT: v_mul_lo_u32 v25, v29, v18
+; GISEL-NEXT: v_mad_u64_u32 v[18:19], s[4:5], v35, v20, 0
+; GISEL-NEXT: v_mad_u64_u32 v[22:23], s[4:5], v35, v2, 0
+; GISEL-NEXT: v_mul_lo_u32 v26, v35, v3
+; GISEL-NEXT: v_mul_lo_u32 v27, v34, v2
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v29, v32, v[14:15]
+; GISEL-NEXT: v_mad_u64_u32 v[14:15], s[4:5], v34, v21, v[22:23]
+; GISEL-NEXT: v_mov_b32_e32 v22, v19
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v10, v31, v[2:3]
+; GISEL-NEXT: v_mad_u64_u32 v[14:15], s[4:5], v4, v20, v[14:15]
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v30, v32, v[1:2]
+; GISEL-NEXT: v_mov_b32_e32 v23, v14
+; GISEL-NEXT: v_mad_u64_u32 v[22:23], s[4:5], v35, v21, v[22:23]
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v29, v31, v[1:2]
+; GISEL-NEXT: v_addc_u32_e64 v3, s[6:7], v3, v24, s[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[22:23], s[6:7], v34, v20, v[22:23]
+; GISEL-NEXT: v_addc_u32_e64 v14, s[6:7], v15, v26, s[6:7]
+; GISEL-NEXT: v_addc_u32_e32 v3, vcc, v3, v25, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v16, v0
+; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v17, v1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v15, v0, v28
+; GISEL-NEXT: v_addc_u32_e64 v0, s[4:5], v14, v27, s[4:5]
+; GISEL-NEXT: v_sub_i32_e64 v12, s[4:5], v12, v18
+; GISEL-NEXT: v_subb_u32_e64 v14, s[4:5], v13, v22, s[4:5]
+; GISEL-NEXT: v_xor_b32_e32 v16, v12, v33
+; GISEL-NEXT: v_mad_u64_u32 v[12:13], s[6:7], v10, v32, v[3:4]
+; GISEL-NEXT: v_xor_b32_e32 v1, v1, v28
+; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v4, v21, v[0:1]
+; GISEL-NEXT: v_xor_b32_e32 v14, v14, v33
+; GISEL-NEXT: v_mad_u64_u32 v[10:11], s[6:7], v11, v31, v[12:13]
+; GISEL-NEXT: v_sub_i32_e64 v0, s[6:7], v15, v28
+; GISEL-NEXT: v_subb_u32_e64 v1, s[6:7], v1, v28, s[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[8:9], v5, v20, v[3:4]
+; GISEL-NEXT: v_sub_i32_e64 v4, s[8:9], v16, v33
+; GISEL-NEXT: v_subb_u32_e64 v5, s[8:9], v14, v33, s[8:9]
+; GISEL-NEXT: v_subb_u32_e32 v2, vcc, v8, v2, vcc
+; GISEL-NEXT: v_subb_u32_e32 v8, vcc, v9, v10, vcc
+; GISEL-NEXT: v_xor_b32_e32 v2, v2, v28
+; GISEL-NEXT: v_subb_u32_e64 v6, vcc, v6, v23, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v7, v3, vcc
+; GISEL-NEXT: v_xor_b32_e32 v6, v6, v33
+; GISEL-NEXT: v_xor_b32_e32 v7, v8, v28
+; GISEL-NEXT: v_xor_b32_e32 v8, v3, v33
+; GISEL-NEXT: v_subb_u32_e64 v2, vcc, v2, v28, s[6:7]
+; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v7, v28, vcc
+; GISEL-NEXT: v_subb_u32_e64 v6, vcc, v6, v33, s[8:9]
+; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v8, v33, vcc
+; GISEL-NEXT: s_setpc_b64 s[30:31]
  %rem = srem <2 x i128> %lhs, %rhs
  ret <2 x i128> %rem
}
define <2 x i128> @v_urem_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
+; SDAG-LABEL: v_urem_v2i128_vv:
+; SDAG: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_or_b32_e32 v17, v9, v11
+; SDAG-NEXT: v_or_b32_e32 v16, v8, v10
+; SDAG-NEXT: v_or_b32_e32 v19, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v18, v0, v2
+; SDAG-NEXT: v_ffbh_u32_e32 v20, v10
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v11
+; SDAG-NEXT: v_ffbh_u32_e32 v22, v8
+; SDAG-NEXT: v_ffbh_u32_e32 v23, v9
+; SDAG-NEXT: v_ffbh_u32_e32 v24, v2
+; SDAG-NEXT: v_ffbh_u32_e32 v25, v3
+; SDAG-NEXT: v_ffbh_u32_e32 v26, v0
+; SDAG-NEXT: v_ffbh_u32_e32 v27, v1
+; SDAG-NEXT: v_mov_b32_e32 v28, 0
+; SDAG-NEXT: s_mov_b64 s[8:9], 0x7f
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[18:19]
+; SDAG-NEXT: v_add_i32_e64 v16, s[6:7], 32, v20
+; SDAG-NEXT: v_add_i32_e64 v17, s[6:7], 32, v22
+; SDAG-NEXT: v_add_i32_e64 v18, s[6:7], 32, v24
+; SDAG-NEXT: v_add_i32_e64 v19, s[6:7], 32, v26
+; SDAG-NEXT: s_or_b64 s[6:7], vcc, s[4:5]
+; SDAG-NEXT: v_min_u32_e32 v16, v16, v21
+; SDAG-NEXT: v_min_u32_e32 v17, v17, v23
+; SDAG-NEXT: v_min_u32_e32 v18, v18, v25
+; SDAG-NEXT: v_min_u32_e32 v19, v19, v27
+; SDAG-NEXT: v_add_i32_e32 v17, vcc, 64, v17
+; SDAG-NEXT: v_addc_u32_e64 v20, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_add_i32_e32 v19, vcc, 64, v19
+; SDAG-NEXT: v_addc_u32_e64 v21, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v20, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v16, v17, v16, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v21, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v18, v19, v18, vcc
+; SDAG-NEXT: v_sub_i32_e32 v16, vcc, v16, v18
+; SDAG-NEXT: v_subb_u32_e32 v17, vcc, v20, v17, vcc
+; SDAG-NEXT: v_xor_b32_e32 v18, 0x7f, v16
+; SDAG-NEXT: v_subbrev_u32_e32 v20, vcc, 0, v28, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[8:9], v[16:17]
+; SDAG-NEXT: v_cndmask_b32_e64 v22, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v21, vcc, 0, v28, vcc
+; SDAG-NEXT: v_or_b32_e32 v18, v18, v20
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[20:21]
+; SDAG-NEXT: v_cndmask_b32_e64 v23, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v19, v17, v21
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[20:21]
+; SDAG-NEXT: v_cndmask_b32_e32 v22, v23, v22, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_and_b32_e32 v18, 1, v22
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v18
+; SDAG-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v33, v3, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v31, v2, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v30, v1, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v32, v0, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB3_6
+; SDAG-NEXT: ; %bb.1: ; %udiv-bb15
+; SDAG-NEXT: v_add_i32_e32 v30, vcc, 1, v16
+; SDAG-NEXT: v_sub_i32_e64 v22, s[4:5], 63, v16
+; SDAG-NEXT: v_mov_b32_e32 v18, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: v_addc_u32_e32 v31, vcc, 0, v17, vcc
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[0:1], v22
+; SDAG-NEXT: v_addc_u32_e32 v32, vcc, 0, v20, vcc
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, 0, v21, vcc
+; SDAG-NEXT: v_or_b32_e32 v20, v30, v32
+; SDAG-NEXT: v_sub_i32_e32 v26, vcc, 0x7f, v16
+; SDAG-NEXT: v_or_b32_e32 v21, v31, v33
+; SDAG-NEXT: v_lshl_b64 v[16:17], v[2:3], v26
+; SDAG-NEXT: v_sub_i32_e32 v27, vcc, 64, v26
+; SDAG-NEXT: v_lshl_b64 v[24:25], v[0:1], v26
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[20:21]
+; SDAG-NEXT: v_lshr_b64 v[20:21], v[0:1], v27
+; SDAG-NEXT: v_or_b32_e32 v17, v17, v21
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v20
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v26
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v23, v17, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v22, v16, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v23, 0, v25, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v22, 0, v24, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v26
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v17, v3, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v16, v2, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v20, 0
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB3_5
+; SDAG-NEXT: ; %bb.2: ; %udiv-preheader4
+; SDAG-NEXT: v_lshr_b64 v[18:19], v[0:1], v30
+; SDAG-NEXT: v_sub_i32_e32 v28, vcc, 64, v30
+; SDAG-NEXT: v_subrev_i32_e32 v35, vcc, 64, v30
+; SDAG-NEXT: v_lshr_b64 v[26:27], v[2:3], v30
+; SDAG-NEXT: v_add_i32_e32 v34, vcc, -1, v8
+; SDAG-NEXT: s_mov_b64 s[10:11], 0
+; SDAG-NEXT: v_mov_b32_e32 v24, 0
+; SDAG-NEXT: v_mov_b32_e32 v25, 0
+; SDAG-NEXT: v_mov_b32_e32 v20, 0
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: v_lshl_b64 v[28:29], v[2:3], v28
+; SDAG-NEXT: v_lshr_b64 v[37:38], v[2:3], v35
+; SDAG-NEXT: v_addc_u32_e32 v35, vcc, -1, v9, vcc
+; SDAG-NEXT: v_or_b32_e32 v19, v19, v29
+; SDAG-NEXT: v_or_b32_e32 v18, v18, v28
+; SDAG-NEXT: v_addc_u32_e32 v36, vcc, -1, v10, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v30
+; SDAG-NEXT: v_cndmask_b32_e64 v19, v38, v19, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v37, v18, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v29, 0, v27, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v28, 0, v26, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v37, vcc, -1, v11, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30
+; SDAG-NEXT: v_cndmask_b32_e32 v27, v19, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v26, v18, v0, vcc
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: .LBB3_3: ; %udiv-do-while3
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshl_b64 v[28:29], v[28:29], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v18, 31, v27
+; SDAG-NEXT: v_lshl_b64 v[26:27], v[26:27], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v38, 31, v17
+; SDAG-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v39, 31, v23
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[22:23], 1
+; SDAG-NEXT: v_or_b32_e32 v28, v28, v18
+; SDAG-NEXT: v_or_b32_e32 v26, v26, v38
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v39
+; SDAG-NEXT: v_or_b32_e32 v17, v21, v17
+; SDAG-NEXT: v_sub_i32_e32 v18, vcc, v34, v26
+; SDAG-NEXT: v_or_b32_e32 v16, v20, v16
+; SDAG-NEXT: v_subb_u32_e32 v18, vcc, v35, v27, vcc
+; SDAG-NEXT: v_subb_u32_e32 v18, vcc, v36, v28, vcc
+; SDAG-NEXT: v_subb_u32_e32 v18, vcc, v37, v29, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v38, 31, v18
+; SDAG-NEXT: v_and_b32_e32 v39, v38, v8
+; SDAG-NEXT: v_and_b32_e32 v48, v38, v9
+; SDAG-NEXT: v_and_b32_e32 v49, v38, v10
+; SDAG-NEXT: v_and_b32_e32 v18, 1, v38
+; SDAG-NEXT: v_and_b32_e32 v38, v38, v11
+; SDAG-NEXT: v_sub_i32_e32 v26, vcc, v26, v39
+; SDAG-NEXT: v_subb_u32_e32 v27, vcc, v27, v48, vcc
+; SDAG-NEXT: v_subb_u32_e32 v28, vcc, v28, v49, vcc
+; SDAG-NEXT: v_subb_u32_e32 v29, vcc, v29, v38, vcc
+; SDAG-NEXT: v_add_i32_e32 v30, vcc, -1, v30
+; SDAG-NEXT: v_addc_u32_e32 v31, vcc, -1, v31, vcc
+; SDAG-NEXT: v_addc_u32_e32 v32, vcc, -1, v32, vcc
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, -1, v33, vcc
+; SDAG-NEXT: v_or_b32_e32 v38, v30, v32
+; SDAG-NEXT: v_or_b32_e32 v39, v31, v33
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[38:39]
+; SDAG-NEXT: v_or_b32_e32 v23, v25, v23
+; SDAG-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; SDAG-NEXT: v_or_b32_e32 v22, v24, v22
+; SDAG-NEXT: v_mov_b32_e32 v25, v19
+; SDAG-NEXT: v_mov_b32_e32 v24, v18
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; SDAG-NEXT: s_cbranch_execnz .LBB3_3
+; SDAG-NEXT: ; %bb.4: ; %Flow13
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: .LBB3_5: ; %Flow14
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v24, 31, v23
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[22:23], 1
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v24
+; SDAG-NEXT: v_or_b32_e32 v33, v21, v17
+; SDAG-NEXT: v_or_b32_e32 v30, v19, v23
+; SDAG-NEXT: v_or_b32_e32 v31, v20, v16
+; SDAG-NEXT: v_or_b32_e32 v32, v18, v22
+; SDAG-NEXT: .LBB3_6: ; %Flow16
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_or_b32_e32 v17, v13, v15
+; SDAG-NEXT: v_or_b32_e32 v16, v12, v14
+; SDAG-NEXT: v_or_b32_e32 v19, v5, v7
+; SDAG-NEXT: v_or_b32_e32 v18, v4, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v20, v14
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v15
+; SDAG-NEXT: v_ffbh_u32_e32 v22, v12
+; SDAG-NEXT: v_ffbh_u32_e32 v23, v13
+; SDAG-NEXT: v_ffbh_u32_e32 v24, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v25, v7
+; SDAG-NEXT: v_ffbh_u32_e32 v26, v4
+; SDAG-NEXT: v_ffbh_u32_e32 v27, v5
+; SDAG-NEXT: v_mov_b32_e32 v28, 0
+; SDAG-NEXT: s_mov_b64 s[8:9], 0x7f
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[18:19]
+; SDAG-NEXT: v_add_i32_e64 v16, s[6:7], 32, v20
+; SDAG-NEXT: v_add_i32_e64 v17, s[6:7], 32, v22
+; SDAG-NEXT: v_add_i32_e64 v18, s[6:7], 32, v24
+; SDAG-NEXT: v_add_i32_e64 v19, s[6:7], 32, v26
+; SDAG-NEXT: s_or_b64 s[6:7], vcc, s[4:5]
+; SDAG-NEXT: v_min_u32_e32 v16, v16, v21
+; SDAG-NEXT: v_min_u32_e32 v17, v17, v23
+; SDAG-NEXT: v_min_u32_e32 v18, v18, v25
+; SDAG-NEXT: v_min_u32_e32 v19, v19, v27
+; SDAG-NEXT: v_add_i32_e32 v17, vcc, 64, v17
+; SDAG-NEXT: v_addc_u32_e64 v20, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_add_i32_e32 v19, vcc, 64, v19
+; SDAG-NEXT: v_addc_u32_e64 v21, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[14:15]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v20, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v16, v17, v16, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v21, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v18, v19, v18, vcc
+; SDAG-NEXT: v_sub_i32_e32 v16, vcc, v16, v18
+; SDAG-NEXT: v_subb_u32_e32 v17, vcc, v20, v17, vcc
+; SDAG-NEXT: v_xor_b32_e32 v20, 0x7f, v16
+; SDAG-NEXT: v_subbrev_u32_e32 v18, vcc, 0, v28, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[8:9], v[16:17]
+; SDAG-NEXT: v_cndmask_b32_e64 v22, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v19, vcc, 0, v28, vcc
+; SDAG-NEXT: v_or_b32_e32 v20, v20, v18
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_cndmask_b32_e64 v23, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v21, v17, v19
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_cndmask_b32_e32 v22, v23, v22, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[20:21]
+; SDAG-NEXT: v_and_b32_e32 v20, 1, v22
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v20
+; SDAG-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v23, v7, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v22, v6, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v21, v5, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v4, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB3_12
+; SDAG-NEXT: ; %bb.7: ; %udiv-bb1
+; SDAG-NEXT: v_add_i32_e32 v34, vcc, 1, v16
+; SDAG-NEXT: v_sub_i32_e64 v22, s[4:5], 63, v16
+; SDAG-NEXT: v_mov_b32_e32 v20, 0
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: v_addc_u32_e32 v35, vcc, 0, v17, vcc
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[4:5], v22
+; SDAG-NEXT: v_addc_u32_e32 v36, vcc, 0, v18, vcc
+; SDAG-NEXT: v_addc_u32_e32 v37, vcc, 0, v19, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v34, v36
+; SDAG-NEXT: v_sub_i32_e32 v19, vcc, 0x7f, v16
+; SDAG-NEXT: v_or_b32_e32 v18, v35, v37
+; SDAG-NEXT: v_lshl_b64 v[24:25], v[6:7], v19
+; SDAG-NEXT: v_sub_i32_e32 v16, vcc, 64, v19
+; SDAG-NEXT: v_lshl_b64 v[26:27], v[4:5], v19
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[17:18]
+; SDAG-NEXT: v_lshr_b64 v[16:17], v[4:5], v16
+; SDAG-NEXT: v_or_b32_e32 v17, v25, v17
+; SDAG-NEXT: v_or_b32_e32 v16, v24, v16
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v19
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v23, v17, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v22, v22, v16, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v17, 0, v27, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, 0, v26, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v19
+; SDAG-NEXT: v_cndmask_b32_e64 v19, v18, v7, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v22, v6, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v22, 0
+; SDAG-NEXT: v_mov_b32_e32 v23, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB3_11
+; SDAG-NEXT: ; %bb.8: ; %udiv-preheader
+; SDAG-NEXT: v_lshr_b64 v[20:21], v[4:5], v34
+; SDAG-NEXT: v_sub_i32_e32 v28, vcc, 64, v34
+; SDAG-NEXT: v_subrev_i32_e32 v39, vcc, 64, v34
+; SDAG-NEXT: v_lshr_b64 v[26:27], v[6:7], v34
+; SDAG-NEXT: v_add_i32_e32 v38, vcc, -1, v12
+; SDAG-NEXT: s_mov_b64 s[10:11], 0
+; SDAG-NEXT: v_mov_b32_e32 v24, 0
+; SDAG-NEXT: v_mov_b32_e32 v25, 0
+; SDAG-NEXT: v_mov_b32_e32 v22, 0
+; SDAG-NEXT: v_mov_b32_e32 v23, 0
+; SDAG-NEXT: v_lshl_b64 v[28:29], v[6:7], v28
+; SDAG-NEXT: v_lshr_b64 v[49:50], v[6:7], v39
+; SDAG-NEXT: v_addc_u32_e32 v39, vcc, -1, v13, vcc
+; SDAG-NEXT: v_or_b32_e32 v21, v21, v29
+; SDAG-NEXT: v_or_b32_e32 v20, v20, v28
+; SDAG-NEXT: v_addc_u32_e32 v48, vcc, -1, v14, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v34
+; SDAG-NEXT: v_cndmask_b32_e64 v21, v50, v21, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v49, v20, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v29, 0, v27, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v28, 0, v26, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v49, vcc, -1, v15, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v34
+; SDAG-NEXT: v_cndmask_b32_e32 v27, v21, v5, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v26, v20, v4, vcc
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: .LBB3_9: ; %udiv-do-while
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshl_b64 v[28:29], v[28:29], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v20, 31, v27
+; SDAG-NEXT: v_lshl_b64 v[26:27], v[26:27], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v50, 31, v19
+; SDAG-NEXT: v_lshl_b64 v[18:19], v[18:19], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v51, 31, v17
+; SDAG-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; SDAG-NEXT: v_or_b32_e32 v28, v28, v20
+; SDAG-NEXT: v_or_b32_e32 v26, v26, v50
+; SDAG-NEXT: v_or_b32_e32 v18, v18, v51
+; SDAG-NEXT: v_or_b32_e32 v19, v23, v19
+; SDAG-NEXT: v_or_b32_e32 v17, v25, v17
+; SDAG-NEXT: v_or_b32_e32 v18, v22, v18
+; SDAG-NEXT: v_sub_i32_e32 v20, vcc, v38, v26
+; SDAG-NEXT: v_subb_u32_e32 v20, vcc, v39, v27, vcc
+; SDAG-NEXT: v_subb_u32_e32 v20, vcc, v48, v28, vcc
+; SDAG-NEXT: v_subb_u32_e32 v20, vcc, v49, v29, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v25, 31, v20
+; SDAG-NEXT: v_and_b32_e32 v20, 1, v25
+; SDAG-NEXT: v_and_b32_e32 v50, v25, v15
+; SDAG-NEXT: v_and_b32_e32 v51, v25, v14
+; SDAG-NEXT: v_and_b32_e32 v52, v25, v13
+; SDAG-NEXT: v_and_b32_e32 v25, v25, v12
+; SDAG-NEXT: v_sub_i32_e32 v26, vcc, v26, v25
+; SDAG-NEXT: v_subb_u32_e32 v27, vcc, v27, v52, vcc
+; SDAG-NEXT: v_subb_u32_e32 v28, vcc, v28, v51, vcc
+; SDAG-NEXT: v_subb_u32_e32 v29, vcc, v29, v50, vcc
+; SDAG-NEXT: v_add_i32_e32 v34, vcc, -1, v34
+; SDAG-NEXT: v_addc_u32_e32 v35, vcc, -1, v35, vcc
+; SDAG-NEXT: v_addc_u32_e32 v36, vcc, -1, v36, vcc
+; SDAG-NEXT: v_addc_u32_e32 v37, vcc, -1, v37, vcc
+; SDAG-NEXT: v_or_b32_e32 v51, v35, v37
+; SDAG-NEXT: v_or_b32_e32 v50, v34, v36
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[50:51]
+; SDAG-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; SDAG-NEXT: v_or_b32_e32 v16, v24, v16
+; SDAG-NEXT: v_mov_b32_e32 v25, v21
+; SDAG-NEXT: v_mov_b32_e32 v24, v20
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; SDAG-NEXT: s_cbranch_execnz .LBB3_9
+; SDAG-NEXT: ; %bb.10: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: .LBB3_11: ; %Flow11
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_lshl_b64 v[18:19], v[18:19], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v24, 31, v17
+; SDAG-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; SDAG-NEXT: v_or_b32_e32 v18, v18, v24
+; SDAG-NEXT: v_or_b32_e32 v23, v23, v19
+; SDAG-NEXT: v_or_b32_e32 v21, v21, v17
+; SDAG-NEXT: v_or_b32_e32 v22, v22, v18
+; SDAG-NEXT: v_or_b32_e32 v20, v20, v16
+; SDAG-NEXT: .LBB3_12: ; %Flow12
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mul_lo_u32 v18, v32, v11
+; SDAG-NEXT: v_mad_u64_u32 v[16:17], s[4:5], v32, v10, 0
+; SDAG-NEXT: v_mul_lo_u32 v28, v30, v10
+; SDAG-NEXT: v_mul_lo_u32 v29, v33, v8
+; SDAG-NEXT: v_mul_lo_u32 v33, v31, v9
+; SDAG-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v8, v32, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: v_mul_lo_u32 v34, v20, v15
+; SDAG-NEXT: v_mad_u64_u32 v[24:25], s[4:5], v20, v14, 0
+; SDAG-NEXT: v_mul_lo_u32 v35, v21, v14
+; SDAG-NEXT: v_mul_lo_u32 v23, v23, v12
+; SDAG-NEXT: v_mul_lo_u32 v36, v22, v13
+; SDAG-NEXT: v_mad_u64_u32 v[14:15], s[4:5], v12, v20, 0
+; SDAG-NEXT: v_add_i32_e32 v17, vcc, v17, v18
+; SDAG-NEXT: v_mov_b32_e32 v18, v11
+; SDAG-NEXT: v_mad_u64_u32 v[26:27], s[4:5], v9, v32, v[18:19]
+; SDAG-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
+; SDAG-NEXT: v_add_i32_e64 v18, s[4:5], v25, v34
+; SDAG-NEXT: v_add_i32_e64 v17, s[4:5], v17, v28
+; SDAG-NEXT: v_mov_b32_e32 v28, v27
+; SDAG-NEXT: v_mov_b32_e32 v27, v19
+; SDAG-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v8, v30, v[26:27]
+; SDAG-NEXT: v_add_i32_e64 v25, s[4:5], v18, v35
+; SDAG-NEXT: v_mov_b32_e32 v18, v15
+; SDAG-NEXT: v_mad_u64_u32 v[26:27], s[4:5], v13, v20, v[18:19]
+; SDAG-NEXT: v_mad_u64_u32 v[15:16], s[4:5], v31, v8, v[16:17]
+; SDAG-NEXT: v_mov_b32_e32 v8, v11
+; SDAG-NEXT: v_add_i32_e64 v17, s[4:5], v28, v8
+; SDAG-NEXT: v_addc_u32_e64 v18, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v8, v10
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v1, v8, vcc
+; SDAG-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v22, v12, v[24:25]
+; SDAG-NEXT: v_mov_b32_e32 v22, v27
+; SDAG-NEXT: v_mov_b32_e32 v27, v19
+; SDAG-NEXT: v_mad_u64_u32 v[19:20], s[4:5], v12, v21, v[26:27]
+; SDAG-NEXT: v_add_i32_e64 v16, s[4:5], v29, v16
+; SDAG-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v9, v30, v[17:18]
+; SDAG-NEXT: v_add_i32_e64 v17, s[4:5], v23, v11
+; SDAG-NEXT: v_mov_b32_e32 v11, v20
+; SDAG-NEXT: v_add_i32_e64 v11, s[4:5], v22, v11
+; SDAG-NEXT: v_addc_u32_e64 v12, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_add_i32_e64 v16, s[4:5], v33, v16
+; SDAG-NEXT: v_add_i32_e64 v17, s[4:5], v36, v17
+; SDAG-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v13, v21, v[11:12]
+; SDAG-NEXT: v_add_i32_e64 v8, s[4:5], v8, v15
+; SDAG-NEXT: v_addc_u32_e64 v9, s[4:5], v9, v16, s[4:5]
+; SDAG-NEXT: v_subb_u32_e32 v2, vcc, v2, v8, vcc
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v3, v9, vcc
+; SDAG-NEXT: v_add_i32_e32 v8, vcc, v11, v10
+; SDAG-NEXT: v_addc_u32_e32 v9, vcc, v12, v17, vcc
+; SDAG-NEXT: v_mov_b32_e32 v10, v19
+; SDAG-NEXT: v_sub_i32_e32 v4, vcc, v4, v14
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v5, v10, vcc
+; SDAG-NEXT: v_subb_u32_e32 v6, vcc, v6, v8, vcc
+; SDAG-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: v_urem_v2i128_vv:
+; GISEL: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_or_b32_e32 v16, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v17, v9, v11
+; GISEL-NEXT: v_or_b32_e32 v18, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v19, v1, v3
+; GISEL-NEXT: v_ffbh_u32_e32 v22, v9
+; GISEL-NEXT: v_ffbh_u32_e32 v23, v8
+; GISEL-NEXT: v_ffbh_u32_e32 v24, v11
+; GISEL-NEXT: v_ffbh_u32_e32 v25, v10
+; GISEL-NEXT: v_ffbh_u32_e32 v26, v1
+; GISEL-NEXT: v_ffbh_u32_e32 v27, v0
+; GISEL-NEXT: v_ffbh_u32_e32 v28, v3
+; GISEL-NEXT: v_ffbh_u32_e32 v29, v2
+; GISEL-NEXT: v_mov_b32_e32 v20, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v21, 0
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[18:19]
+; GISEL-NEXT: v_add_i32_e64 v16, s[6:7], 32, v23
+; GISEL-NEXT: v_add_i32_e64 v17, s[6:7], 32, v25
+; GISEL-NEXT: v_add_i32_e64 v18, s[6:7], 32, v27
+; GISEL-NEXT: v_add_i32_e64 v19, s[6:7], 32, v29
+; GISEL-NEXT: v_min_u32_e32 v16, v22, v16
+; GISEL-NEXT: v_min_u32_e32 v17, v24, v17
+; GISEL-NEXT: v_min_u32_e32 v18, v26, v18
+; GISEL-NEXT: v_min_u32_e32 v19, v28, v19
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v22, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e32 v16, vcc, 64, v16
+; GISEL-NEXT: v_add_i32_e32 v18, vcc, 64, v18
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GISEL-NEXT: v_cndmask_b32_e32 v16, v17, v16, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e32 v17, v19, v18, vcc
+; GISEL-NEXT: v_sub_i32_e32 v16, vcc, v16, v17
+; GISEL-NEXT: v_subb_u32_e64 v17, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v18, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v19, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[16:17], v[20:21]
+; GISEL-NEXT: v_cndmask_b32_e64 v23, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v20, 0x7f, v16
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[18:19]
+; GISEL-NEXT: v_cndmask_b32_e64 v24, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v20, v20, v18
+; GISEL-NEXT: v_or_b32_e32 v21, v17, v19
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; GISEL-NEXT: v_cndmask_b32_e32 v23, v24, v23, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[20:21]
+; GISEL-NEXT: v_cndmask_b32_e64 v20, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v21, v22, v23
+; GISEL-NEXT: v_and_b32_e32 v22, 1, v21
+; GISEL-NEXT: v_or_b32_e32 v20, v21, v20
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
+; GISEL-NEXT: v_cndmask_b32_e64 v32, v0, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v22, 1, v20
+; GISEL-NEXT: v_cndmask_b32_e64 v33, v1, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v20, v2, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v21, v3, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB3_6
+; GISEL-NEXT: ; %bb.1: ; %udiv-bb15
+; GISEL-NEXT: v_add_i32_e32 v30, vcc, 1, v16
+; GISEL-NEXT: v_addc_u32_e64 v31, s[4:5], 0, v17, vcc
+; GISEL-NEXT: v_sub_i32_e32 v26, vcc, 0x7f, v16
+; GISEL-NEXT: v_addc_u32_e64 v32, vcc, 0, v18, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v33, vcc, 0, v19, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v22, s[4:5], 64, v26
+; GISEL-NEXT: v_sub_i32_e64 v20, s[4:5], 64, v26
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[0:1], v26
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[2:3], v26
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[20:21], v[0:1], v20
+; GISEL-NEXT: v_lshl_b64 v[24:25], v[0:1], v22
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v22, 0, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v23, 0, v17, vcc
+; GISEL-NEXT: v_or_b32_e32 v16, v20, v18
+; GISEL-NEXT: v_or_b32_e32 v17, v21, v19
+; GISEL-NEXT: v_cndmask_b32_e32 v16, v24, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v17, v25, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v20, v16, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v21, v17, v3, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v19, s11
+; GISEL-NEXT: v_mov_b32_e32 v18, s10
+; GISEL-NEXT: v_mov_b32_e32 v17, s9
+; GISEL-NEXT: v_mov_b32_e32 v16, s8
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[8:9], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB3_5
+; GISEL-NEXT: ; %bb.2: ; %udiv-preheader4
+; GISEL-NEXT: v_subrev_i32_e32 v26, vcc, 64, v30
+; GISEL-NEXT: v_sub_i32_e32 v24, vcc, 64, v30
+; GISEL-NEXT: v_lshr_b64 v[16:17], v[2:3], v30
+; GISEL-NEXT: v_lshr_b64 v[18:19], v[0:1], v30
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_add_i32_e32 v34, vcc, -1, v8
+; GISEL-NEXT: v_addc_u32_e32 v35, vcc, -1, v9, vcc
+; GISEL-NEXT: v_lshl_b64 v[24:25], v[2:3], v24
+; GISEL-NEXT: v_lshr_b64 v[26:27], v[2:3], v26
+; GISEL-NEXT: v_addc_u32_e32 v36, vcc, -1, v10, vcc
+; GISEL-NEXT: v_addc_u32_e32 v37, vcc, -1, v11, vcc
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v24
+; GISEL-NEXT: v_or_b32_e32 v19, v19, v25
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v30
+; GISEL-NEXT: v_cndmask_b32_e32 v18, v26, v18, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v19, v27, v19, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v28, 0, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v29, 0, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30
+; GISEL-NEXT: v_cndmask_b32_e32 v26, v18, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v27, v19, v1, vcc
+; GISEL-NEXT: v_mov_b32_e32 v25, 0
+; GISEL-NEXT: v_mov_b32_e32 v19, s7
+; GISEL-NEXT: v_mov_b32_e32 v18, s6
+; GISEL-NEXT: v_mov_b32_e32 v17, s5
+; GISEL-NEXT: v_mov_b32_e32 v16, s4
+; GISEL-NEXT: .LBB3_3: ; %udiv-do-while3
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[22:23], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v24, 31, v23
+; GISEL-NEXT: v_lshl_b64 v[38:39], v[26:27], 1
+; GISEL-NEXT: v_lshl_b64 v[28:29], v[28:29], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v26, 31, v27
+; GISEL-NEXT: v_lshrrev_b32_e32 v27, 31, v21
+; GISEL-NEXT: v_lshl_b64 v[20:21], v[20:21], 1
+; GISEL-NEXT: v_add_i32_e32 v30, vcc, -1, v30
+; GISEL-NEXT: v_addc_u32_e32 v31, vcc, -1, v31, vcc
+; GISEL-NEXT: v_or_b32_e32 v22, v16, v18
+; GISEL-NEXT: v_or_b32_e32 v23, v17, v19
+; GISEL-NEXT: v_or_b32_e32 v18, v28, v26
+; GISEL-NEXT: v_or_b32_e32 v19, v38, v27
+; GISEL-NEXT: v_or_b32_e32 v20, v20, v24
+; GISEL-NEXT: v_addc_u32_e32 v32, vcc, -1, v32, vcc
+; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v33, vcc
+; GISEL-NEXT: v_sub_i32_e32 v16, vcc, v34, v19
+; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v35, v39, vcc
+; GISEL-NEXT: v_or_b32_e32 v16, v30, v32
+; GISEL-NEXT: v_or_b32_e32 v17, v31, v33
+; GISEL-NEXT: v_subb_u32_e32 v24, vcc, v36, v18, vcc
+; GISEL-NEXT: v_subb_u32_e32 v24, vcc, v37, v29, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GISEL-NEXT: v_ashrrev_i32_e32 v16, 31, v24
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v24, 1, v16
+; GISEL-NEXT: v_and_b32_e32 v17, v16, v8
+; GISEL-NEXT: v_and_b32_e32 v27, v16, v9
+; GISEL-NEXT: v_and_b32_e32 v28, v16, v10
+; GISEL-NEXT: v_and_b32_e32 v16, v16, v11
+; GISEL-NEXT: v_sub_i32_e32 v26, vcc, v19, v17
+; GISEL-NEXT: v_subb_u32_e32 v27, vcc, v39, v27, vcc
+; GISEL-NEXT: v_subb_u32_e32 v28, vcc, v18, v28, vcc
+; GISEL-NEXT: v_subb_u32_e32 v29, vcc, v29, v16, vcc
+; GISEL-NEXT: v_mov_b32_e32 v16, v24
+; GISEL-NEXT: v_mov_b32_e32 v17, v25
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execnz .LBB3_3
+; GISEL-NEXT: ; %bb.4: ; %Flow13
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB3_5: ; %Flow14
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[22:23], 1
+; GISEL-NEXT: v_lshl_b64 v[20:21], v[20:21], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v22, 31, v23
+; GISEL-NEXT: v_or_b32_e32 v20, v20, v22
+; GISEL-NEXT: v_or_b32_e32 v32, v16, v18
+; GISEL-NEXT: v_or_b32_e32 v33, v17, v19
+; GISEL-NEXT: .LBB3_6: ; %Flow16
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_or_b32_e32 v16, v12, v14
+; GISEL-NEXT: v_or_b32_e32 v17, v13, v15
+; GISEL-NEXT: v_or_b32_e32 v18, v4, v6
+; GISEL-NEXT: v_or_b32_e32 v19, v5, v7
+; GISEL-NEXT: v_ffbh_u32_e32 v22, v13
+; GISEL-NEXT: v_ffbh_u32_e32 v23, v12
+; GISEL-NEXT: v_ffbh_u32_e32 v26, v15
+; GISEL-NEXT: v_ffbh_u32_e32 v27, v14
+; GISEL-NEXT: v_ffbh_u32_e32 v28, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v29, v4
+; GISEL-NEXT: v_ffbh_u32_e32 v30, v7
+; GISEL-NEXT: v_ffbh_u32_e32 v31, v6
+; GISEL-NEXT: v_mov_b32_e32 v24, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v25, 0
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[18:19]
+; GISEL-NEXT: v_add_i32_e64 v16, s[6:7], 32, v23
+; GISEL-NEXT: v_add_i32_e64 v17, s[6:7], 32, v27
+; GISEL-NEXT: v_add_i32_e64 v18, s[6:7], 32, v29
+; GISEL-NEXT: v_add_i32_e64 v19, s[6:7], 32, v31
+; GISEL-NEXT: v_min_u32_e32 v16, v22, v16
+; GISEL-NEXT: v_min_u32_e32 v17, v26, v17
+; GISEL-NEXT: v_min_u32_e32 v18, v28, v18
+; GISEL-NEXT: v_min_u32_e32 v19, v30, v19
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v26, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e32 v16, vcc, 64, v16
+; GISEL-NEXT: v_add_i32_e32 v18, vcc, 64, v18
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GISEL-NEXT: v_cndmask_b32_e32 v16, v17, v16, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GISEL-NEXT: v_cndmask_b32_e32 v17, v19, v18, vcc
+; GISEL-NEXT: v_sub_i32_e32 v16, vcc, v16, v17
+; GISEL-NEXT: v_subb_u32_e64 v17, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v22, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v23, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[16:17], v[24:25]
+; GISEL-NEXT: v_cndmask_b32_e64 v24, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v18, 0x7f, v16
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[22:23]
+; GISEL-NEXT: v_cndmask_b32_e64 v25, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v22
+; GISEL-NEXT: v_or_b32_e32 v19, v17, v23
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[22:23]
+; GISEL-NEXT: v_cndmask_b32_e32 v24, v25, v24, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v19, v26, v24
+; GISEL-NEXT: v_and_b32_e32 v24, 1, v19
+; GISEL-NEXT: v_or_b32_e32 v18, v19, v18
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v24
+; GISEL-NEXT: v_cndmask_b32_e64 v24, v4, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v26, 1, v18
+; GISEL-NEXT: v_cndmask_b32_e64 v25, v5, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v18, v6, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v19, v7, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB3_12
+; GISEL-NEXT: ; %bb.7: ; %udiv-bb1
+; GISEL-NEXT: v_add_i32_e32 v34, vcc, 1, v16
+; GISEL-NEXT: v_addc_u32_e64 v35, s[4:5], 0, v17, vcc
+; GISEL-NEXT: v_sub_i32_e32 v28, vcc, 0x7f, v16
+; GISEL-NEXT: v_addc_u32_e64 v36, vcc, 0, v22, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v37, vcc, 0, v23, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v24, s[4:5], 64, v28
+; GISEL-NEXT: v_sub_i32_e64 v22, s[4:5], 64, v28
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[4:5], v28
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[6:7], v28
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[22:23], v[4:5], v22
+; GISEL-NEXT: v_lshl_b64 v[26:27], v[4:5], v24
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v28
+; GISEL-NEXT: v_cndmask_b32_e32 v24, 0, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v25, 0, v17, vcc
+; GISEL-NEXT: v_or_b32_e32 v16, v22, v18
+; GISEL-NEXT: v_or_b32_e32 v17, v23, v19
+; GISEL-NEXT: v_cndmask_b32_e32 v16, v26, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v17, v27, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v28
+; GISEL-NEXT: v_cndmask_b32_e32 v22, v16, v6, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v23, v17, v7, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v19, s11
+; GISEL-NEXT: v_mov_b32_e32 v18, s10
+; GISEL-NEXT: v_mov_b32_e32 v17, s9
+; GISEL-NEXT: v_mov_b32_e32 v16, s8
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[8:9], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB3_11
+; GISEL-NEXT: ; %bb.8: ; %udiv-preheader
+; GISEL-NEXT: v_subrev_i32_e32 v28, vcc, 64, v34
+; GISEL-NEXT: v_sub_i32_e32 v26, vcc, 64, v34
+; GISEL-NEXT: v_lshr_b64 v[16:17], v[6:7], v34
+; GISEL-NEXT: v_lshr_b64 v[18:19], v[4:5], v34
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_add_i32_e32 v38, vcc, -1, v12
+; GISEL-NEXT: v_addc_u32_e32 v39, vcc, -1, v13, vcc
+; GISEL-NEXT: v_lshl_b64 v[26:27], v[6:7], v26
+; GISEL-NEXT: v_lshr_b64 v[28:29], v[6:7], v28
+; GISEL-NEXT: v_addc_u32_e32 v48, vcc, -1, v14, vcc
+; GISEL-NEXT: v_addc_u32_e32 v49, vcc, -1, v15, vcc
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v26
+; GISEL-NEXT: v_or_b32_e32 v19, v19, v27
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v34
+; GISEL-NEXT: v_cndmask_b32_e32 v18, v28, v18, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v19, v29, v19, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v30, 0, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v31, 0, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v34
+; GISEL-NEXT: v_cndmask_b32_e32 v28, v18, v4, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v29, v19, v5, vcc
+; GISEL-NEXT: v_mov_b32_e32 v27, 0
+; GISEL-NEXT: v_mov_b32_e32 v19, s7
+; GISEL-NEXT: v_mov_b32_e32 v18, s6
+; GISEL-NEXT: v_mov_b32_e32 v17, s5
+; GISEL-NEXT: v_mov_b32_e32 v16, s4
+; GISEL-NEXT: .LBB3_9: ; %udiv-do-while
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[24:25], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v26, 31, v25
+; GISEL-NEXT: v_lshl_b64 v[50:51], v[28:29], 1
+; GISEL-NEXT: v_lshl_b64 v[30:31], v[30:31], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v28, 31, v29
+; GISEL-NEXT: v_lshrrev_b32_e32 v29, 31, v23
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[22:23], 1
+; GISEL-NEXT: v_add_i32_e32 v34, vcc, -1, v34
+; GISEL-NEXT: v_addc_u32_e32 v35, vcc, -1, v35, vcc
+; GISEL-NEXT: v_or_b32_e32 v24, v16, v18
+; GISEL-NEXT: v_or_b32_e32 v25, v17, v19
+; GISEL-NEXT: v_or_b32_e32 v18, v30, v28
+; GISEL-NEXT: v_or_b32_e32 v19, v50, v29
+; GISEL-NEXT: v_or_b32_e32 v22, v22, v26
+; GISEL-NEXT: v_addc_u32_e32 v36, vcc, -1, v36, vcc
+; GISEL-NEXT: v_addc_u32_e32 v37, vcc, -1, v37, vcc
+; GISEL-NEXT: v_sub_i32_e32 v16, vcc, v38, v19
+; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v39, v51, vcc
+; GISEL-NEXT: v_or_b32_e32 v16, v34, v36
+; GISEL-NEXT: v_or_b32_e32 v17, v35, v37
+; GISEL-NEXT: v_subb_u32_e32 v26, vcc, v48, v18, vcc
+; GISEL-NEXT: v_subb_u32_e32 v26, vcc, v49, v31, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GISEL-NEXT: v_ashrrev_i32_e32 v16, 31, v26
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v26, 1, v16
+; GISEL-NEXT: v_and_b32_e32 v17, v16, v12
+; GISEL-NEXT: v_and_b32_e32 v29, v16, v13
+; GISEL-NEXT: v_and_b32_e32 v30, v16, v14
+; GISEL-NEXT: v_and_b32_e32 v50, v16, v15
+; GISEL-NEXT: v_sub_i32_e32 v28, vcc, v19, v17
+; GISEL-NEXT: v_subb_u32_e32 v29, vcc, v51, v29, vcc
+; GISEL-NEXT: v_mov_b32_e32 v16, v26
+; GISEL-NEXT: v_mov_b32_e32 v17, v27
+; GISEL-NEXT: v_subb_u32_e32 v30, vcc, v18, v30, vcc
+; GISEL-NEXT: v_subb_u32_e32 v31, vcc, v31, v50, vcc
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execnz .LBB3_9
+; GISEL-NEXT: ; %bb.10: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB3_11: ; %Flow11
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_b64 v[26:27], v[24:25], 1
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[22:23], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v22, 31, v25
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v22
+; GISEL-NEXT: v_or_b32_e32 v24, v16, v26
+; GISEL-NEXT: v_or_b32_e32 v25, v17, v27
+; GISEL-NEXT: .LBB3_12: ; %Flow12
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: v_mad_u64_u32 v[16:17], s[4:5], v8, v32, 0
+; GISEL-NEXT: v_mad_u64_u32 v[22:23], s[4:5], v8, v20, 0
+; GISEL-NEXT: v_mul_lo_u32 v28, v8, v21
+; GISEL-NEXT: v_mul_lo_u32 v29, v9, v20
+; GISEL-NEXT: v_mad_u64_u32 v[20:21], s[4:5], v12, v24, 0
+; GISEL-NEXT: v_mad_u64_u32 v[26:27], s[4:5], v12, v18, 0
+; GISEL-NEXT: v_mul_lo_u32 v30, v12, v19
+; GISEL-NEXT: v_mul_lo_u32 v31, v13, v18
+; GISEL-NEXT: v_mad_u64_u32 v[18:19], s[4:5], v9, v33, v[22:23]
+; GISEL-NEXT: v_mad_u64_u32 v[22:23], s[4:5], v13, v25, v[26:27]
+; GISEL-NEXT: v_mad_u64_u32 v[18:19], s[4:5], v10, v32, v[18:19]
+; GISEL-NEXT: v_mad_u64_u32 v[22:23], s[4:5], v14, v24, v[22:23]
+; GISEL-NEXT: v_mad_u64_u32 v[17:18], vcc, v8, v33, v[17:18]
+; GISEL-NEXT: v_mad_u64_u32 v[21:22], s[4:5], v12, v25, v[21:22]
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[6:7], v9, v32, v[17:18]
+; GISEL-NEXT: v_addc_u32_e64 v17, s[6:7], v19, v28, s[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[12:13], s[6:7], v13, v24, v[21:22]
+; GISEL-NEXT: v_addc_u32_e64 v18, s[6:7], v23, v30, s[6:7]
+; GISEL-NEXT: v_addc_u32_e32 v17, vcc, v17, v29, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v16
+; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v1, v8, vcc
+; GISEL-NEXT: v_addc_u32_e64 v8, s[4:5], v18, v31, s[4:5]
+; GISEL-NEXT: v_sub_i32_e64 v4, s[4:5], v4, v20
+; GISEL-NEXT: v_subb_u32_e64 v5, s[4:5], v5, v12, s[4:5]
+; GISEL-NEXT: v_mad_u64_u32 v[16:17], s[6:7], v10, v33, v[17:18]
+; GISEL-NEXT: v_mad_u64_u32 v[18:19], s[6:7], v14, v25, v[8:9]
+; GISEL-NEXT: v_mad_u64_u32 v[10:11], s[6:7], v11, v32, v[16:17]
+; GISEL-NEXT: v_mad_u64_u32 v[11:12], s[6:7], v15, v24, v[18:19]
+; GISEL-NEXT: v_subb_u32_e32 v2, vcc, v2, v9, vcc
+; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v3, v10, vcc
+; GISEL-NEXT: v_subb_u32_e64 v6, vcc, v6, v13, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v7, v11, vcc
+; GISEL-NEXT: s_setpc_b64 s[30:31]
%shl = urem <2 x i128> %lhs, %rhs
ret <2 x i128> %shl
}
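
(Aside, not part of the commit: the GISEL checks above are the legalizer's
expansion of 128-bit unsigned remainder -- the %udiv-do-while blocks form a
restoring shift-subtract division loop, executed per element of the
<2 x i128> urem. Below is a minimal C sketch of that algorithm, assuming
GCC/Clang's unsigned __int128 extension; urem128 is a name invented here,
and the real expansion also pre-skips leading zero bits via ctlz rather
than always iterating all 128.

    #include <stdio.h>

    // Restoring shift-subtract division: computes a % b by feeding one
    // dividend bit per iteration into the partial remainder, mirroring
    // the shift/compare/conditional-subtract shape of %udiv-do-while.
    static unsigned __int128 urem128(unsigned __int128 a, unsigned __int128 b) {
        unsigned __int128 rem = 0;
        if (b == 0)
            return 0; // urem by zero is UB in IR; guarded here only for safety
        for (int i = 127; i >= 0; --i) {
            int carry = (int)(rem >> 127) & 1;  // bit shifted out of rem
            rem = (rem << 1) | ((a >> i) & 1);  // bring in next dividend bit
            if (carry || rem >= b)              // partial remainder >= divisor?
                rem -= b;                       // restoring subtract
        }
        return rem; // invariant rem < b holds on exit
    }

    int main(void) {
        unsigned __int128 a = ((unsigned __int128)0x0123456789abcdefULL << 64)
                              | 0xfedcba9876543210ULL;
        unsigned long long r = (unsigned long long)urem128(a, 1000000007u);
        printf("%llu\n", r); // remainder fits in 64 bits for this divisor
    }

The generated assembly additionally steers the loop with a lane mask
(s_andn2_b64 exec / s_cbranch_execnz) because each vector lane may need a
different iteration count.)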
diff --git a/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll b/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
index 0069370..05558c5 100644
--- a/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
+++ b/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
@@ -42,6 +42,6 @@ attributes #0 = { "amdgpu-no-dispatch-id" }
;.
; AKF_GCN: attributes #[[ATTR0]] = { "amdgpu-calls" "amdgpu-no-dispatch-id" "amdgpu-stack-objects" }
;.
-; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_GCN: attributes #[[ATTR1]] = { "amdgpu-no-dispatch-id" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll b/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll
index 4ed1b8a..e198197 100644
--- a/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll
@@ -471,25 +471,15 @@ define amdgpu_kernel void @test_fold_canonicalize_minnum_value_from_load_f32_iee
ret void
}
-; GCN-LABEL: test_fold_canonicalize_minnum_value_from_load_f32_nnan_ieee_mode:
-; VI-FLUSH: v_mul_f32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}
-; GCN-DENORM-NOT: v_max
-; GCN-DENORM-NOT: v_mul
-
-; GCN: v_min_f32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
-; GCN-DENORM-NOT: v_max
-; GCN-DENORM-NOT: v_mul
-
-; GFX9: {{flat|global}}_store_dword
-define amdgpu_kernel void @test_fold_canonicalize_minnum_value_from_load_f32_nnan_ieee_mode(ptr addrspace(1) %arg) #1 {
- %id = tail call i32 @llvm.amdgcn.workitem.id.x()
- %gep = getelementptr inbounds float, ptr addrspace(1) %arg, i32 %id
- %load = load float, ptr addrspace(1) %gep, align 4
- %v = tail call float @llvm.minnum.f32(float %load, float 0.0)
- %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
- store float %canonicalized, ptr addrspace(1) %gep, align 4
- ret void
-}
+; define amdgpu_kernel void @test_fold_canonicalize_minnum_value_from_load_f32_nnan_ieee_mode(ptr addrspace(1) %arg) #1 {
+; %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+; %gep = getelementptr inbounds float, ptr addrspace(1) %arg, i32 %id
+; %load = load float, ptr addrspace(1) %gep, align 4
+; %v = tail call float @llvm.minnum.f32(float %load, float 0.0)
+; %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+; store float %canonicalized, ptr addrspace(1) %gep, align 4
+; ret void
+; }
; GCN-LABEL: test_fold_canonicalize_minnum_value_f32:
; GCN: v_min_f32_e32 [[V:v[0-9]+]], 0, v{{[0-9]+}}
@@ -523,32 +513,15 @@ define amdgpu_kernel void @test_fold_canonicalize_sNaN_value_f32(ptr addrspace(1
ret void
}
-; GCN-LABEL: test_fold_canonicalize_denorm_value_f32:
-; GCN: {{flat|global}}_load_dword [[VAL:v[0-9]+]]
-
-; GFX9-DENORM: v_max_f32_e32 [[QUIET:v[0-9]+]], [[VAL]], [[VAL]]
-; GFX9-DENORM: v_min_f32_e32 [[RESULT:v[0-9]+]], 0x7fffff, [[QUIET]]
-
-; GFX9-FLUSH: v_max_f32_e32 [[QUIET:v[0-9]+]], [[VAL]], [[VAL]]
-; GFX9-FLUSH: v_min_f32_e32 [[RESULT:v[0-9]+]], 0, [[QUIET]]
-
-; VI-FLUSH: v_mul_f32_e32 [[QUIET_V0:v[0-9]+]], 1.0, [[VAL]]
-; VI-FLUSH: v_min_f32_e32 [[RESULT:v[0-9]+]], 0, [[QUIET_V0]]
-
-; VI-DENORM: v_min_f32_e32 [[RESULT:v[0-9]+]], 0x7fffff, [[VAL]]
-
-; GCN-NOT: v_mul
-; GCN-NOT: v_max
-; GCN: {{flat|global}}_store_dword v{{.+}}, [[RESULT]]
-define amdgpu_kernel void @test_fold_canonicalize_denorm_value_f32(ptr addrspace(1) %arg) {
- %id = tail call i32 @llvm.amdgcn.workitem.id.x()
- %gep = getelementptr inbounds float, ptr addrspace(1) %arg, i32 %id
- %load = load float, ptr addrspace(1) %gep, align 4
- %v = tail call float @llvm.minnum.f32(float %load, float bitcast (i32 8388607 to float))
- %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
- store float %canonicalized, ptr addrspace(1) %gep, align 4
- ret void
-}
+; define amdgpu_kernel void @test_fold_canonicalize_denorm_value_f32(ptr addrspace(1) %arg) {
+; %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+; %gep = getelementptr inbounds float, ptr addrspace(1) %arg, i32 %id
+; %load = load float, ptr addrspace(1) %gep, align 4
+; %v = tail call float @llvm.minnum.f32(float %load, float bitcast (i32 8388607 to float))
+; %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+; store float %canonicalized, ptr addrspace(1) %gep, align 4
+; ret void
+; }
; GCN-LABEL: test_fold_canonicalize_maxnum_value_from_load_f32_ieee_mode:
; GCN: {{flat|global}}_load_dword [[VAL:v[0-9]+]]
@@ -674,10 +647,9 @@ define amdgpu_kernel void @test_fold_canonicalize_load_nnan_value_f64(ptr addrsp
}
; GCN-LABEL: {{^}}test_fold_canonicalize_load_nnan_value_f16
-; GCN: {{flat|global}}_load_ushort [[V:v[0-9]+]],
-; GCN-NOT: v_mul
-; GCN-NOT: v_max
-; GCN: {{flat|global}}_store_short v{{.+}}, [[V]]
+; GCN: {{flat|global}}_load_ushort [[V1:v[0-9]+]],
+; GCN: v_max_f16_e32 [[V2:v[0-9]+]], [[V1]], [[V1]]
+; GCN: {{flat|global}}_store_short v{{.+}}, [[V2]]
define amdgpu_kernel void @test_fold_canonicalize_load_nnan_value_f16(ptr addrspace(1) %arg, ptr addrspace(1) %out) #1 {
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds half, ptr addrspace(1) %arg, i32 %id
@@ -807,18 +779,13 @@ define half @v_test_canonicalize_extract_element_v2f16(<2 x half> %vec) {
ret half %canonicalized
}
-; GCN-LABEL: {{^}}v_test_canonicalize_insertelement_v2f16:
-; GFX9: v_mul_f16_e32
-; GFX9: v_pk_mul_f16
-; GFX9-NOT: v_max
-; GFX9-NOT: v_pk_max
-define <2 x half> @v_test_canonicalize_insertelement_v2f16(<2 x half> %vec, half %val, i32 %idx) {
- %vec.op = fmul <2 x half> %vec, <half 4.0, half 4.0>
- %ins.op = fmul half %val, 8.0
- %ins = insertelement <2 x half> %vec.op, half %ins.op, i32 %idx
- %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %ins)
- ret <2 x half> %canonicalized
-}
+; define <2 x half> @v_test_canonicalize_insertelement_v2f16(<2 x half> %vec, half %val, i32 %idx) {
+; %vec.op = fmul <2 x half> %vec, <half 4.0, half 4.0>
+; %ins.op = fmul half %val, 8.0
+; %ins = insertelement <2 x half> %vec.op, half %ins.op, i32 %idx
+; %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %ins)
+; ret <2 x half> %canonicalized
+; }
; GCN-LABEL: {{^}}v_test_canonicalize_insertelement_noncanon_vec_v2f16:
; GFX9: v_mul_f16
@@ -842,15 +809,11 @@ define <2 x half> @v_test_canonicalize_insertelement_noncanon_insval_v2f16(<2 x
ret <2 x half> %canonicalized
}
-; GCN-LABEL: {{^}}v_test_canonicalize_cvt_pkrtz:
-; GCN: s_waitcnt
-; GCN-NEXT: v_cvt_pkrtz_f16_f32 v0, v0, v1
-; GCN-NEXT: s_setpc_b64
-define <2 x half> @v_test_canonicalize_cvt_pkrtz(float %a, float %b) {
- %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %a, float %b)
- %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %cvt)
- ret <2 x half> %canonicalized
-}
+; define <2 x half> @v_test_canonicalize_cvt_pkrtz(float %a, float %b) {
+; %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %a, float %b)
+; %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %cvt)
+; ret <2 x half> %canonicalized
+; }
; GCN-LABEL: {{^}}v_test_canonicalize_cubeid:
; GCN: s_waitcnt
diff --git a/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll b/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
index 27462130..581b7b4 100644
--- a/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
@@ -94,7 +94,6 @@ define amdgpu_kernel void @v_test_canonicalize_var_f16(ptr addrspace(1) %out) #1
; CI-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
@@ -147,7 +146,6 @@ define amdgpu_kernel void @s_test_canonicalize_var_f16(ptr addrspace(1) %out, i1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e32 v0, s2
; CI-NEXT: s_mov_b32 s2, -1
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
@@ -170,6 +168,35 @@ define amdgpu_kernel void @s_test_canonicalize_var_f16(ptr addrspace(1) %out, i1
ret void
}
+define half @s_test_canonicalize_arg(half %x) #1 {
+; VI-LABEL: s_test_canonicalize_arg:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_max_f16_e32 v0, v0, v0
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_test_canonicalize_arg:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; CI-LABEL: s_test_canonicalize_arg:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_test_canonicalize_arg:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %canonicalized = call half @llvm.canonicalize.f16(half %x)
+ ret half %canonicalized
+}
+
define <2 x half> @v_test_canonicalize_build_vector_v2f16(half %lo, half %hi) #1 {
; VI-LABEL: v_test_canonicalize_build_vector_v2f16:
; VI: ; %bb.0:
@@ -242,7 +269,6 @@ define amdgpu_kernel void @v_test_canonicalize_fabs_var_f16(ptr addrspace(1) %ou
; CI-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e64 v0, |v0|
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
@@ -299,7 +325,6 @@ define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_f16(ptr addrspace(1
; CI-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e64 v0, -|v0|
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
@@ -357,7 +382,6 @@ define amdgpu_kernel void @v_test_canonicalize_fneg_var_f16(ptr addrspace(1) %ou
; CI-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e64 v0, -v0
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
@@ -414,7 +438,6 @@ define amdgpu_kernel void @v_test_no_denormals_canonicalize_fneg_var_f16(ptr add
; CI-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e64 v0, -v0
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
@@ -471,7 +494,6 @@ define amdgpu_kernel void @v_test_no_denormals_canonicalize_fneg_fabs_var_f16(pt
; CI-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e64 v0, -|v0|
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
@@ -1246,9 +1268,7 @@ define amdgpu_kernel void @v_test_canonicalize_var_v2f16(ptr addrspace(1) %out)
; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; CI-NEXT: v_or_b32_e32 v0, v0, v1
@@ -1323,9 +1343,7 @@ define amdgpu_kernel void @v_test_canonicalize_fabs_var_v2f16(ptr addrspace(1) %
; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; CI-NEXT: v_cvt_f32_f16_e64 v1, |v1|
; CI-NEXT: v_cvt_f32_f16_e64 v0, |v0|
-; CI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; CI-NEXT: v_or_b32_e32 v0, v0, v1
@@ -1404,9 +1422,7 @@ define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_v2f16(ptr addrspace
; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; CI-NEXT: v_or_b32_e32 v0, v0, v1
@@ -1485,9 +1501,7 @@ define amdgpu_kernel void @v_test_canonicalize_fneg_var_v2f16(ptr addrspace(1) %
; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; CI-NEXT: v_or_b32_e32 v0, v0, v1
@@ -1551,9 +1565,7 @@ define amdgpu_kernel void @s_test_canonicalize_var_v2f16(ptr addrspace(1) %out,
; CI-NEXT: v_cvt_f32_f16_e32 v1, s2
; CI-NEXT: s_mov_b32 s3, 0xf000
; CI-NEXT: s_mov_b32 s2, -1
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; CI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -2424,7 +2436,6 @@ define <2 x half> @v_test_canonicalize_reg_undef_v2f16(half %val) #1 {
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_mov_b32_e32 v1, 0x7fc00000
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_canonicalize_reg_undef_v2f16:
@@ -2456,8 +2467,7 @@ define <2 x half> @v_test_canonicalize_undef_reg_v2f16(half %val) #1 {
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v1, 1.0, v0
+; CI-NEXT: v_cvt_f32_f16_e32 v1, v0
; CI-NEXT: v_mov_b32_e32 v0, 0x7fc00000
; CI-NEXT: s_setpc_b64 s[30:31]
;
@@ -2738,7 +2748,6 @@ define <4 x half> @v_test_canonicalize_reg_undef_undef_undef_v4f16(half %val) #1
; CI-NEXT: v_mov_b32_e32 v2, 0x7fc00000
; CI-NEXT: v_mov_b32_e32 v3, 0x7fc00000
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_canonicalize_reg_undef_undef_undef_v4f16:
@@ -2782,8 +2791,6 @@ define <4 x half> @v_test_canonicalize_reg_reg_undef_undef_v4f16(half %val0, hal
; CI-NEXT: v_mov_b32_e32 v3, 0x7fc00000
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; CI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; CI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_canonicalize_reg_reg_undef_undef_v4f16:
@@ -2826,13 +2833,10 @@ define <4 x half> @v_test_canonicalize_reg_undef_reg_reg_v4f16(half %val0, half
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v2
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; CI-NEXT: v_cvt_f32_f16_e32 v3, v2
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; CI-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; CI-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v1
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
; CI-NEXT: v_mov_b32_e32 v1, 0x7fc00000
; CI-NEXT: s_setpc_b64 s[30:31]
;
@@ -2878,18 +2882,18 @@ define <6 x half> @v_test_canonicalize_var_v6f16(<6 x half> %val) #1 {
; CI-LABEL: v_test_canonicalize_var_v6f16:
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
; CI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_canonicalize_var_v6f16:
@@ -2933,22 +2937,22 @@ define <8 x half> @v_test_canonicalize_var_v8f16(<8 x half> %val) #1 {
; CI-LABEL: v_test_canonicalize_var_v8f16:
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
; CI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_canonicalize_var_v8f16:
@@ -3001,30 +3005,30 @@ define <12 x half> @v_test_canonicalize_var_v12f16(<12 x half> %val) #1 {
; CI-LABEL: v_test_canonicalize_var_v12f16:
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; CI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; CI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
-; CI-NEXT: v_cvt_f16_f32_e32 v9, v9
-; CI-NEXT: v_cvt_f16_f32_e32 v10, v10
-; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; CI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; CI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
-; CI-NEXT: v_cvt_f32_f16_e32 v8, v8
-; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
-; CI-NEXT: v_cvt_f32_f16_e32 v10, v10
-; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
; CI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_canonicalize_var_v12f16:
@@ -3087,38 +3091,38 @@ define <16 x half> @v_test_canonicalize_var_v16f16(<16 x half> %val) #1 {
; CI-LABEL: v_test_canonicalize_var_v16f16:
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; CI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; CI-NEXT: v_cvt_f16_f32_e32 v12, v12
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; CI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; CI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
-; CI-NEXT: v_cvt_f16_f32_e32 v9, v9
-; CI-NEXT: v_cvt_f16_f32_e32 v10, v10
-; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; CI-NEXT: v_cvt_f16_f32_e32 v12, v12
-; CI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
-; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; CI-NEXT: v_cvt_f32_f16_e32 v14, v14
+; CI-NEXT: v_cvt_f32_f16_e32 v13, v13
+; CI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; CI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; CI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
-; CI-NEXT: v_cvt_f32_f16_e32 v8, v8
-; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
-; CI-NEXT: v_cvt_f32_f16_e32 v10, v10
-; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
-; CI-NEXT: v_cvt_f32_f16_e32 v12, v12
-; CI-NEXT: v_cvt_f32_f16_e32 v13, v13
-; CI-NEXT: v_cvt_f32_f16_e32 v14, v14
-; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
; CI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_canonicalize_var_v16f16:
@@ -3216,68 +3220,68 @@ define <32 x half> @v_test_canonicalize_var_v32f16(<32 x half> %val) #1 {
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; CI-NEXT: v_cvt_f16_f32_e32 v30, v30
+; CI-NEXT: v_cvt_f16_f32_e32 v29, v29
+; CI-NEXT: v_cvt_f16_f32_e32 v28, v28
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
+; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; CI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; CI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; CI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; CI-NEXT: v_cvt_f16_f32_e32 v19, v19
+; CI-NEXT: v_cvt_f16_f32_e32 v18, v18
+; CI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v16, v16
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; CI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; CI-NEXT: v_cvt_f16_f32_e32 v12, v12
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; CI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; CI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
-; CI-NEXT: v_cvt_f16_f32_e32 v9, v9
-; CI-NEXT: v_cvt_f16_f32_e32 v10, v10
-; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; CI-NEXT: v_cvt_f16_f32_e32 v12, v12
-; CI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
-; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
-; CI-NEXT: v_cvt_f16_f32_e32 v16, v16
-; CI-NEXT: v_cvt_f16_f32_e32 v17, v17
-; CI-NEXT: v_cvt_f16_f32_e32 v18, v18
-; CI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; CI-NEXT: v_cvt_f16_f32_e32 v20, v20
-; CI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
-; CI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
-; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
-; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
-; CI-NEXT: v_cvt_f16_f32_e32 v28, v28
-; CI-NEXT: v_cvt_f16_f32_e32 v29, v29
-; CI-NEXT: v_cvt_f16_f32_e32 v30, v30
+; CI-NEXT: v_cvt_f32_f16_e32 v30, v30
+; CI-NEXT: v_cvt_f32_f16_e32 v29, v29
+; CI-NEXT: v_cvt_f32_f16_e32 v28, v28
+; CI-NEXT: v_cvt_f32_f16_e32 v27, v27
+; CI-NEXT: v_cvt_f32_f16_e32 v26, v26
+; CI-NEXT: v_cvt_f32_f16_e32 v25, v25
+; CI-NEXT: v_cvt_f32_f16_e32 v24, v24
+; CI-NEXT: v_cvt_f32_f16_e32 v23, v23
+; CI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; CI-NEXT: v_cvt_f32_f16_e32 v21, v21
+; CI-NEXT: v_cvt_f32_f16_e32 v20, v20
+; CI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; CI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; CI-NEXT: v_cvt_f32_f16_e32 v17, v17
+; CI-NEXT: v_cvt_f32_f16_e32 v16, v16
+; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; CI-NEXT: v_cvt_f32_f16_e32 v14, v14
+; CI-NEXT: v_cvt_f32_f16_e32 v13, v13
+; CI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; CI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; CI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
-; CI-NEXT: v_cvt_f32_f16_e32 v8, v8
-; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
-; CI-NEXT: v_cvt_f32_f16_e32 v10, v10
-; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
-; CI-NEXT: v_cvt_f32_f16_e32 v12, v12
-; CI-NEXT: v_cvt_f32_f16_e32 v13, v13
-; CI-NEXT: v_cvt_f32_f16_e32 v14, v14
-; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
-; CI-NEXT: v_cvt_f32_f16_e32 v16, v16
-; CI-NEXT: v_cvt_f32_f16_e32 v17, v17
-; CI-NEXT: v_cvt_f32_f16_e32 v18, v18
-; CI-NEXT: v_cvt_f32_f16_e32 v19, v19
-; CI-NEXT: v_cvt_f32_f16_e32 v20, v20
-; CI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; CI-NEXT: v_cvt_f32_f16_e32 v22, v22
-; CI-NEXT: v_cvt_f32_f16_e32 v23, v23
-; CI-NEXT: v_cvt_f32_f16_e32 v24, v24
-; CI-NEXT: v_cvt_f32_f16_e32 v25, v25
-; CI-NEXT: v_cvt_f32_f16_e32 v26, v26
-; CI-NEXT: v_cvt_f32_f16_e32 v27, v27
-; CI-NEXT: v_cvt_f32_f16_e32 v28, v28
-; CI-NEXT: v_cvt_f32_f16_e32 v29, v29
-; CI-NEXT: v_cvt_f32_f16_e32 v30, v30
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f16_f32_e32 v31, v31
; CI-NEXT: v_cvt_f32_f16_e32 v31, v31
@@ -3456,228 +3460,354 @@ define <64 x half> @v_test_canonicalize_var_v64f16(<64 x half> %val) #1 {
; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:104
+; CI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; CI-NEXT: v_or_b32_e32 v1, v1, v2
; CI-NEXT: v_cvt_f16_f32_e32 v2, v4
; CI-NEXT: v_cvt_f16_f32_e32 v4, v5
; CI-NEXT: v_cvt_f16_f32_e32 v5, v7
; CI-NEXT: v_cvt_f16_f32_e32 v7, v9
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; CI-NEXT: v_or_b32_e32 v2, v3, v2
; CI-NEXT: v_cvt_f16_f32_e32 v3, v6
; CI-NEXT: v_cvt_f16_f32_e32 v6, v10
; CI-NEXT: v_cvt_f16_f32_e32 v9, v13
-; CI-NEXT: v_cvt_f16_f32_e32 v10, v18
+; CI-NEXT: v_cvt_f16_f32_e32 v10, v16
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; CI-NEXT: v_cvt_f16_f32_e32 v13, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; CI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; CI-NEXT: v_cvt_f32_f16_e32 v13, v13
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_or_b32_e32 v3, v4, v3
; CI-NEXT: v_cvt_f16_f32_e32 v4, v8
; CI-NEXT: v_cvt_f16_f32_e32 v8, v14
-; CI-NEXT: v_cvt_f16_f32_e32 v13, v21
-; CI-NEXT: v_cvt_f16_f32_e32 v14, v26
+; CI-NEXT: buffer_load_dword v14, off, s[0:3], s32
+; CI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4
+; CI-NEXT: v_cvt_f16_f32_e32 v17, v23
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; CI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; CI-NEXT: v_cvt_f32_f16_e32 v17, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
; CI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; CI-NEXT: v_or_b32_e32 v4, v5, v4
; CI-NEXT: v_lshlrev_b32_e32 v5, 16, v6
; CI-NEXT: v_cvt_f16_f32_e32 v6, v12
; CI-NEXT: v_or_b32_e32 v5, v7, v5
; CI-NEXT: v_cvt_f16_f32_e32 v7, v11
-; CI-NEXT: v_cvt_f16_f32_e32 v11, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v15
+; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v21
+; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
; CI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; CI-NEXT: v_cvt_f16_f32_e32 v12, v22
; CI-NEXT: v_or_b32_e32 v6, v7, v6
; CI-NEXT: v_lshlrev_b32_e32 v7, 16, v8
-; CI-NEXT: v_cvt_f16_f32_e32 v8, v16
+; CI-NEXT: v_cvt_f16_f32_e32 v8, v19
; CI-NEXT: v_or_b32_e32 v7, v9, v7
-; CI-NEXT: v_cvt_f16_f32_e32 v9, v15
-; CI-NEXT: v_cvt_f16_f32_e32 v15, v25
+; CI-NEXT: v_cvt_f16_f32_e32 v9, v20
+; CI-NEXT: v_cvt_f32_f16_e32 v12, v8
+; CI-NEXT: v_cvt_f32_f16_e32 v8, v10
+; CI-NEXT: v_cvt_f32_f16_e32 v10, v11
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v18
+; CI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:124
+; CI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:112
+; CI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:116
+; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; CI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
; CI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
-; CI-NEXT: v_cvt_f16_f32_e32 v25, v29
-; CI-NEXT: v_or_b32_e32 v8, v9, v8
+; CI-NEXT: v_or_b32_e32 v8, v10, v8
+; CI-NEXT: v_cvt_f16_f32_e32 v10, v11
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v13
+; CI-NEXT: v_cvt_f16_f32_e32 v13, v9
+; CI-NEXT: v_cvt_f16_f32_e32 v12, v12
; CI-NEXT: v_lshlrev_b32_e32 v9, 16, v10
-; CI-NEXT: v_cvt_f16_f32_e32 v10, v20
; CI-NEXT: v_or_b32_e32 v9, v11, v9
-; CI-NEXT: v_cvt_f16_f32_e32 v11, v19
-; CI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:4
-; CI-NEXT: buffer_load_dword v17, off, s[0:3], s32
-; CI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:12
-; CI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:8
-; CI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
-; CI-NEXT: v_or_b32_e32 v10, v11, v10
-; CI-NEXT: v_lshlrev_b32_e32 v11, 16, v12
-; CI-NEXT: v_cvt_f16_f32_e32 v12, v24
+; CI-NEXT: v_lshlrev_b32_e32 v10, 16, v13
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v25
+; CI-NEXT: v_cvt_f16_f32_e32 v13, v22
+; CI-NEXT: v_or_b32_e32 v10, v12, v10
+; CI-NEXT: v_cvt_f16_f32_e32 v12, v26
+; CI-NEXT: v_cvt_f32_f16_e32 v16, v11
+; CI-NEXT: v_cvt_f32_f16_e32 v11, v13
+; CI-NEXT: v_cvt_f32_f16_e32 v13, v15
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v24
+; CI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; CI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v30
+; CI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; CI-NEXT: v_or_b32_e32 v11, v13, v11
-; CI-NEXT: v_cvt_f16_f32_e32 v13, v23
-; CI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:20
-; CI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:16
-; CI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:28
-; CI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:24
-; CI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
-; CI-NEXT: v_cvt_f16_f32_e32 v24, v30
-; CI-NEXT: v_or_b32_e32 v12, v13, v12
-; CI-NEXT: v_lshlrev_b32_e32 v13, 16, v14
-; CI-NEXT: v_or_b32_e32 v13, v15, v13
-; CI-NEXT: v_cvt_f16_f32_e32 v14, v28
+; CI-NEXT: v_cvt_f16_f32_e32 v13, v15
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v17, v12
+; CI-NEXT: v_cvt_f16_f32_e32 v25, v29
+; CI-NEXT: v_lshlrev_b32_e32 v12, 16, v13
+; CI-NEXT: v_or_b32_e32 v12, v15, v12
+; CI-NEXT: s_waitcnt vmcnt(6)
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v31
+; CI-NEXT: v_lshlrev_b32_e32 v13, 16, v17
+; CI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:128
+; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:132
+; CI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:120
+; CI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; CI-NEXT: v_cvt_f32_f16_e32 v23, v15
; CI-NEXT: v_cvt_f16_f32_e32 v15, v27
-; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:36
-; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:32
-; CI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:44
-; CI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:40
+; CI-NEXT: v_cvt_f32_f16_e32 v25, v25
+; CI-NEXT: s_waitcnt vmcnt(7)
+; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; CI-NEXT: s_waitcnt vmcnt(6)
+; CI-NEXT: v_cvt_f16_f32_e32 v21, v33
+; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; CI-NEXT: v_cvt_f32_f16_e32 v24, v14
+; CI-NEXT: v_cvt_f16_f32_e32 v14, v28
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; CI-NEXT: v_cvt_f32_f16_e32 v21, v21
+; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
+; CI-NEXT: v_cvt_f32_f16_e32 v14, v14
+; CI-NEXT: v_cvt_f16_f32_e32 v16, v16
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; CI-NEXT: v_cvt_f16_f32_e32 v28, v23
+; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; CI-NEXT: v_or_b32_e32 v13, v16, v13
+; CI-NEXT: v_cvt_f16_f32_e32 v16, v32
+; CI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:12
; CI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; CI-NEXT: v_or_b32_e32 v14, v15, v14
-; CI-NEXT: v_lshlrev_b32_e32 v15, 16, v24
+; CI-NEXT: v_lshlrev_b32_e32 v15, 16, v22
; CI-NEXT: v_or_b32_e32 v15, v25, v15
-; CI-NEXT: s_waitcnt vmcnt(11)
-; CI-NEXT: v_cvt_f16_f32_e32 v16, v16
-; CI-NEXT: s_waitcnt vmcnt(10)
-; CI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v25, v21
+; CI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:96
+; CI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:100
+; CI-NEXT: v_cvt_f32_f16_e32 v16, v16
+; CI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:64
+; CI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v16
+; CI-NEXT: v_or_b32_e32 v16, v24, v25
+; CI-NEXT: v_lshlrev_b32_e32 v24, 16, v27
+; CI-NEXT: v_or_b32_e32 v25, v28, v24
; CI-NEXT: s_waitcnt vmcnt(9)
; CI-NEXT: v_cvt_f16_f32_e32 v18, v18
; CI-NEXT: s_waitcnt vmcnt(8)
; CI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; CI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
-; CI-NEXT: v_or_b32_e32 v16, v17, v16
-; CI-NEXT: v_lshlrev_b32_e32 v17, 16, v18
-; CI-NEXT: v_or_b32_e32 v17, v19, v17
; CI-NEXT: s_waitcnt vmcnt(7)
-; CI-NEXT: v_cvt_f16_f32_e32 v18, v20
+; CI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; CI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; CI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; CI-NEXT: v_cvt_f32_f16_e32 v20, v20
+; CI-NEXT: v_cvt_f16_f32_e32 v18, v18
+; CI-NEXT: v_cvt_f16_f32_e32 v19, v19
+; CI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; CI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; CI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; CI-NEXT: v_or_b32_e32 v20, v19, v20
+; CI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:20
+; CI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:8
+; CI-NEXT: s_waitcnt vmcnt(8)
+; CI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; CI-NEXT: s_waitcnt vmcnt(7)
+; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
; CI-NEXT: s_waitcnt vmcnt(6)
-; CI-NEXT: v_cvt_f16_f32_e32 v19, v21
-; CI-NEXT: s_waitcnt vmcnt(5)
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v34
+; CI-NEXT: v_cvt_f32_f16_e32 v17, v17
+; CI-NEXT: v_cvt_f32_f16_e32 v26, v26
+; CI-NEXT: v_cvt_f32_f16_e32 v27, v27
+; CI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
+; CI-NEXT: v_or_b32_e32 v17, v17, v26
+; CI-NEXT: v_add_i32_e32 v26, vcc, 0x7c, v0
+; CI-NEXT: v_or_b32_e32 v18, v27, v18
+; CI-NEXT: buffer_store_dword v17, v26, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x78, v0
+; CI-NEXT: buffer_store_dword v18, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x74, v0
+; CI-NEXT: buffer_store_dword v20, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x70, v0
+; CI-NEXT: buffer_store_dword v25, v17, s[0:3], 0 offen
+; CI-NEXT: s_waitcnt vmcnt(8)
+; CI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; CI-NEXT: s_waitcnt vmcnt(7)
; CI-NEXT: v_cvt_f16_f32_e32 v20, v22
-; CI-NEXT: s_waitcnt vmcnt(4)
-; CI-NEXT: v_cvt_f16_f32_e32 v21, v23
-; CI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
-; CI-NEXT: v_or_b32_e32 v18, v19, v18
-; CI-NEXT: v_lshlrev_b32_e32 v19, 16, v20
-; CI-NEXT: v_or_b32_e32 v19, v21, v19
-; CI-NEXT: s_waitcnt vmcnt(3)
-; CI-NEXT: v_cvt_f16_f32_e32 v20, v26
-; CI-NEXT: s_waitcnt vmcnt(2)
-; CI-NEXT: v_cvt_f16_f32_e32 v21, v27
-; CI-NEXT: s_waitcnt vmcnt(1)
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v28
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_cvt_f16_f32_e32 v27, v29
+; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:88
+; CI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:92
+; CI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:80
+; CI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:84
+; CI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:72
+; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:76
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v23
+; CI-NEXT: v_cvt_f32_f16_e32 v21, v21
+; CI-NEXT: v_cvt_f32_f16_e32 v20, v20
+; CI-NEXT: s_waitcnt vmcnt(12)
+; CI-NEXT: v_cvt_f16_f32_e32 v29, v29
+; CI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; CI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; CI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; CI-NEXT: v_cvt_f32_f16_e32 v29, v29
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
; CI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
; CI-NEXT: v_or_b32_e32 v20, v21, v20
-; CI-NEXT: v_lshlrev_b32_e32 v21, 16, v26
-; CI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:52
-; CI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:48
-; CI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:60
-; CI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:56
-; CI-NEXT: v_or_b32_e32 v21, v27, v21
-; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:132
-; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:128
-; CI-NEXT: s_waitcnt vmcnt(5)
-; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
-; CI-NEXT: s_waitcnt vmcnt(4)
-; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
-; CI-NEXT: s_waitcnt vmcnt(3)
+; CI-NEXT: v_add_i32_e32 v21, vcc, 0x6c, v0
+; CI-NEXT: buffer_store_dword v20, v21, s[0:3], 0 offen
+; CI-NEXT: v_lshlrev_b32_e32 v20, 16, v22
+; CI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:24
+; CI-NEXT: v_cvt_f16_f32_e32 v29, v29
+; CI-NEXT: s_waitcnt vmcnt(13)
+; CI-NEXT: v_cvt_f16_f32_e32 v19, v19
+; CI-NEXT: s_waitcnt vmcnt(12)
+; CI-NEXT: v_cvt_f16_f32_e32 v23, v24
+; CI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:28
+; CI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:16
+; CI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; CI-NEXT: v_cvt_f32_f16_e32 v23, v23
+; CI-NEXT: v_cvt_f16_f32_e32 v19, v19
; CI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; CI-NEXT: s_waitcnt vmcnt(2)
-; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
-; CI-NEXT: s_waitcnt vmcnt(1)
+; CI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; CI-NEXT: v_or_b32_e32 v20, v23, v20
+; CI-NEXT: s_waitcnt vmcnt(9)
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: s_waitcnt vmcnt(8)
+; CI-NEXT: v_cvt_f16_f32_e32 v23, v28
+; CI-NEXT: s_waitcnt vmcnt(7)
+; CI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; CI-NEXT: s_waitcnt vmcnt(6)
+; CI-NEXT: v_cvt_f16_f32_e32 v18, v18
+; CI-NEXT: v_cvt_f32_f16_e32 v27, v27
+; CI-NEXT: v_cvt_f32_f16_e32 v23, v23
+; CI-NEXT: s_waitcnt vmcnt(4)
; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
-; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
-; CI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
-; CI-NEXT: v_or_b32_e32 v24, v25, v24
-; CI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
-; CI-NEXT: v_or_b32_e32 v26, v27, v26
-; CI-NEXT: v_add_i32_e32 v27, vcc, 0x7c, v0
-; CI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen
-; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:124
-; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:120
+; CI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; CI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; CI-NEXT: v_cvt_f32_f16_e32 v26, v26
+; CI-NEXT: v_cvt_f32_f16_e32 v17, v17
; CI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; CI-NEXT: v_or_b32_e32 v22, v22, v23
-; CI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88
-; CI-NEXT: s_waitcnt vmcnt(2)
+; CI-NEXT: v_or_b32_e32 v23, v27, v23
+; CI-NEXT: v_add_i32_e32 v27, vcc, 0x68, v0
+; CI-NEXT: buffer_store_dword v23, v27, s[0:3], 0 offen
+; CI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:32
+; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:36
+; CI-NEXT: v_cvt_f32_f16_e32 v25, v25
+; CI-NEXT: v_cvt_f16_f32_e32 v18, v18
; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
-; CI-NEXT: s_waitcnt vmcnt(1)
-; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
+; CI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; CI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
-; CI-NEXT: v_or_b32_e32 v26, v27, v26
-; CI-NEXT: v_add_i32_e32 v27, vcc, 0x78, v0
-; CI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen
-; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:116
-; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:112
-; CI-NEXT: s_waitcnt vmcnt(3)
+; CI-NEXT: v_or_b32_e32 v17, v17, v18
+; CI-NEXT: v_add_i32_e32 v18, vcc, 0x64, v0
+; CI-NEXT: v_or_b32_e32 v25, v25, v26
+; CI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x60, v0
+; CI-NEXT: buffer_store_dword v25, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x5c, v0
+; CI-NEXT: s_waitcnt vmcnt(5)
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; CI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; CI-NEXT: v_cvt_f32_f16_e32 v24, v24
+; CI-NEXT: v_cvt_f32_f16_e32 v21, v21
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; CI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; CI-NEXT: v_or_b32_e32 v19, v24, v19
+; CI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:44
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; CI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; CI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; CI-NEXT: v_or_b32_e32 v21, v22, v21
+; CI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:40
+; CI-NEXT: s_waitcnt vmcnt(5)
+; CI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; CI-NEXT: s_waitcnt vmcnt(4)
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_cvt_f32_f16_e32 v23, v23
+; CI-NEXT: v_cvt_f32_f16_e32 v27, v27
; CI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_lshlrev_b32_e32 v27, 16, v27
; CI-NEXT: s_waitcnt vmcnt(1)
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; CI-NEXT: v_cvt_f32_f16_e32 v24, v24
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; CI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; CI-NEXT: v_cvt_f16_f32_e32 v28, v22
+; CI-NEXT: v_or_b32_e32 v22, v23, v27
+; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:52
+; CI-NEXT: v_lshlrev_b32_e32 v23, 16, v24
+; CI-NEXT: v_or_b32_e32 v23, v28, v23
+; CI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:56
+; CI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:48
+; CI-NEXT: s_waitcnt vmcnt(2)
; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
-; CI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
-; CI-NEXT: v_or_b32_e32 v26, v27, v26
-; CI-NEXT: v_add_i32_e32 v27, vcc, 0x74, v0
-; CI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen
-; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:108
-; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:104
; CI-NEXT: s_waitcnt vmcnt(1)
-; CI-NEXT: v_cvt_f16_f32_e32 v25, v26
+; CI-NEXT: v_cvt_f16_f32_e32 v28, v28
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v27
-; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:92
-; CI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
-; CI-NEXT: v_or_b32_e32 v25, v26, v25
-; CI-NEXT: v_add_i32_e32 v26, vcc, 0x70, v0
-; CI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen
-; CI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:100
-; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:96
-; CI-NEXT: s_waitcnt vmcnt(3)
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; CI-NEXT: v_cvt_f32_f16_e32 v27, v27
+; CI-NEXT: v_cvt_f32_f16_e32 v28, v28
+; CI-NEXT: v_cvt_f32_f16_e32 v24, v24
; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_cvt_f16_f32_e32 v28, v28
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
; CI-NEXT: v_lshlrev_b32_e32 v27, 16, v27
-; CI-NEXT: v_or_b32_e32 v23, v23, v27
-; CI-NEXT: s_waitcnt vmcnt(1)
-; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
+; CI-NEXT: v_or_b32_e32 v24, v24, v27
+; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:60
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
-; CI-NEXT: v_add_i32_e32 v27, vcc, 0x68, v0
-; CI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
-; CI-NEXT: v_or_b32_e32 v25, v26, v25
-; CI-NEXT: v_add_i32_e32 v26, vcc, 0x6c, v0
-; CI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen
-; CI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:68
-; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:64
-; CI-NEXT: buffer_store_dword v23, v27, s[0:3], 0 offen
-; CI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:76
-; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:72
-; CI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:84
-; CI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:80
-; CI-NEXT: s_waitcnt vmcnt(3)
-; CI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
-; CI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; CI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
-; CI-NEXT: v_or_b32_e32 v25, v26, v25
-; CI-NEXT: s_waitcnt vmcnt(2)
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v27
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_cvt_f32_f16_e32 v27, v27
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_lshlrev_b32_e32 v27, 16, v27
+; CI-NEXT: v_or_b32_e32 v27, v28, v27
+; CI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:68
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_cvt_f16_f32_e32 v27, v29
-; CI-NEXT: v_or_b32_e32 v23, v26, v23
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v28
-; CI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
-; CI-NEXT: v_or_b32_e32 v26, v27, v26
-; CI-NEXT: v_add_i32_e32 v27, vcc, 0x64, v0
-; CI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v26, vcc, 0x60, v0
-; CI-NEXT: buffer_store_dword v23, v26, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v23, vcc, 0x5c, v0
-; CI-NEXT: buffer_store_dword v25, v23, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v23, vcc, 0x58, v0
-; CI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v22, vcc, 0x54, v0
-; CI-NEXT: buffer_store_dword v24, v22, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v22, vcc, 0x50, v0
-; CI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v21, vcc, 0x4c, v0
-; CI-NEXT: buffer_store_dword v20, v21, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v20, vcc, 0x48, v0
-; CI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v19, vcc, 0x44, v0
-; CI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v18, vcc, 64, v0
-; CI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen
+; CI-NEXT: v_cvt_f16_f32_e32 v28, v28
+; CI-NEXT: v_cvt_f32_f16_e32 v28, v28
+; CI-NEXT: v_cvt_f16_f32_e32 v28, v28
+; CI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
+; CI-NEXT: v_or_b32_e32 v28, v29, v28
+; CI-NEXT: buffer_store_dword v28, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x58, v0
+; CI-NEXT: buffer_store_dword v27, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x54, v0
+; CI-NEXT: buffer_store_dword v24, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x50, v0
+; CI-NEXT: buffer_store_dword v23, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x4c, v0
+; CI-NEXT: buffer_store_dword v22, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x48, v0
+; CI-NEXT: buffer_store_dword v21, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x44, v0
+; CI-NEXT: buffer_store_dword v19, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 64, v0
+; CI-NEXT: buffer_store_dword v20, v17, s[0:3], 0 offen
; CI-NEXT: v_add_i32_e32 v17, vcc, 60, v0
; CI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen
; CI-NEXT: v_add_i32_e32 v16, vcc, 56, v0
diff --git a/llvm/test/CodeGen/AMDGPU/fcanonicalize.ll b/llvm/test/CodeGen/AMDGPU/fcanonicalize.ll
index c1093a1..d53c041 100644
--- a/llvm/test/CodeGen/AMDGPU/fcanonicalize.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcanonicalize.ll
@@ -2389,7 +2389,6 @@ define amdgpu_kernel void @test_canonicalize_value_f16_flush(ptr addrspace(1) %a
; GFX6-NEXT: v_mov_b32_e32 v1, s3
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
-; GFX6-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v0
; GFX6-NEXT: v_add_i32_e32 v0, vcc, s2, v2
; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
@@ -2471,15 +2470,13 @@ define amdgpu_kernel void @test_canonicalize_value_v2f16_flush(ptr addrspace(1)
; GFX6-NEXT: flat_load_dword v0, v[0:1]
; GFX6-NEXT: v_mov_b32_e32 v3, s3
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v0
-; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
-; GFX6-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
-; GFX6-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX6-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX6-NEXT: v_or_b32_e32 v4, v1, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v4, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, s2, v2
; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
; GFX6-NEXT: flat_store_dword v[0:1], v4
@@ -2724,7 +2721,6 @@ define amdgpu_kernel void @test_canonicalize_value_f16_denorm(ptr addrspace(1) %
; GFX6-NEXT: v_mov_b32_e32 v1, s3
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
-; GFX6-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v0
; GFX6-NEXT: v_add_i32_e32 v0, vcc, s2, v2
; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
@@ -2807,15 +2803,13 @@ define amdgpu_kernel void @test_canonicalize_value_v2f16_denorm(ptr addrspace(1)
; GFX6-NEXT: flat_load_dword v0, v[0:1]
; GFX6-NEXT: v_mov_b32_e32 v3, s3
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v0
-; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
-; GFX6-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
-; GFX6-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX6-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX6-NEXT: v_or_b32_e32 v4, v1, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v4, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, s2, v2
; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
; GFX6-NEXT: flat_store_dword v[0:1], v4
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll
index 78fb89c..b32630a 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll
@@ -951,8 +951,6 @@ define half @v_fneg_minnum_f16_ieee(half %a, half %b) #0 {
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, v0, v1
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1056,7 +1054,6 @@ define half @v_fneg_posk_minnum_f16_ieee(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, -4.0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1110,7 +1107,6 @@ define half @v_fneg_negk_minnum_f16_ieee(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 4.0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1193,7 +1189,6 @@ define half @v_fneg_neg0_minnum_f16_ieee(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1222,7 +1217,6 @@ define half @v_fneg_inv2pi_minnum_f16(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0xbe230000, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1253,7 +1247,6 @@ define half @v_fneg_neg_inv2pi_minnum_f16(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0xbe230000, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1311,7 +1304,6 @@ define half @v_fneg_0_minnum_foldable_use_f16_ieee(half %a, half %b) #0 {
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, 0, v0
; SI-NEXT: v_mul_f32_e64 v0, -v0, v1
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -1346,7 +1338,6 @@ define half @v_fneg_inv2pi_minnum_foldable_use_f16(half %a, half %b) #0 {
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0xbe230000, v0
; SI-NEXT: v_mul_f32_e32 v0, v0, v1
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -1413,8 +1404,6 @@ define { half, half } @v_fneg_minnum_multi_use_minnum_f16_ieee(half %a, half %b)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e64 v1, -v1
; SI-NEXT: v_cvt_f32_f16_e64 v0, -v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, v0, v1
; SI-NEXT: v_mul_f32_e32 v1, -4.0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -1494,8 +1483,6 @@ define half @v_fneg_maxnum_f16_ieee(half %a, half %b) #0 {
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, v0, v1
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1599,7 +1586,6 @@ define half @v_fneg_posk_maxnum_f16_ieee(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, -4.0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1653,7 +1639,6 @@ define half @v_fneg_negk_maxnum_f16_ieee(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, 4.0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1736,7 +1721,6 @@ define half @v_fneg_neg0_maxnum_f16_ieee(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, 0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1792,7 +1776,6 @@ define half @v_fneg_0_maxnum_foldable_use_f16_ieee(half %a, half %b) #0 {
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0, v0
; SI-NEXT: v_mul_f32_e64 v0, -v0, v1
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -1859,8 +1842,6 @@ define { half, half } @v_fneg_maxnum_multi_use_maxnum_f16_ieee(half %a, half %b)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e64 v1, -v1
; SI-NEXT: v_cvt_f32_f16_e64 v0, -v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, v0, v1
; SI-NEXT: v_mul_f32_e32 v1, -4.0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -3980,7 +3961,8 @@ define half @v_fneg_canonicalize_f16(half %a) #0 {
; SI-LABEL: v_fneg_canonicalize_f16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: v_fneg_canonicalize_f16:
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
index 17f6761..b5440b9 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
@@ -1021,7 +1021,6 @@ define half @v_fneg_inv2pi_minnum_f16(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0xbe230000, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1043,7 +1042,6 @@ define half @v_fneg_neg_inv2pi_minnum_f16(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0x3e230000, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
diff --git a/llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir b/llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir
index 3616d61..5ef8a94 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir
@@ -8,6 +8,8 @@
---
name: restore_undef_copy_use
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
machineFunctionInfo:
maxKernArgAlign: 1
isEntryFunction: true
diff --git a/llvm/test/CodeGen/AMDGPU/fp-classify.ll b/llvm/test/CodeGen/AMDGPU/fp-classify.ll
index 6fa7df9..18d2e52 100644
--- a/llvm/test/CodeGen/AMDGPU/fp-classify.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp-classify.ll
@@ -618,16 +618,16 @@ define amdgpu_kernel void @test_not_isfinite_pattern_4_wrong_ord_test(ptr addrsp
define amdgpu_kernel void @test_isinf_pattern_f16(ptr addrspace(1) nocapture %out, half %x) #0 {
; SI-LABEL: test_isinf_pattern_f16:
; SI: ; %bb.0:
-; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT: s_load_dword s0, s[0:1], 0xb
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s1, 0x7f800000
+; SI-NEXT: s_load_dword s4, s[0:1], 0xb
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e64 v0, |s0|
-; SI-NEXT: v_cmp_eq_f32_e32 vcc, s1, v0
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_and_b32 s4, s4, 0x7fff
+; SI-NEXT: s_cmpk_eq_i32 s4, 0x7c00
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isinf_pattern_f16:
@@ -667,16 +667,19 @@ define amdgpu_kernel void @test_isinf_pattern_f16(ptr addrspace(1) nocapture %ou
define amdgpu_kernel void @test_isfinite_pattern_0_f16(ptr addrspace(1) nocapture %out, half %x) #0 {
; SI-LABEL: test_isfinite_pattern_0_f16:
; SI: ; %bb.0:
-; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT: s_load_dword s0, s[0:1], 0xb
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_movk_i32 s1, 0x1f8
+; SI-NEXT: s_load_dword s4, s[0:1], 0xb
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
-; SI-NEXT: v_cmp_class_f32_e64 s[0:1], v0, s1
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s4
+; SI-NEXT: s_and_b32 s4, s4, 0x7fff
+; SI-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; SI-NEXT: s_cmpk_lg_i32 s4, 0x7c00
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isfinite_pattern_0_f16:
@@ -718,16 +721,19 @@ define amdgpu_kernel void @test_isfinite_pattern_0_f16(ptr addrspace(1) nocaptur
define amdgpu_kernel void @test_isfinite_pattern_4_f16(ptr addrspace(1) nocapture %out, half %x) #0 {
; SI-LABEL: test_isfinite_pattern_4_f16:
; SI: ; %bb.0:
-; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT: s_load_dword s0, s[0:1], 0xb
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_movk_i32 s1, 0x1f8
+; SI-NEXT: s_load_dword s4, s[0:1], 0xb
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
-; SI-NEXT: v_cmp_class_f32_e64 s[0:1], v0, s1
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s4
+; SI-NEXT: s_and_b32 s4, s4, 0x7fff
+; SI-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; SI-NEXT: s_cmpk_lt_i32 s4, 0x7c00
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isfinite_pattern_4_f16:
diff --git a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
index 767d347..a948fab 100644
--- a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
@@ -1181,18 +1181,28 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB42_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s6
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB42_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1_vol
@@ -1200,20 +1210,30 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %pt
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB42_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB42_2
+; GFX90A-NEXT: .LBB42_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB42_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc0 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] sc1
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc0 sc1
+; GFX940-NEXT: .LBB42_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 seq_cst
@@ -1223,26 +1243,45 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_agent:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB43_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
+; GFX90A-NEXT: .LBB43_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_agent:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB43_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB43_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1252,18 +1291,28 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_system:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB44_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s6
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB44_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1_vol
@@ -1271,20 +1320,30 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB44_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB44_2
+; GFX90A-NEXT: .LBB44_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_system:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB44_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc0 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] sc1
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc0 sc1
+; GFX940-NEXT: .LBB44_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") seq_cst
@@ -1294,26 +1353,45 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_flush(ptr addrspace(1) %ptr) #0 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_flush:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB45_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
+; GFX90A-NEXT: .LBB45_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_flush:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB45_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB45_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1485,37 +1563,57 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent_safe(ptr addrspace(1) %ptr) {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB52_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s6
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB52_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB52_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB52_2
+; GFX90A-NEXT: .LBB52_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB52_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB52_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -2020,23 +2118,42 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr) #1 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB70_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: .LBB70_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB70_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: .LBB70_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
@@ -2046,23 +2163,42 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3) %ptr) #0 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB71_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: .LBB71_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB71_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: .LBB71_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
@@ -2072,46 +2208,66 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrspace(3) %ptr) #4 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX90A: ; %bb.0: ; %main_body
-; GFX90A-NEXT: s_load_dword s2, s[0:1], 0x24
-; GFX90A-NEXT: s_mov_b64 s[0:1], 0
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB72_3
+; GFX90A-NEXT: ; %bb.1:
+; GFX90A-NEXT: s_load_dword s4, s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_mov_b32_e32 v0, s2
-; GFX90A-NEXT: ds_read_b64 v[0:1], v0
-; GFX90A-NEXT: v_mov_b32_e32 v2, s2
-; GFX90A-NEXT: .LBB72_1: ; %atomicrmw.start
+; GFX90A-NEXT: v_mov_b32_e32 v0, s4
+; GFX90A-NEXT: ds_read_b64 v[2:3], v0
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
+; GFX90A-NEXT: s_mov_b64 s[0:1], 0
+; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: .LBB72_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f64 v[4:5], v[0:1], 4.0
-; GFX90A-NEXT: ds_cmpst_rtn_b64 v[4:5], v2, v[0:1], v[4:5]
+; GFX90A-NEXT: v_add_f64 v[6:7], v[2:3], v[0:1]
+; GFX90A-NEXT: ds_cmpst_rtn_b64 v[6:7], v4, v[2:3], v[6:7]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[0:1]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; GFX90A-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[6:7], v[6:7] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX90A-NEXT: s_cbranch_execnz .LBB72_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB72_2
+; GFX90A-NEXT: .LBB72_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX940: ; %bb.0: ; %main_body
-; GFX940-NEXT: s_load_dword s2, s[0:1], 0x24
-; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB72_3
+; GFX940-NEXT: ; %bb.1:
+; GFX940-NEXT: s_load_dword s4, s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_mov_b32_e32 v0, s2
-; GFX940-NEXT: ds_read_b64 v[0:1], v0
-; GFX940-NEXT: v_mov_b32_e32 v2, s2
-; GFX940-NEXT: .LBB72_1: ; %atomicrmw.start
+; GFX940-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-NEXT: ds_read_b64 v[2:3], v0
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_mov_b32_e32 v4, s4
+; GFX940-NEXT: .LBB72_2: ; %atomicrmw.start
; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_add_f64 v[4:5], v[0:1], 4.0
-; GFX940-NEXT: ds_cmpst_rtn_b64 v[4:5], v2, v[0:1], v[4:5]
+; GFX940-NEXT: v_add_f64 v[6:7], v[2:3], v[0:1]
+; GFX940-NEXT: ds_cmpst_rtn_b64 v[6:7], v4, v[2:3], v[6:7]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[0:1]
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
+; GFX940-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX940-NEXT: s_cbranch_execnz .LBB72_1
-; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_cbranch_execnz .LBB72_2
+; GFX940-NEXT: .LBB72_3:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
new file mode 100644
index 0000000..66bf0d5
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
@@ -0,0 +1,1502 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GISEL %s
+
+define i128 @fptosi_f64_to_i128(double %x) {
+; SDAG-LABEL: fptosi_f64_to_i128:
+; SDAG: ; %bb.0: ; %fp-to-i-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_mov_b32_e32 v5, v1
+; SDAG-NEXT: v_bfe_u32 v6, v5, 20, 11
+; SDAG-NEXT: v_mov_b32_e32 v7, 0
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x3fe
+; SDAG-NEXT: v_mov_b32_e32 v4, v0
+; SDAG-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-NEXT: v_mov_b32_e32 v3, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB0_10
+; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc
+; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc
+; SDAG-NEXT: s_movk_i32 s4, 0xff7f
+; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc
+; SDAG-NEXT: s_mov_b32 s5, -1
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[0:1]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[6:7], -1, v[2:3]
+; SDAG-NEXT: v_cmp_lt_i64_e32 vcc, -1, v[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB0_7
+; SDAG-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v9, s[4:5], -1, v0
+; SDAG-NEXT: v_addc_co_u32_e64 v10, s[4:5], 0, -1, s[4:5]
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x432
+; SDAG-NEXT: v_and_b32_e32 v0, 0xfffff, v5
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v11, -1, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v5, 0x100000, v0
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[12:13], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB0_4
+; SDAG-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; SDAG-NEXT: v_sub_u32_e32 v0, 0x473, v6
+; SDAG-NEXT: v_add_u32_e32 v2, 0xfffffb8d, v6
+; SDAG-NEXT: v_add_u32_e32 v7, 0xfffffbcd, v6
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v0, v[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[2:3], v2, v[4:5]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v7
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
+; SDAG-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v7
+; SDAG-NEXT: v_cndmask_b32_e64 v6, 0, v1, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v7, v[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, v2, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v7, 0, v1, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v12, v11, 0
+; SDAG-NEXT: v_mov_b32_e32 v3, 0
+; SDAG-NEXT: v_mul_lo_u32 v13, v8, v2
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v11, v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, v1
+; SDAG-NEXT: v_mul_lo_u32 v6, v11, v6
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v7, v11, v[2:3]
+; SDAG-NEXT: v_mul_lo_u32 v10, v10, v12
+; SDAG-NEXT: v_add3_u32 v5, v5, v6, v13
+; SDAG-NEXT: v_mov_b32_e32 v6, v2
+; SDAG-NEXT: v_mov_b32_e32 v2, v3
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v12, v8, v[1:2]
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v9, v12, v[4:5]
+; SDAG-NEXT: v_add_co_u32_e64 v5, s[4:5], v6, v2
+; SDAG-NEXT: v_addc_co_u32_e64 v6, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mul_lo_u32 v9, v9, v7
+; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v7, v8, v[5:6]
+; SDAG-NEXT: ; implicit-def: $vgpr11
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: v_add3_u32 v4, v10, v4, v9
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v5, v3
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], v6, v4, s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
+; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; SDAG-NEXT: ; implicit-def: $vgpr9
+; SDAG-NEXT: ; implicit-def: $vgpr10
+; SDAG-NEXT: .LBB0_4: ; %Flow
+; SDAG-NEXT: s_andn2_saveexec_b64 s[12:13], s[12:13]
+; SDAG-NEXT: s_cbranch_execz .LBB0_6
+; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; SDAG-NEXT: v_sub_u32_e32 v2, 0x433, v6
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v2, v[4:5]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v2
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v1, 0, v1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v6, v0, v4, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v5, v1, v5, s[6:7]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v11, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v5, v11, v[1:2]
+; SDAG-NEXT: v_mov_b32_e32 v7, v4
+; SDAG-NEXT: v_mov_b32_e32 v4, v2
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v6, v8, v[3:4]
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v7, v2
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v5, v8, v[2:3]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v9, v6, v[2:3]
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v10, v6, v[3:4]
+; SDAG-NEXT: v_mad_i32_i24 v3, v9, v5, v3
+; SDAG-NEXT: .LBB0_6: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB0_7: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
+; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
+; SDAG-NEXT: v_mov_b32_e32 v0, v2
+; SDAG-NEXT: v_mov_b32_e32 v1, v2
+; SDAG-NEXT: ; %bb.9: ; %Flow3
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB0_10: ; %fp-to-i-cleanup
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: fptosi_f64_to_i128:
+; GISEL: ; %bb.0: ; %fp-to-i-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_mov_b32_e32 v5, v1
+; GISEL-NEXT: v_mov_b32_e32 v4, v0
+; GISEL-NEXT: v_lshrrev_b32_e32 v0, 20, v5
+; GISEL-NEXT: v_and_b32_e32 v6, 0x7ff, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x3ff
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_mov_b32_e32 v7, 0
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB0_10
+; GISEL-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
+; GISEL-NEXT: v_mov_b32_e32 v2, 0xffffff80
+; GISEL-NEXT: v_addc_co_u32_e64 v1, s[6:7], 0, -1, vcc
+; GISEL-NEXT: v_mov_b32_e32 v3, -1
+; GISEL-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[0:1], v[2:3]
+; GISEL-NEXT: v_addc_co_u32_e64 v9, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_cmp_le_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cmp_lt_i64_e64 s[4:5], -1, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[14:15], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB0_7
+; GISEL-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; GISEL-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[6:7]
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v2, 1, v0
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[6:7]
+; GISEL-NEXT: v_lshlrev_b16_e32 v3, 2, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v8, 3, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v9, 4, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v10, 5, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v11, 6, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v12, 7, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v13, 8, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v14, 9, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v15, 10, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v16, 11, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v17, 12, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v18, 13, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v19, 14, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v20, 15, v0
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v2
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v3
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v3
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v8
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v8
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v9
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v9
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v10
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v10
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v11
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v11
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v12
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v12
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v13
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v13
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v14
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v14
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v15
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v15
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v16
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v16
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v17
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v17
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v18
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v18
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v19
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v19
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v20
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v20
+; GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v0
+; GISEL-NEXT: v_lshl_or_b32 v10, v0, 16, v0
+; GISEL-NEXT: v_or3_b32 v8, v1, v2, 1
+; GISEL-NEXT: v_or3_b32 v9, v0, v2, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x433
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_and_b32_e32 v2, 0xfffff, v5
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: v_or_b32_e32 v5, 0x100000, v2
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB0_4
+; GISEL-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; GISEL-NEXT: v_add_u32_e32 v6, 0xfffffbcd, v6
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v11, v10, 0
+; GISEL-NEXT: v_subrev_u32_e32 v7, 64, v6
+; GISEL-NEXT: v_sub_u32_e32 v2, 64, v6
+; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[4:5]
+; GISEL-NEXT: v_lshlrev_b64 v[4:5], v7, v[4:5]
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v6
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v8, v[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v8, 0
+; GISEL-NEXT: v_mov_b32_e32 v2, v6
+; GISEL-NEXT: v_mul_lo_u32 v6, v11, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v11, v9, v[1:2]
+; GISEL-NEXT: v_mul_lo_u32 v4, v12, v10
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v8, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11]
+; GISEL-NEXT: v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v9, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr10
+; GISEL-NEXT: ; implicit-def: $vgpr9
+; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v8, v[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr6
+; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: .LBB0_4: ; %Flow
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[16:17]
+; GISEL-NEXT: s_cbranch_execz .LBB0_6
+; GISEL-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; GISEL-NEXT: v_sub_co_u32_e32 v6, vcc, 0x433, v6
+; GISEL-NEXT: v_subrev_u32_e32 v2, 64, v6
+; GISEL-NEXT: v_lshrrev_b64 v[0:1], v6, v[4:5]
+; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, 0
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v4, v10, 0
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v1, v5, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v4, v8, 0
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v5, v9, v[2:3]
+; GISEL-NEXT: v_mul_lo_u32 v6, v5, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v4, v9, v[1:2]
+; GISEL-NEXT: v_mul_lo_u32 v4, v4, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v5, v8, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e64 v3, s[6:7], v3, v4, s[6:7]
+; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v6, vcc
+; GISEL-NEXT: .LBB0_6: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: .LBB0_7: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[14:15]
+; GISEL-NEXT: s_cbranch_execz .LBB0_9
+; GISEL-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v1, 1, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 1, v1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 2, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 3, v1
+; GISEL-NEXT: v_or_b32_e32 v2, v1, v2
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 4, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 5, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 6, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 7, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 8, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 9, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 10, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 11, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 12, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v14, 13, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v14
+; GISEL-NEXT: v_lshlrev_b32_e32 v15, 14, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v16, 15, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v13, v14
+; GISEL-NEXT: v_or3_b32 v0, v0, v15, v16
+; GISEL-NEXT: v_lshlrev_b32_e32 v17, 16, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v18, 17, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v15, v16
+; GISEL-NEXT: v_or3_b32 v0, v0, v17, v18
+; GISEL-NEXT: v_lshlrev_b32_e32 v19, 18, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v20, 19, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v17, v18
+; GISEL-NEXT: v_or3_b32 v0, v0, v19, v20
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 20, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 21, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v19, v20
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 22, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 23, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 24, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 25, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 26, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 27, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 28, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 29, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 30, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v1
+; GISEL-NEXT: v_or3_b32 v1, v2, v13, v1
+; GISEL-NEXT: v_add_u32_e32 v3, 0x80000000, v1
+; GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GISEL-NEXT: .LBB0_9: ; %Flow3
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: .LBB0_10: ; %fp-to-i-cleanup
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = fptosi double %x to i128
+ ret i128 %cvt
+}
+
+define i128 @fptoui_f64_to_i128(double %x) {
+; SDAG-LABEL: fptoui_f64_to_i128:
+; SDAG: ; %bb.0: ; %fp-to-i-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_mov_b32_e32 v5, v1
+; SDAG-NEXT: v_bfe_u32 v6, v5, 20, 11
+; SDAG-NEXT: v_mov_b32_e32 v7, 0
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x3fe
+; SDAG-NEXT: v_mov_b32_e32 v4, v0
+; SDAG-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-NEXT: v_mov_b32_e32 v3, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB1_10
+; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc
+; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc
+; SDAG-NEXT: s_movk_i32 s4, 0xff7f
+; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc
+; SDAG-NEXT: s_mov_b32 s5, -1
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[0:1]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[6:7], -1, v[2:3]
+; SDAG-NEXT: v_cmp_lt_i64_e32 vcc, -1, v[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB1_7
+; SDAG-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v9, s[4:5], -1, v0
+; SDAG-NEXT: v_addc_co_u32_e64 v10, s[4:5], 0, -1, s[4:5]
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x432
+; SDAG-NEXT: v_and_b32_e32 v0, 0xfffff, v5
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v11, -1, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v5, 0x100000, v0
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[12:13], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB1_4
+; SDAG-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; SDAG-NEXT: v_sub_u32_e32 v0, 0x473, v6
+; SDAG-NEXT: v_add_u32_e32 v2, 0xfffffb8d, v6
+; SDAG-NEXT: v_add_u32_e32 v7, 0xfffffbcd, v6
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v0, v[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[2:3], v2, v[4:5]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v7
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
+; SDAG-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v7
+; SDAG-NEXT: v_cndmask_b32_e64 v6, 0, v1, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v7, v[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, v2, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v7, 0, v1, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v12, v11, 0
+; SDAG-NEXT: v_mov_b32_e32 v3, 0
+; SDAG-NEXT: v_mul_lo_u32 v13, v8, v2
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v11, v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, v1
+; SDAG-NEXT: v_mul_lo_u32 v6, v11, v6
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v7, v11, v[2:3]
+; SDAG-NEXT: v_mul_lo_u32 v10, v10, v12
+; SDAG-NEXT: v_add3_u32 v5, v5, v6, v13
+; SDAG-NEXT: v_mov_b32_e32 v6, v2
+; SDAG-NEXT: v_mov_b32_e32 v2, v3
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v12, v8, v[1:2]
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v9, v12, v[4:5]
+; SDAG-NEXT: v_add_co_u32_e64 v5, s[4:5], v6, v2
+; SDAG-NEXT: v_addc_co_u32_e64 v6, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mul_lo_u32 v9, v9, v7
+; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v7, v8, v[5:6]
+; SDAG-NEXT: ; implicit-def: $vgpr11
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: v_add3_u32 v4, v10, v4, v9
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v5, v3
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], v6, v4, s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
+; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; SDAG-NEXT: ; implicit-def: $vgpr9
+; SDAG-NEXT: ; implicit-def: $vgpr10
+; SDAG-NEXT: .LBB1_4: ; %Flow
+; SDAG-NEXT: s_andn2_saveexec_b64 s[12:13], s[12:13]
+; SDAG-NEXT: s_cbranch_execz .LBB1_6
+; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; SDAG-NEXT: v_sub_u32_e32 v2, 0x433, v6
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v2, v[4:5]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v2
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v1, 0, v1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v6, v0, v4, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v5, v1, v5, s[6:7]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v11, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v5, v11, v[1:2]
+; SDAG-NEXT: v_mov_b32_e32 v7, v4
+; SDAG-NEXT: v_mov_b32_e32 v4, v2
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v6, v8, v[3:4]
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v7, v2
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v5, v8, v[2:3]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v9, v6, v[2:3]
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v10, v6, v[3:4]
+; SDAG-NEXT: v_mad_i32_i24 v3, v9, v5, v3
+; SDAG-NEXT: .LBB1_6: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB1_7: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
+; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
+; SDAG-NEXT: v_mov_b32_e32 v0, v2
+; SDAG-NEXT: v_mov_b32_e32 v1, v2
+; SDAG-NEXT: ; %bb.9: ; %Flow3
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB1_10: ; %fp-to-i-cleanup
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: fptoui_f64_to_i128:
+; GISEL: ; %bb.0: ; %fp-to-i-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_mov_b32_e32 v5, v1
+; GISEL-NEXT: v_mov_b32_e32 v4, v0
+; GISEL-NEXT: v_lshrrev_b32_e32 v0, 20, v5
+; GISEL-NEXT: v_and_b32_e32 v6, 0x7ff, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x3ff
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_mov_b32_e32 v7, 0
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB1_10
+; GISEL-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
+; GISEL-NEXT: v_mov_b32_e32 v2, 0xffffff80
+; GISEL-NEXT: v_addc_co_u32_e64 v1, s[6:7], 0, -1, vcc
+; GISEL-NEXT: v_mov_b32_e32 v3, -1
+; GISEL-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[0:1], v[2:3]
+; GISEL-NEXT: v_addc_co_u32_e64 v9, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_cmp_le_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cmp_lt_i64_e64 s[4:5], -1, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[14:15], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB1_7
+; GISEL-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; GISEL-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[6:7]
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v2, 1, v0
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[6:7]
+; GISEL-NEXT: v_lshlrev_b16_e32 v3, 2, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v8, 3, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v9, 4, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v10, 5, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v11, 6, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v12, 7, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v13, 8, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v14, 9, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v15, 10, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v16, 11, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v17, 12, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v18, 13, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v19, 14, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v20, 15, v0
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v2
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v3
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v3
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v8
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v8
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v9
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v9
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v10
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v10
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v11
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v11
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v12
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v12
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v13
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v13
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v14
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v14
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v15
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v15
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v16
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v16
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v17
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v17
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v18
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v18
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v19
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v19
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v20
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v20
+; GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v0
+; GISEL-NEXT: v_lshl_or_b32 v10, v0, 16, v0
+; GISEL-NEXT: v_or3_b32 v8, v1, v2, 1
+; GISEL-NEXT: v_or3_b32 v9, v0, v2, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x433
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_and_b32_e32 v2, 0xfffff, v5
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: v_or_b32_e32 v5, 0x100000, v2
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB1_4
+; GISEL-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; GISEL-NEXT: v_add_u32_e32 v6, 0xfffffbcd, v6
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v11, v10, 0
+; GISEL-NEXT: v_subrev_u32_e32 v7, 64, v6
+; GISEL-NEXT: v_sub_u32_e32 v2, 64, v6
+; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[4:5]
+; GISEL-NEXT: v_lshlrev_b64 v[4:5], v7, v[4:5]
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v6
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v8, v[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v8, 0
+; GISEL-NEXT: v_mov_b32_e32 v2, v6
+; GISEL-NEXT: v_mul_lo_u32 v6, v11, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v11, v9, v[1:2]
+; GISEL-NEXT: v_mul_lo_u32 v4, v12, v10
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v8, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11]
+; GISEL-NEXT: v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v9, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr10
+; GISEL-NEXT: ; implicit-def: $vgpr9
+; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v8, v[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr6
+; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: .LBB1_4: ; %Flow
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[16:17]
+; GISEL-NEXT: s_cbranch_execz .LBB1_6
+; GISEL-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; GISEL-NEXT: v_sub_co_u32_e32 v6, vcc, 0x433, v6
+; GISEL-NEXT: v_subrev_u32_e32 v2, 64, v6
+; GISEL-NEXT: v_lshrrev_b64 v[0:1], v6, v[4:5]
+; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, 0
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v4, v10, 0
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v1, v5, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v4, v8, 0
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v5, v9, v[2:3]
+; GISEL-NEXT: v_mul_lo_u32 v6, v5, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v4, v9, v[1:2]
+; GISEL-NEXT: v_mul_lo_u32 v4, v4, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v5, v8, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e64 v3, s[6:7], v3, v4, s[6:7]
+; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v6, vcc
+; GISEL-NEXT: .LBB1_6: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: .LBB1_7: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[14:15]
+; GISEL-NEXT: s_cbranch_execz .LBB1_9
+; GISEL-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v1, 1, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 1, v1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 2, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 3, v1
+; GISEL-NEXT: v_or_b32_e32 v2, v1, v2
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 4, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 5, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 6, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 7, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 8, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 9, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 10, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 11, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 12, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v14, 13, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v14
+; GISEL-NEXT: v_lshlrev_b32_e32 v15, 14, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v16, 15, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v13, v14
+; GISEL-NEXT: v_or3_b32 v0, v0, v15, v16
+; GISEL-NEXT: v_lshlrev_b32_e32 v17, 16, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v18, 17, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v15, v16
+; GISEL-NEXT: v_or3_b32 v0, v0, v17, v18
+; GISEL-NEXT: v_lshlrev_b32_e32 v19, 18, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v20, 19, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v17, v18
+; GISEL-NEXT: v_or3_b32 v0, v0, v19, v20
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 20, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 21, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v19, v20
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 22, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 23, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 24, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 25, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 26, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 27, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 28, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 29, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 30, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v1
+; GISEL-NEXT: v_or3_b32 v1, v2, v13, v1
+; GISEL-NEXT: v_add_u32_e32 v3, 0x80000000, v1
+; GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GISEL-NEXT: .LBB1_9: ; %Flow3
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: .LBB1_10: ; %fp-to-i-cleanup
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = fptoui double %x to i128
+ ret i128 %cvt
+}
+
+define i128 @fptosi_f32_to_i128(float %x) {
+; SDAG-LABEL: fptosi_f32_to_i128:
+; SDAG: ; %bb.0: ; %fp-to-i-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_mov_b32_e32 v4, v0
+; SDAG-NEXT: v_bfe_u32 v5, v4, 23, 8
+; SDAG-NEXT: s_movk_i32 s4, 0x7e
+; SDAG-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v6, 0
+; SDAG-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-NEXT: v_mov_b32_e32 v3, 0
+; SDAG-NEXT: v_cmp_lt_u32_e32 vcc, s4, v5
+; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB2_10
+; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
+; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
+; SDAG-NEXT: s_movk_i32 s4, 0xff7f
+; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
+; SDAG-NEXT: s_mov_b32 s5, -1
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[0:1]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[6:7], -1, v[2:3]
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, -1, v4
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB2_7
+; SDAG-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v9, s[4:5], -1, v0
+; SDAG-NEXT: v_addc_co_u32_e64 v11, s[4:5], 0, -1, s[4:5]
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x95
+; SDAG-NEXT: v_and_b32_e32 v0, 0x7fffff, v4
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[5:6]
+; SDAG-NEXT: v_mov_b32_e32 v7, 0
+; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v10, -1, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v6, 0x800000, v0
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[12:13], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB2_4
+; SDAG-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; SDAG-NEXT: v_sub_u32_e32 v0, 0xd6, v5
+; SDAG-NEXT: v_add_u32_e32 v2, 0xffffff2a, v5
+; SDAG-NEXT: v_add_u32_e32 v4, 0xffffff6a, v5
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v0, v[6:7]
+; SDAG-NEXT: v_lshlrev_b64 v[2:3], v2, v[6:7]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v4
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
+; SDAG-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v4
+; SDAG-NEXT: v_cndmask_b32_e64 v3, 0, v1, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v4, v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, v2, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v13, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, v1, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v13, v10, 0
+; SDAG-NEXT: v_mul_lo_u32 v14, v8, v2
+; SDAG-NEXT: v_mul_lo_u32 v15, v10, v3
+; SDAG-NEXT: v_mov_b32_e32 v6, v1
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v12, v10, v[6:7]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v10, v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v6, v5
+; SDAG-NEXT: v_mov_b32_e32 v5, v7
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v13, v8, v[4:5]
+; SDAG-NEXT: v_add3_u32 v3, v3, v15, v14
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v9, v13, v[2:3]
+; SDAG-NEXT: v_add_co_u32_e64 v5, s[4:5], v6, v5
+; SDAG-NEXT: v_addc_co_u32_e64 v6, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mul_lo_u32 v3, v9, v12
+; SDAG-NEXT: v_mul_lo_u32 v7, v11, v13
+; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v12, v8, v[5:6]
+; SDAG-NEXT: ; implicit-def: $vgpr10
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: ; implicit-def: $vgpr9
+; SDAG-NEXT: v_add3_u32 v3, v7, v2, v3
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v5, v1
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], v6, v3, s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr5_vgpr6
+; SDAG-NEXT: v_mov_b32_e32 v1, v4
+; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
+; SDAG-NEXT: .LBB2_4: ; %Flow
+; SDAG-NEXT: s_andn2_saveexec_b64 s[6:7], s[12:13]
+; SDAG-NEXT: s_cbranch_execz .LBB2_6
+; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; SDAG-NEXT: v_sub_u32_e32 v2, 0x96, v5
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v2, v[6:7]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, v0, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v3, v0, v6, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v3, v10, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v3, v8, v[1:2]
+; SDAG-NEXT: v_mov_b32_e32 v1, v5
+; SDAG-NEXT: v_mad_i64_i32 v[2:3], s[4:5], v9, v3, v[1:2]
+; SDAG-NEXT: v_mov_b32_e32 v1, v4
+; SDAG-NEXT: .LBB2_6: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: .LBB2_7: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
+; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
+; SDAG-NEXT: v_mov_b32_e32 v0, v2
+; SDAG-NEXT: v_mov_b32_e32 v1, v2
+; SDAG-NEXT: ; %bb.9: ; %Flow3
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB2_10: ; %fp-to-i-cleanup
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: fptosi_f32_to_i128:
+; GISEL: ; %bb.0: ; %fp-to-i-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_mov_b32_e32 v4, v0
+; GISEL-NEXT: v_mov_b32_e32 v5, 0
+; GISEL-NEXT: v_lshrrev_b64 v[0:1], 23, v[4:5]
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_bfe_u32 v6, v0, 0, 8
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB2_10
+; GISEL-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v6
+; GISEL-NEXT: v_mov_b32_e32 v2, 0xffffff80
+; GISEL-NEXT: v_addc_co_u32_e64 v1, s[6:7], 0, -1, vcc
+; GISEL-NEXT: v_mov_b32_e32 v3, -1
+; GISEL-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[0:1], v[2:3]
+; GISEL-NEXT: v_addc_co_u32_e64 v9, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_cmp_le_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cmp_lt_i32_e64 s[4:5], -1, v4
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[14:15], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB2_7
+; GISEL-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; GISEL-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[6:7]
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v2, 1, v0
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[6:7]
+; GISEL-NEXT: v_lshlrev_b16_e32 v3, 2, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v5, 3, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v8, 4, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v9, 5, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v10, 6, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v11, 7, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v12, 8, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v13, 9, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v14, 10, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v15, 11, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v16, 12, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v17, 13, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v18, 14, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v19, 15, v0
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v2
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v3
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v3
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v5
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v5
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v8
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v8
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v9
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v9
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v10
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v10
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v11
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v11
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v12
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v12
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v13
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v13
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v14
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v14
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v15
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v15
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v16
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v16
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v17
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v17
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v18
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v18
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v19
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v19
+; GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v0
+; GISEL-NEXT: v_lshl_or_b32 v10, v0, 16, v0
+; GISEL-NEXT: v_or3_b32 v9, v1, v2, 1
+; GISEL-NEXT: v_or3_b32 v8, v0, v2, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x96
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_and_b32_e32 v2, 0x7fffff, v4
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: v_or_b32_e32 v4, 0x800000, v2
+; GISEL-NEXT: v_mov_b32_e32 v5, 0
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB2_4
+; GISEL-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; GISEL-NEXT: v_add_u32_e32 v6, 0xffffff6a, v6
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v11, v10, 0
+; GISEL-NEXT: v_subrev_u32_e32 v7, 64, v6
+; GISEL-NEXT: v_sub_u32_e32 v2, 64, v6
+; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[4:5]
+; GISEL-NEXT: v_lshlrev_b64 v[4:5], v7, v[4:5]
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v6
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v8, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v9, v[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v9, 0
+; GISEL-NEXT: v_mov_b32_e32 v2, v6
+; GISEL-NEXT: v_mul_lo_u32 v6, v11, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v11, v8, v[1:2]
+; GISEL-NEXT: v_mul_lo_u32 v4, v12, v10
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v9, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11]
+; GISEL-NEXT: v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v8, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr10
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v9, v[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr6
+; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr9
+; GISEL-NEXT: .LBB2_4: ; %Flow
+; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[16:17]
+; GISEL-NEXT: s_cbranch_execz .LBB2_6
+; GISEL-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; GISEL-NEXT: v_sub_co_u32_e32 v3, vcc, 0x96, v6
+; GISEL-NEXT: v_subrev_u32_e32 v2, 64, v3
+; GISEL-NEXT: v_lshrrev_b64 v[0:1], v3, v[4:5]
+; GISEL-NEXT: v_lshrrev_b64 v[1:2], v2, 0
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v4, v9, 0
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[8:9], v4, v10, 0
+; GISEL-NEXT: v_mul_lo_u32 v5, v4, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v4, v8, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
+; GISEL-NEXT: .LBB2_6: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: .LBB2_7: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[14:15]
+; GISEL-NEXT: s_cbranch_execz .LBB2_9
+; GISEL-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v1, 1, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 1, v1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 2, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 3, v1
+; GISEL-NEXT: v_or_b32_e32 v2, v1, v2
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 4, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 5, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 6, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 7, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 8, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 9, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 10, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 11, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 12, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v14, 13, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v14
+; GISEL-NEXT: v_lshlrev_b32_e32 v15, 14, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v16, 15, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v13, v14
+; GISEL-NEXT: v_or3_b32 v0, v0, v15, v16
+; GISEL-NEXT: v_lshlrev_b32_e32 v17, 16, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v18, 17, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v15, v16
+; GISEL-NEXT: v_or3_b32 v0, v0, v17, v18
+; GISEL-NEXT: v_lshlrev_b32_e32 v19, 18, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v20, 19, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v17, v18
+; GISEL-NEXT: v_or3_b32 v0, v0, v19, v20
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 20, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 21, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v19, v20
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 22, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 23, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 24, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 25, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 26, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 27, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 28, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 29, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 30, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v1
+; GISEL-NEXT: v_or3_b32 v1, v2, v13, v1
+; GISEL-NEXT: v_add_u32_e32 v3, 0x80000000, v1
+; GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GISEL-NEXT: .LBB2_9: ; %Flow3
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: .LBB2_10: ; %fp-to-i-cleanup
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = fptosi float %x to i128
+ ret i128 %cvt
+}
+
+define i128 @fptoui_f32_to_i128(float %x) {
+; SDAG-LABEL: fptoui_f32_to_i128:
+; SDAG: ; %bb.0: ; %fp-to-i-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_mov_b32_e32 v4, v0
+; SDAG-NEXT: v_bfe_u32 v5, v4, 23, 8
+; SDAG-NEXT: s_movk_i32 s4, 0x7e
+; SDAG-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v6, 0
+; SDAG-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-NEXT: v_mov_b32_e32 v3, 0
+; SDAG-NEXT: v_cmp_lt_u32_e32 vcc, s4, v5
+; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB3_10
+; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
+; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
+; SDAG-NEXT: s_movk_i32 s4, 0xff7f
+; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
+; SDAG-NEXT: s_mov_b32 s5, -1
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[0:1]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[6:7], -1, v[2:3]
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, -1, v4
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB3_7
+; SDAG-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v9, s[4:5], -1, v0
+; SDAG-NEXT: v_addc_co_u32_e64 v11, s[4:5], 0, -1, s[4:5]
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x95
+; SDAG-NEXT: v_and_b32_e32 v0, 0x7fffff, v4
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[5:6]
+; SDAG-NEXT: v_mov_b32_e32 v7, 0
+; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v10, -1, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v6, 0x800000, v0
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[12:13], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB3_4
+; SDAG-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; SDAG-NEXT: v_sub_u32_e32 v0, 0xd6, v5
+; SDAG-NEXT: v_add_u32_e32 v2, 0xffffff2a, v5
+; SDAG-NEXT: v_add_u32_e32 v4, 0xffffff6a, v5
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v0, v[6:7]
+; SDAG-NEXT: v_lshlrev_b64 v[2:3], v2, v[6:7]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v4
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
+; SDAG-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v4
+; SDAG-NEXT: v_cndmask_b32_e64 v3, 0, v1, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v4, v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, v2, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v13, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, v1, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v13, v10, 0
+; SDAG-NEXT: v_mul_lo_u32 v14, v8, v2
+; SDAG-NEXT: v_mul_lo_u32 v15, v10, v3
+; SDAG-NEXT: v_mov_b32_e32 v6, v1
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v12, v10, v[6:7]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v10, v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v6, v5
+; SDAG-NEXT: v_mov_b32_e32 v5, v7
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v13, v8, v[4:5]
+; SDAG-NEXT: v_add3_u32 v3, v3, v15, v14
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v9, v13, v[2:3]
+; SDAG-NEXT: v_add_co_u32_e64 v5, s[4:5], v6, v5
+; SDAG-NEXT: v_addc_co_u32_e64 v6, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mul_lo_u32 v3, v9, v12
+; SDAG-NEXT: v_mul_lo_u32 v7, v11, v13
+; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v12, v8, v[5:6]
+; SDAG-NEXT: ; implicit-def: $vgpr10
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: ; implicit-def: $vgpr9
+; SDAG-NEXT: v_add3_u32 v3, v7, v2, v3
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v5, v1
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], v6, v3, s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr5_vgpr6
+; SDAG-NEXT: v_mov_b32_e32 v1, v4
+; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
+; SDAG-NEXT: .LBB3_4: ; %Flow
+; SDAG-NEXT: s_andn2_saveexec_b64 s[6:7], s[12:13]
+; SDAG-NEXT: s_cbranch_execz .LBB3_6
+; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; SDAG-NEXT: v_sub_u32_e32 v2, 0x96, v5
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v2, v[6:7]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, v0, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v3, v0, v6, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v3, v10, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v3, v8, v[1:2]
+; SDAG-NEXT: v_mov_b32_e32 v1, v5
+; SDAG-NEXT: v_mad_i64_i32 v[2:3], s[4:5], v9, v3, v[1:2]
+; SDAG-NEXT: v_mov_b32_e32 v1, v4
+; SDAG-NEXT: .LBB3_6: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: .LBB3_7: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
+; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
+; SDAG-NEXT: v_mov_b32_e32 v0, v2
+; SDAG-NEXT: v_mov_b32_e32 v1, v2
+; SDAG-NEXT: ; %bb.9: ; %Flow3
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB3_10: ; %fp-to-i-cleanup
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: fptoui_f32_to_i128:
+; GISEL: ; %bb.0: ; %fp-to-i-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_mov_b32_e32 v4, v0
+; GISEL-NEXT: v_mov_b32_e32 v5, 0
+; GISEL-NEXT: v_lshrrev_b64 v[0:1], 23, v[4:5]
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_bfe_u32 v6, v0, 0, 8
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB3_10
+; GISEL-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v6
+; GISEL-NEXT: v_mov_b32_e32 v2, 0xffffff80
+; GISEL-NEXT: v_addc_co_u32_e64 v1, s[6:7], 0, -1, vcc
+; GISEL-NEXT: v_mov_b32_e32 v3, -1
+; GISEL-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[0:1], v[2:3]
+; GISEL-NEXT: v_addc_co_u32_e64 v9, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_cmp_le_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cmp_lt_i32_e64 s[4:5], -1, v4
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[14:15], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB3_7
+; GISEL-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; GISEL-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[6:7]
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v2, 1, v0
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[6:7]
+; GISEL-NEXT: v_lshlrev_b16_e32 v3, 2, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v5, 3, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v8, 4, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v9, 5, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v10, 6, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v11, 7, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v12, 8, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v13, 9, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v14, 10, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v15, 11, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v16, 12, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v17, 13, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v18, 14, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v19, 15, v0
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v2
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v3
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v3
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v5
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v5
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v8
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v8
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v9
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v9
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v10
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v10
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v11
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v11
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v12
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v12
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v13
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v13
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v14
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v14
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v15
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v15
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v16
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v16
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v17
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v17
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v18
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v18
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v19
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v19
+; GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v0
+; GISEL-NEXT: v_lshl_or_b32 v10, v0, 16, v0
+; GISEL-NEXT: v_or3_b32 v9, v1, v2, 1
+; GISEL-NEXT: v_or3_b32 v8, v0, v2, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x96
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_and_b32_e32 v2, 0x7fffff, v4
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: v_or_b32_e32 v4, 0x800000, v2
+; GISEL-NEXT: v_mov_b32_e32 v5, 0
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB3_4
+; GISEL-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; GISEL-NEXT: v_add_u32_e32 v6, 0xffffff6a, v6
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v11, v10, 0
+; GISEL-NEXT: v_subrev_u32_e32 v7, 64, v6
+; GISEL-NEXT: v_sub_u32_e32 v2, 64, v6
+; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[4:5]
+; GISEL-NEXT: v_lshlrev_b64 v[4:5], v7, v[4:5]
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v6
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v8, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v9, v[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v9, 0
+; GISEL-NEXT: v_mov_b32_e32 v2, v6
+; GISEL-NEXT: v_mul_lo_u32 v6, v11, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v11, v8, v[1:2]
+; GISEL-NEXT: v_mul_lo_u32 v4, v12, v10
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v9, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11]
+; GISEL-NEXT: v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v8, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr10
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v9, v[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr6
+; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr9
+; GISEL-NEXT: .LBB3_4: ; %Flow
+; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[16:17]
+; GISEL-NEXT: s_cbranch_execz .LBB3_6
+; GISEL-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; GISEL-NEXT: v_sub_co_u32_e32 v3, vcc, 0x96, v6
+; GISEL-NEXT: v_subrev_u32_e32 v2, 64, v3
+; GISEL-NEXT: v_lshrrev_b64 v[0:1], v3, v[4:5]
+; GISEL-NEXT: v_lshrrev_b64 v[1:2], v2, 0
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v4, v9, 0
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[8:9], v4, v10, 0
+; GISEL-NEXT: v_mul_lo_u32 v5, v4, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v4, v8, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
+; GISEL-NEXT: .LBB3_6: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: .LBB3_7: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[14:15]
+; GISEL-NEXT: s_cbranch_execz .LBB3_9
+; GISEL-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v1, 1, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 1, v1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 2, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 3, v1
+; GISEL-NEXT: v_or_b32_e32 v2, v1, v2
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 4, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 5, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 6, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 7, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 8, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 9, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 10, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 11, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 12, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v14, 13, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v14
+; GISEL-NEXT: v_lshlrev_b32_e32 v15, 14, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v16, 15, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v13, v14
+; GISEL-NEXT: v_or3_b32 v0, v0, v15, v16
+; GISEL-NEXT: v_lshlrev_b32_e32 v17, 16, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v18, 17, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v15, v16
+; GISEL-NEXT: v_or3_b32 v0, v0, v17, v18
+; GISEL-NEXT: v_lshlrev_b32_e32 v19, 18, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v20, 19, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v17, v18
+; GISEL-NEXT: v_or3_b32 v0, v0, v19, v20
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 20, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 21, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v19, v20
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 22, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 23, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 24, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 25, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 26, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 27, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 28, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 29, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 30, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v1
+; GISEL-NEXT: v_or3_b32 v1, v2, v13, v1
+; GISEL-NEXT: v_add_u32_e32 v3, 0x80000000, v1
+; GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GISEL-NEXT: .LBB3_9: ; %Flow3
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: .LBB3_10: ; %fp-to-i-cleanup
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = fptoui float %x to i128
+ ret i128 %cvt
+}
+
+define i128 @fptosi_f16_to_i128(half %x) {
+; GCN-LABEL: fptosi_f16_to_i128:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GCN-NEXT: v_cvt_i32_f32_e32 v0, v0
+; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NEXT: v_mov_b32_e32 v2, v1
+; GCN-NEXT: v_mov_b32_e32 v3, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cvt = fptosi half %x to i128
+ ret i128 %cvt
+}
+
+define i128 @fptoui_f16_to_i128(half %x) {
+; GCN-LABEL: fptoui_f16_to_i128:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GCN-NEXT: v_mov_b32_e32 v1, 0
+; GCN-NEXT: v_mov_b32_e32 v2, 0
+; GCN-NEXT: v_mov_b32_e32 v3, 0
+; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cvt = fptoui half %x to i128
+ ret i128 %cvt
+}
+
+; FIXME: ExpandLargeFpConvert asserts on bfloat
+; define i128 @fptosi_bf16_to_i128(bfloat %x) {
+; %cvt = fptosi bfloat %x to i128
+; ret i128 %cvt
+; }
+
+; define i128 @fptoui_bf16_to_i128(bfloat %x) {
+; %cvt = fptoui bfloat %x to i128
+; ret i128 %cvt
+; }
diff --git a/llvm/test/CodeGen/AMDGPU/fract-match.ll b/llvm/test/CodeGen/AMDGPU/fract-match.ll
index 3a0b825..e361aa4 100644
--- a/llvm/test/CodeGen/AMDGPU/fract-match.ll
+++ b/llvm/test/CodeGen/AMDGPU/fract-match.ll
@@ -1705,16 +1705,16 @@ define <2 x float> @safe_math_fract_v2f32(<2 x float> %x, ptr addrspace(1) nocap
; GFX6-NEXT: v_min_f32_e32 v7, 0x3f7fffff, v7
; GFX6-NEXT: v_cndmask_b32_e32 v6, v6, v1, vcc
; GFX6-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX6-NEXT: s_movk_i32 s10, 0x204
+; GFX6-NEXT: v_mov_b32_e32 v8, 0x204
; GFX6-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc
-; GFX6-NEXT: v_cmp_class_f32_e64 s[8:9], v0, s10
+; GFX6-NEXT: v_cmp_class_f32_e32 vcc, v0, v8
; GFX6-NEXT: s_mov_b32 s6, 0
-; GFX6-NEXT: v_cndmask_b32_e64 v0, v7, 0, s[8:9]
-; GFX6-NEXT: v_cmp_class_f32_e64 s[8:9], v1, s10
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v7, 0, vcc
+; GFX6-NEXT: v_cmp_class_f32_e32 vcc, v1, v8
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: v_cndmask_b32_e64 v1, v6, 0, s[8:9]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v6, 0, vcc
; GFX6-NEXT: buffer_store_dwordx2 v[4:5], v[2:3], s[4:7], 0 addr64
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; GFX6-NEXT: s_setpc_b64 s[30:31]
@@ -1722,19 +1722,19 @@ define <2 x float> @safe_math_fract_v2f32(<2 x float> %x, ptr addrspace(1) nocap
; GFX7-LABEL: safe_math_fract_v2f32:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: s_mov_b32 s8, 0x7f800000
+; GFX7-NEXT: v_mov_b32_e32 v8, 0x204
; GFX7-NEXT: v_fract_f32_e32 v6, v0
-; GFX7-NEXT: v_cmp_neq_f32_e64 vcc, |v0|, s8
+; GFX7-NEXT: v_cmp_class_f32_e32 vcc, v0, v8
; GFX7-NEXT: s_mov_b32 s6, 0
; GFX7-NEXT: v_floor_f32_e32 v4, v0
; GFX7-NEXT: v_fract_f32_e32 v7, v1
-; GFX7-NEXT: v_cndmask_b32_e32 v0, 0, v6, vcc
-; GFX7-NEXT: v_cmp_neq_f32_e64 vcc, |v1|, s8
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX7-NEXT: v_cmp_class_f32_e32 vcc, v1, v8
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
; GFX7-NEXT: v_floor_f32_e32 v5, v1
-; GFX7-NEXT: v_cndmask_b32_e32 v1, 0, v7, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v7, 0, vcc
; GFX7-NEXT: buffer_store_dwordx2 v[4:5], v[2:3], s[4:7], 0 addr64
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: s_setpc_b64 s[30:31]
@@ -1742,15 +1742,15 @@ define <2 x float> @safe_math_fract_v2f32(<2 x float> %x, ptr addrspace(1) nocap
; GFX8-LABEL: safe_math_fract_v2f32:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: s_mov_b32 s4, 0x7f800000
+; GFX8-NEXT: v_mov_b32_e32 v8, 0x204
; GFX8-NEXT: v_fract_f32_e32 v6, v0
-; GFX8-NEXT: v_cmp_neq_f32_e64 vcc, |v0|, s4
+; GFX8-NEXT: v_cmp_class_f32_e32 vcc, v0, v8
; GFX8-NEXT: v_floor_f32_e32 v4, v0
; GFX8-NEXT: v_fract_f32_e32 v7, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v0, 0, v6, vcc
-; GFX8-NEXT: v_cmp_neq_f32_e64 vcc, |v1|, s4
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX8-NEXT: v_cmp_class_f32_e32 vcc, v1, v8
; GFX8-NEXT: v_floor_f32_e32 v5, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v1, 0, v7, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v7, 0, vcc
; GFX8-NEXT: global_store_dwordx2 v[2:3], v[4:5], off
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_setpc_b64 s[30:31]
@@ -1759,14 +1759,15 @@ define <2 x float> @safe_math_fract_v2f32(<2 x float> %x, ptr addrspace(1) nocap
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_fract_f32_e32 v6, v0
-; GFX11-NEXT: v_cmp_neq_f32_e64 vcc_lo, 0x7f800000, |v0|
+; GFX11-NEXT: v_cmp_class_f32_e64 s0, v0, 0x204
; GFX11-NEXT: v_fract_f32_e32 v7, v1
; GFX11-NEXT: v_floor_f32_e32 v4, v0
; GFX11-NEXT: v_floor_f32_e32 v5, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, 0, v6, vcc_lo
-; GFX11-NEXT: v_cmp_neq_f32_e64 vcc_lo, 0x7f800000, |v1|
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v6, 0, s0
+; GFX11-NEXT: v_cmp_class_f32_e64 s0, v1, 0x204
; GFX11-NEXT: global_store_b64 v[2:3], v[4:5], off
-; GFX11-NEXT: v_cndmask_b32_e32 v1, 0, v7, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v7, 0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%floor = tail call <2 x float> @llvm.floor.v2f32(<2 x float> %x)
@@ -1937,21 +1938,22 @@ define half @safe_math_fract_f16(half %x, ptr addrspace(1) nocapture writeonly %
; GFX6: ; %bb.0: ; %entry
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT: s_mov_b32 s8, 0x7f800000
+; GFX6-NEXT: s_movk_i32 s8, 0x7c00
; GFX6-NEXT: s_mov_b32 s6, 0
; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v0
+; GFX6-NEXT: v_and_b32_e32 v0, 0x7fff, v0
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: v_floor_f32_e32 v3, v0
-; GFX6-NEXT: v_sub_f32_e32 v4, v0, v3
-; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
-; GFX6-NEXT: v_min_f32_e32 v4, 0x3f7fe000, v4
-; GFX6-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc
-; GFX6-NEXT: v_cmp_neq_f32_e64 vcc, |v0|, s8
-; GFX6-NEXT: v_cndmask_b32_e32 v0, 0, v4, vcc
-; GFX6-NEXT: buffer_store_short v3, v[1:2], s[4:7], 0 addr64
+; GFX6-NEXT: v_floor_f32_e32 v4, v3
+; GFX6-NEXT: v_sub_f32_e32 v5, v3, v4
+; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX6-NEXT: v_min_f32_e32 v5, 0x3f7fe000, v5
+; GFX6-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, s8, v0
+; GFX6-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
+; GFX6-NEXT: buffer_store_short v4, v[1:2], s[4:7], 0 addr64
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
@@ -1959,21 +1961,22 @@ define half @safe_math_fract_f16(half %x, ptr addrspace(1) nocapture writeonly %
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX7-NEXT: s_mov_b32 s8, 0x7f800000
+; GFX7-NEXT: s_movk_i32 s8, 0x7c00
; GFX7-NEXT: s_mov_b32 s6, 0
; GFX7-NEXT: s_mov_b32 s7, 0xf000
-; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v0
+; GFX7-NEXT: v_and_b32_e32 v0, 0x7fff, v0
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: v_floor_f32_e32 v3, v0
-; GFX7-NEXT: v_sub_f32_e32 v4, v0, v3
-; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
-; GFX7-NEXT: v_min_f32_e32 v4, 0x3f7fe000, v4
-; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc
-; GFX7-NEXT: v_cmp_neq_f32_e64 vcc, |v0|, s8
-; GFX7-NEXT: v_cndmask_b32_e32 v0, 0, v4, vcc
-; GFX7-NEXT: buffer_store_short v3, v[1:2], s[4:7], 0 addr64
+; GFX7-NEXT: v_floor_f32_e32 v4, v3
+; GFX7-NEXT: v_sub_f32_e32 v5, v3, v4
+; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX7-NEXT: v_min_f32_e32 v5, 0x3f7fe000, v5
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX7-NEXT: v_cmp_ne_u32_e32 vcc, s8, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
+; GFX7-NEXT: buffer_store_short v4, v[1:2], s[4:7], 0 addr64
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -2062,12 +2065,12 @@ define <2 x half> @safe_math_fract_v2f16(<2 x half> %x, ptr addrspace(1) nocaptu
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX6-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT: s_mov_b32 s8, 0x7f800000
+; GFX6-NEXT: s_movk_i32 s8, 0x7c00
; GFX6-NEXT: s_mov_b32 s6, 0
; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v1
; GFX6-NEXT: v_cvt_f32_f16_e32 v5, v0
-; GFX6-NEXT: v_cvt_f32_f16_e64 v0, |v0|
-; GFX6-NEXT: v_cvt_f32_f16_e64 v1, |v1|
+; GFX6-NEXT: v_and_b32_e32 v0, 0x7fff, v0
+; GFX6-NEXT: v_and_b32_e32 v1, 0x7fff, v1
; GFX6-NEXT: v_floor_f32_e32 v6, v4
; GFX6-NEXT: v_cvt_f16_f32_e32 v7, v6
; GFX6-NEXT: v_floor_f32_e32 v8, v5
@@ -2080,10 +2083,10 @@ define <2 x half> @safe_math_fract_v2f16(<2 x half> %x, ptr addrspace(1) nocaptu
; GFX6-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
; GFX6-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX6-NEXT: v_cndmask_b32_e32 v5, v8, v5, vcc
-; GFX6-NEXT: v_cmp_neq_f32_e32 vcc, s8, v0
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, s8, v0
; GFX6-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; GFX6-NEXT: v_cndmask_b32_e32 v0, 0, v5, vcc
-; GFX6-NEXT: v_cmp_neq_f32_e32 vcc, s8, v1
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, s8, v1
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
@@ -2098,12 +2101,12 @@ define <2 x half> @safe_math_fract_v2f16(<2 x half> %x, ptr addrspace(1) nocaptu
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX7-NEXT: s_mov_b32 s8, 0x7f800000
+; GFX7-NEXT: s_movk_i32 s8, 0x7c00
; GFX7-NEXT: s_mov_b32 s6, 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v0
-; GFX7-NEXT: v_cvt_f32_f16_e64 v0, |v0|
-; GFX7-NEXT: v_cvt_f32_f16_e64 v1, |v1|
+; GFX7-NEXT: v_and_b32_e32 v0, 0x7fff, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0x7fff, v1
; GFX7-NEXT: v_floor_f32_e32 v6, v4
; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v6
; GFX7-NEXT: v_floor_f32_e32 v8, v5
@@ -2116,10 +2119,10 @@ define <2 x half> @safe_math_fract_v2f16(<2 x half> %x, ptr addrspace(1) nocaptu
; GFX7-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX7-NEXT: v_cndmask_b32_e32 v5, v8, v5, vcc
-; GFX7-NEXT: v_cmp_neq_f32_e32 vcc, s8, v0
+; GFX7-NEXT: v_cmp_ne_u32_e32 vcc, s8, v0
; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; GFX7-NEXT: v_cndmask_b32_e32 v0, 0, v5, vcc
-; GFX7-NEXT: v_cmp_neq_f32_e32 vcc, s8, v1
+; GFX7-NEXT: v_cmp_ne_u32_e32 vcc, s8, v1
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
@@ -2133,16 +2136,16 @@ define <2 x half> @safe_math_fract_v2f16(<2 x half> %x, ptr addrspace(1) nocaptu
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX8-NEXT: s_movk_i32 s6, 0x204
+; GFX8-NEXT: v_mov_b32_e32 v7, 0x204
; GFX8-NEXT: v_floor_f16_e32 v4, v3
; GFX8-NEXT: v_floor_f16_e32 v5, v0
; GFX8-NEXT: v_fract_f16_e32 v6, v3
-; GFX8-NEXT: v_cmp_class_f16_e64 s[4:5], v3, s6
+; GFX8-NEXT: v_cmp_class_f16_e32 vcc, v3, v7
; GFX8-NEXT: v_pack_b32_f16 v4, v5, v4
; GFX8-NEXT: v_fract_f16_e32 v5, v0
-; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, 0, s[4:5]
-; GFX8-NEXT: v_cmp_class_f16_e64 s[4:5], v0, s6
-; GFX8-NEXT: v_cndmask_b32_e64 v0, v5, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, 0, vcc
+; GFX8-NEXT: v_cmp_class_f16_e32 vcc, v0, v7
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v5, 0, vcc
; GFX8-NEXT: v_pack_b32_f16 v0, v0, v3
; GFX8-NEXT: global_store_dword v[1:2], v4, off
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -2237,19 +2240,19 @@ define <2 x double> @safe_math_fract_v2f64(<2 x double> %x, ptr addrspace(1) noc
; GFX6-NEXT: v_cndmask_b32_e32 v11, v11, v3, vcc
; GFX6-NEXT: v_cndmask_b32_e32 v10, v10, v2, vcc
; GFX6-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
-; GFX6-NEXT: s_movk_i32 s10, 0x204
-; GFX6-NEXT: v_cmp_class_f64_e64 s[8:9], v[0:1], s10
+; GFX6-NEXT: v_mov_b32_e32 v14, 0x204
; GFX6-NEXT: v_cndmask_b32_e32 v13, v13, v1, vcc
; GFX6-NEXT: v_cndmask_b32_e32 v12, v12, v0, vcc
-; GFX6-NEXT: v_cndmask_b32_e64 v0, v12, 0, s[8:9]
-; GFX6-NEXT: v_cndmask_b32_e64 v1, v13, 0, s[8:9]
-; GFX6-NEXT: v_cmp_class_f64_e64 s[8:9], v[2:3], s10
+; GFX6-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v14
; GFX6-NEXT: s_mov_b32 s6, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v12, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v13, 0, vcc
+; GFX6-NEXT: v_cmp_class_f64_e32 vcc, v[2:3], v14
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: v_cndmask_b32_e64 v2, v10, 0, s[8:9]
-; GFX6-NEXT: v_cndmask_b32_e64 v3, v11, 0, s[8:9]
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v10, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v11, 0, vcc
; GFX6-NEXT: buffer_store_dwordx4 v[6:9], v[4:5], s[4:7], 0 addr64
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; GFX6-NEXT: s_setpc_b64 s[30:31]
@@ -2257,39 +2260,39 @@ define <2 x double> @safe_math_fract_v2f64(<2 x double> %x, ptr addrspace(1) noc
; GFX7-LABEL: safe_math_fract_v2f64:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: s_movk_i32 s4, 0x204
+; GFX7-NEXT: v_mov_b32_e32 v6, 0x204
; GFX7-NEXT: v_fract_f64_e32 v[10:11], v[0:1]
-; GFX7-NEXT: v_cmp_class_f64_e64 s[8:9], v[0:1], s4
+; GFX7-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v6
; GFX7-NEXT: v_fract_f64_e32 v[12:13], v[2:3]
-; GFX7-NEXT: v_cmp_class_f64_e64 s[10:11], v[2:3], s4
+; GFX7-NEXT: v_cmp_class_f64_e64 s[4:5], v[2:3], v6
; GFX7-NEXT: v_floor_f64_e32 v[8:9], v[2:3]
; GFX7-NEXT: v_floor_f64_e32 v[6:7], v[0:1]
-; GFX7-NEXT: s_mov_b32 s6, 0
-; GFX7-NEXT: s_mov_b32 s7, 0xf000
-; GFX7-NEXT: s_mov_b32 s4, s6
-; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: v_cndmask_b32_e64 v0, v10, 0, s[8:9]
-; GFX7-NEXT: v_cndmask_b32_e64 v1, v11, 0, s[8:9]
-; GFX7-NEXT: v_cndmask_b32_e64 v2, v12, 0, s[10:11]
-; GFX7-NEXT: v_cndmask_b32_e64 v3, v13, 0, s[10:11]
-; GFX7-NEXT: buffer_store_dwordx4 v[6:9], v[4:5], s[4:7], 0 addr64
+; GFX7-NEXT: s_mov_b32 s10, 0
+; GFX7-NEXT: s_mov_b32 s11, 0xf000
+; GFX7-NEXT: s_mov_b32 s8, s10
+; GFX7-NEXT: s_mov_b32 s9, s10
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v10, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v11, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v12, 0, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v3, v13, 0, s[4:5]
+; GFX7-NEXT: buffer_store_dwordx4 v[6:9], v[4:5], s[8:11], 0 addr64
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: safe_math_fract_v2f64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: s_movk_i32 s6, 0x204
+; GFX8-NEXT: v_mov_b32_e32 v6, 0x204
; GFX8-NEXT: v_fract_f64_e32 v[10:11], v[0:1]
-; GFX8-NEXT: v_cmp_class_f64_e64 s[4:5], v[0:1], s6
+; GFX8-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v6
; GFX8-NEXT: v_fract_f64_e32 v[12:13], v[2:3]
-; GFX8-NEXT: v_cmp_class_f64_e64 s[6:7], v[2:3], s6
+; GFX8-NEXT: v_cmp_class_f64_e64 s[4:5], v[2:3], v6
; GFX8-NEXT: v_floor_f64_e32 v[8:9], v[2:3]
; GFX8-NEXT: v_floor_f64_e32 v[6:7], v[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v0, v10, 0, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e64 v1, v11, 0, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e64 v2, v12, 0, s[6:7]
-; GFX8-NEXT: v_cndmask_b32_e64 v3, v13, 0, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v10, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v11, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v12, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v13, 0, s[4:5]
; GFX8-NEXT: global_store_dwordx4 v[4:5], v[6:9], off
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
index e3fada3..b717280 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
@@ -1,71 +1,43 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -amdgpu-atomic-optimizer-strategy=Iterative -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-ITERATIVE %s
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -amdgpu-atomic-optimizer-strategy=DPP -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-DPP %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -passes='amdgpu-atomic-optimizer<strategy=iterative>,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-ITERATIVE %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -passes='amdgpu-atomic-optimizer<strategy=dpp>,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-DPP %s
+
+; Tests various combinations of uniform/divergent address and uniform/divergent value inputs of various types for atomic operations.
+; The optimization is identical for the Iterative and DPP strategies when the value is uniform; the two scan/reduction
+; strategies differ only for divergent values. The optimization also applies to divergent addresses, and the tests cover different synchronization scopes as well.
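+;
+; As a condensed, hand-written sketch (pseudo-IR for illustration only, not autogenerated
+; checks; names are shorthand for the intrinsic calls in the bodies below), the uniform-value
+; rewrite performs one atomic for the whole wave and reconstructs each lane's result:
+;
+;   %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 true)   ; mask of active lanes
+;   %lane   = mbcnt(%ballot)                              ; this lane's index among active lanes
+;   %count  = call i64 @llvm.ctpop.i64(i64 %ballot)       ; number of active lanes
+;   %sum    = fmul float %val, uitofp(%count)             ; the wave's combined contribution
+;   if (%lane == 0) %old = atomicrmw fadd ptr %ptr, float %sum   ; single atomic for the wave
+;   %first  = readfirstlane(bitcast %old)                 ; broadcast the pre-op value
+;   %ret    = fadd float %first, fmul(%val, uitofp(%lane)) ; per-lane offset into the sum
+;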
define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to float
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = fmul float [[VAL:%.*]], [[TMP11]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP18]])
-; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = uitofp i32 [[TMP8]] to float
-; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = fmul float [[VAL]], [[TMP21]]
-; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = fadd float [[TMP20]], [[TMP22]]
-; IR-ITERATIVE-NEXT: br label [[TMP24]]
-; IR-ITERATIVE: 24:
-; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-ITERATIVE-NEXT: ret float [[TMP25]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to float
-; IR-DPP-NEXT: [[TMP12:%.*]] = fmul float [[VAL:%.*]], [[TMP11]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-DPP-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP18]])
-; IR-DPP-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-DPP-NEXT: [[TMP21:%.*]] = uitofp i32 [[TMP8]] to float
-; IR-DPP-NEXT: [[TMP22:%.*]] = fmul float [[VAL]], [[TMP21]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = fadd float [[TMP20]], [[TMP22]]
-; IR-DPP-NEXT: br label [[TMP24]]
-; IR-DPP: 24:
-; IR-DPP-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-DPP-NEXT: ret float [[TMP25]]
+; IR-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
+; IR-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to float
+; IR-NEXT: [[TMP12:%.*]] = fmul float [[VAL:%.*]], [[TMP11]]
+; IR-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR: 14:
+; IR-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP16]]
+; IR: 16:
+; IR-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
+; IR-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP18]])
+; IR-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
+; IR-NEXT: [[TMP21:%.*]] = uitofp i32 [[TMP8]] to float
+; IR-NEXT: [[TMP22:%.*]] = fmul float [[VAL]], [[TMP21]]
+; IR-NEXT: [[TMP23:%.*]] = fadd float [[TMP20]], [[TMP22]]
+; IR-NEXT: br label [[TMP24]]
+; IR: 24:
+; IR-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
+; IR-NEXT: ret float [[TMP25]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic, align 4
ret float %result
@@ -411,7 +383,6 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_str
ret float %result
}
-
define amdgpu_ps float @global_atomic_fsub_uni_address_div_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, float %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fsub_uni_address_div_value_agent_scope_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
@@ -514,61 +485,33 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_div_value_agent_scope_str
}
define amdgpu_ps float @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP20:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
-; IR-ITERATIVE: 10:
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP12]]
-; IR-ITERATIVE: 12:
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]])
-; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = uitofp i32 [[TMP8]] to float
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = select i1 [[TMP9]], float 0x7FF0000000000000, float [[VAL]]
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call float @llvm.minnum.f32(float [[TMP16]], float [[TMP18]])
-; IR-ITERATIVE-NEXT: br label [[TMP20]]
-; IR-ITERATIVE: 20:
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP19]], [[TMP12]] ]
-; IR-ITERATIVE-NEXT: ret float [[TMP21]]
-;
-; IR-DPP-LABEL: @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP20:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-DPP-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
-; IR-DPP: 10:
-; IR-DPP-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP12]]
-; IR-DPP: 12:
-; IR-DPP-NEXT: [[TMP13:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-DPP-NEXT: [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
-; IR-DPP-NEXT: [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]])
-; IR-DPP-NEXT: [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
-; IR-DPP-NEXT: [[TMP17:%.*]] = uitofp i32 [[TMP8]] to float
-; IR-DPP-NEXT: [[TMP18:%.*]] = select i1 [[TMP9]], float 0x7FF0000000000000, float [[VAL]]
-; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.minnum.f32(float [[TMP16]], float [[TMP18]])
-; IR-DPP-NEXT: br label [[TMP20]]
-; IR-DPP: 20:
-; IR-DPP-NEXT: [[TMP21:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP19]], [[TMP12]] ]
-; IR-DPP-NEXT: ret float [[TMP21]]
+; IR-LABEL: @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP20:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR: 10:
+; IR-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP12]]
+; IR: 12:
+; IR-NEXT: [[TMP13:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
+; IR-NEXT: [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
+; IR-NEXT: [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]])
+; IR-NEXT: [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
+; IR-NEXT: [[TMP17:%.*]] = uitofp i32 [[TMP8]] to float
+; IR-NEXT: [[TMP18:%.*]] = select i1 [[TMP9]], float 0x7FF0000000000000, float [[VAL]]
+; IR-NEXT: [[TMP19:%.*]] = call float @llvm.minnum.f32(float [[TMP16]], float [[TMP18]])
+; IR-NEXT: br label [[TMP20]]
+; IR: 20:
+; IR-NEXT: [[TMP21:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP19]], [[TMP12]] ]
+; IR-NEXT: ret float [[TMP21]]
;
%result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
@@ -1007,164 +950,674 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_system_scope_st
ret float %result
}
-
define amdgpu_ps float @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic, align 4
ret float %result
}
define amdgpu_ps float @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr, float %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic, align 4
ret float %result
}
define amdgpu_ps float @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
ret float %result
}
define amdgpu_ps float @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
ret float %result
}
define amdgpu_ps float @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr, float inreg %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
}
-
define amdgpu_ps float @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(ptr addrspace(1) %ptr, float %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fsub ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
}
define amdgpu_ps float @global_atomic_fmin_div_address_uni_value_agent_scope(ptr addrspace(1) %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fmin_div_address_uni_value_agent_scope(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fmin_div_address_uni_value_agent_scope(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fmin_div_address_uni_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
}
define amdgpu_ps float @global_atomic_fmin_div_address_div_value_agent_scope(ptr addrspace(1) %ptr, float %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fmin_div_address_div_value_agent_scope(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fmin_div_address_div_value_agent_scope(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fmin_div_address_div_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
}
define amdgpu_ps float @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
}
define amdgpu_ps float @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
}
define amdgpu_ps float @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(ptr addrspace(1) %ptr, float inreg %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic, align 4
ret float %result
}
define amdgpu_ps float @global_atomic_fadd_div_address_div_value_system_scope_strictfp(ptr addrspace(1) %ptr, float %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_div_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_div_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic, align 4
ret float %result
}
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
+; IR-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to double
+; IR-NEXT: [[TMP12:%.*]] = fmul double [[VAL:%.*]], [[TMP11]]
+; IR-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR: 14:
+; IR-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP16]]
+; IR: 16:
+; IR-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]])
+; IR-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]])
+; IR-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-NEXT: [[TMP27:%.*]] = uitofp i32 [[TMP8]] to double
+; IR-NEXT: [[TMP28:%.*]] = fmul double [[VAL]], [[TMP27]]
+; IR-NEXT: [[TMP29:%.*]] = fadd double [[TMP26]], [[TMP28]]
+; IR-NEXT: br label [[TMP30]]
+; IR: 30:
+; IR-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-NEXT: ret double [[TMP31]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_scope_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_scope_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP30]]
+; IR-ITERATIVE: 30:
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-ITERATIVE-NEXT: ret double [[TMP31]]
+;
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP30]]
+; IR-DPP: 30:
+; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-DPP-NEXT: ret double [[TMP31]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
+; IR-ITERATIVE-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP30]]
+; IR-ITERATIVE: 30:
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-ITERATIVE-NEXT: ret double [[TMP31]]
+;
+; IR-DPP-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP30]]
+; IR-DPP: 30:
+; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-DPP-NEXT: ret double [[TMP31]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP26:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR: 10:
+; IR-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: br label [[TMP12]]
+; IR: 12:
+; IR-NEXT: [[TMP13:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
+; IR-NEXT: [[TMP14:%.*]] = bitcast double [[TMP13]] to i64
+; IR-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
+; IR-NEXT: [[TMP16:%.*]] = lshr i64 [[TMP14]], 32
+; IR-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
+; IR-NEXT: [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP15]])
+; IR-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP17]])
+; IR-NEXT: [[TMP20:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
+; IR-NEXT: [[TMP21:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP19]], i32 1
+; IR-NEXT: [[TMP22:%.*]] = bitcast <2 x i32> [[TMP21]] to double
+; IR-NEXT: [[TMP23:%.*]] = uitofp i32 [[TMP8]] to double
+; IR-NEXT: [[TMP24:%.*]] = select i1 [[TMP9]], double 0x7FF0000000000000, double [[VAL]]
+; IR-NEXT: [[TMP25:%.*]] = call double @llvm.minnum.f64(double [[TMP22]], double [[TMP24]])
+; IR-NEXT: br label [[TMP26]]
+; IR: 26:
+; IR-NEXT: [[TMP27:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP25]], [[TMP12]] ]
+; IR-NEXT: ret double [[TMP27]]
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP26:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR-ITERATIVE: 10:
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP12]]
+; IR-ITERATIVE: 12:
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = bitcast double [[TMP13]] to i64
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = lshr i64 [[TMP14]], 32
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP17]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP19]], i32 1
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = bitcast <2 x i32> [[TMP21]] to double
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = select i1 [[TMP9]], double 0xFFF0000000000000, double [[VAL]]
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP22]], double [[TMP24]], metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP26]]
+; IR-ITERATIVE: 26:
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP25]], [[TMP12]] ]
+; IR-ITERATIVE-NEXT: ret double [[TMP27]]
+;
+; IR-DPP-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP26:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR-DPP: 10:
+; IR-DPP-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP12]]
+; IR-DPP: 12:
+; IR-DPP-NEXT: [[TMP13:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
+; IR-DPP-NEXT: [[TMP14:%.*]] = bitcast double [[TMP13]] to i64
+; IR-DPP-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
+; IR-DPP-NEXT: [[TMP16:%.*]] = lshr i64 [[TMP14]], 32
+; IR-DPP-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
+; IR-DPP-NEXT: [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP15]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP17]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
+; IR-DPP-NEXT: [[TMP21:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP19]], i32 1
+; IR-DPP-NEXT: [[TMP22:%.*]] = bitcast <2 x i32> [[TMP21]] to double
+; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = select i1 [[TMP9]], double 0xFFF0000000000000, double [[VAL]]
+; IR-DPP-NEXT: [[TMP25:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP22]], double [[TMP24]], metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP26]]
+; IR-DPP: 26:
+; IR-DPP-NEXT: [[TMP27:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP25]], [[TMP12]] ]
+; IR-DPP-NEXT: ret double [[TMP27]]
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1{
+; IR-LABEL: @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP30]]
+; IR-ITERATIVE: 30:
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-ITERATIVE-NEXT: ret double [[TMP31]]
+;
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP30]]
+; IR-DPP: 30:
+; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-DPP-NEXT: ret double [[TMP31]]
+;
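+; Both strategies agree on this uniform-value rewrite; as a sketch (TMP
+; numbers refer to the checks above):
+;   mask   = ballot(true)                                ; TMP3
+;   rank   = mbcnt.hi(hi(mask), mbcnt.lo(lo(mask), 0))   ; TMP7/TMP8
+;   count  = ctpop(mask)                                 ; TMP9
+;   if (rank == 0) old = atomicrmw fadd ptr, val * count
+;   old    = readfirstlane(old)       ; broadcast as two i32 halves, TMP22/23
+;   result = old + val * rank
+; Every FP step uses the constrained intrinsics so strictfp semantics hold.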
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret double %result
+}
+
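+; With a divergent double value neither strategy rewrites the operation; the
+; shared IR check below expects the original atomicrmw to survive unchanged.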
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fsub_double_div_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr, double inreg %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_div_address_uni_value_agent_scope_strictfp(
+; IR-NEXT:    [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+  %result = atomicrmw fsub ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fsub_double_div_address_div_value_agent_scope_strictfp(ptr addrspace(1) %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_div_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fmin_double_div_address_uni_value_agent_scope(ptr addrspace(1) %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_div_address_uni_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fmin_double_div_address_div_value_agent_scope(ptr addrspace(1) %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_div_address_div_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
+; IR-LABEL: @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_system_scope_strictfp(ptr addrspace(1) %ptr, double inreg %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_system_scope_strictfp(ptr addrspace(1) %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret double %result
+}
+
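+; Attribute sets used by the tests above: #0 enables unsafe FP atomics, #1
+; combines strictfp with unsafe FP atomics, and #2 is strictfp alone.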
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #1 = { strictfp "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #2 = { strictfp }
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
index 76ec1cc..99d02ff 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
@@ -358,65 +358,6 @@ define amdgpu_gfx i32 @global_atomic_xchg_i32_ret_offset_scalar(ptr addrspace(1)
; ---------------------------------------------------------------------
define void @global_atomic_xchg_f32_noret(ptr addrspace(1) %ptr, float %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_noret:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_load_dword v3, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB0_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_noret:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_load_dword v3, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB0_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_noret:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v3, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB0_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -450,69 +391,6 @@ define void @global_atomic_xchg_f32_noret(ptr addrspace(1) %ptr, float %in) {
}
define void @global_atomic_xchg_f32_noret_offset(ptr addrspace(1) %out, float %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_noret_offset:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT:    flat_load_dword v3, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB1_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_noret_offset:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT:    flat_load_dword v3, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB1_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_noret_offset:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB1_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -549,71 +427,6 @@ define void @global_atomic_xchg_f32_noret_offset(ptr addrspace(1) %out, float %i
}
define float @global_atomic_xchg_f32_ret(ptr addrspace(1) %ptr, float %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_ret:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB2_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v0, v4
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_ret:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB2_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v0, v4
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_ret:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v4, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB2_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v4
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -648,73 +461,6 @@ define float @global_atomic_xchg_f32_ret(ptr addrspace(1) %ptr, float %in) {
}
define float @global_atomic_xchg_f32_ret_offset(ptr addrspace(1) %out, float %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_ret_offset:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_add_i32_e32 v4, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT:    flat_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v3, v0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[4:5], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB3_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_ret_offset:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT:    flat_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v3, v0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[4:5], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB3_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_ret_offset:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB3_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v4
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -752,80 +498,6 @@ define float @global_atomic_xchg_f32_ret_offset(ptr addrspace(1) %out, float %in
}
define amdgpu_gfx void @global_atomic_xchg_f32_noret_scalar(ptr addrspace(1) inreg %ptr, float inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_noret_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v0, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s5
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[34:35], 0
-; GCN1-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v2, s4
-; GCN1-NEXT: v_mov_b32_e32 v0, s6
-; GCN1-NEXT: v_mov_b32_e32 v3, s5
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_cbranch_execnz .LBB4_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_noret_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v0, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s5
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[34:35], 0
-; GCN2-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v2, s4
-; GCN2-NEXT: v_mov_b32_e32 v0, s6
-; GCN2-NEXT: v_mov_b32_e32 v3, s5
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_cbranch_execnz .LBB4_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_noret_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v1, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: v_mov_b32_e32 v0, s6
-; GCN3-NEXT: v_mov_b32_e32 v3, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: v_mov_b32_e32 v1, v0
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB4_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -876,84 +548,6 @@ define amdgpu_gfx void @global_atomic_xchg_f32_noret_scalar(ptr addrspace(1) inr
}
define amdgpu_gfx void @global_atomic_xchg_f32_noret_offset_scalar(ptr addrspace(1) inreg %out, float inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: s_add_u32 s34, s4, 16
-; GCN1-NEXT: s_addc_u32 s35, s5, 0
-; GCN1-NEXT: v_mov_b32_e32 v0, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s35
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[36:37], 0
-; GCN1-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v2, s34
-; GCN1-NEXT: v_mov_b32_e32 v0, s6
-; GCN1-NEXT: v_mov_b32_e32 v3, s35
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_cbranch_execnz .LBB5_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: s_add_u32 s34, s4, 16
-; GCN2-NEXT: s_addc_u32 s35, s5, 0
-; GCN2-NEXT: v_mov_b32_e32 v0, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s35
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[36:37], 0
-; GCN2-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v2, s34
-; GCN2-NEXT: v_mov_b32_e32 v0, s6
-; GCN2-NEXT: v_mov_b32_e32 v3, s35
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_cbranch_execnz .LBB5_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v1, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: v_mov_b32_e32 v0, s6
-; GCN3-NEXT: v_mov_b32_e32 v3, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: v_mov_b32_e32 v1, v0
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB5_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1007,83 +601,6 @@ define amdgpu_gfx void @global_atomic_xchg_f32_noret_offset_scalar(ptr addrspace
}
define amdgpu_gfx float @global_atomic_xchg_f32_ret_scalar(ptr addrspace(1) inreg %ptr, float inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_ret_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v0, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s5
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[34:35], 0
-; GCN1-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v3, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v2, v0
-; GCN1-NEXT: v_mov_b32_e32 v4, s5
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN1-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_cbranch_execnz .LBB6_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_ret_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v0, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s5
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[34:35], 0
-; GCN2-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v3, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v2, v0
-; GCN2-NEXT: v_mov_b32_e32 v4, s5
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN2-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_cbranch_execnz .LBB6_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_ret_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v0, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v3, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v2, v0
-; GCN3-NEXT: v_mov_b32_e32 v4, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB6_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1134,87 +651,6 @@ define amdgpu_gfx float @global_atomic_xchg_f32_ret_scalar(ptr addrspace(1) inre
}
define amdgpu_gfx float @global_atomic_xchg_f32_ret_offset_scalar(ptr addrspace(1) inreg %out, float inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: s_add_u32 s34, s4, 16
-; GCN1-NEXT: s_addc_u32 s35, s5, 0
-; GCN1-NEXT: v_mov_b32_e32 v0, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s35
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[36:37], 0
-; GCN1-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v3, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v2, v0
-; GCN1-NEXT: v_mov_b32_e32 v4, s35
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN1-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_cbranch_execnz .LBB7_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: s_add_u32 s34, s4, 16
-; GCN2-NEXT: s_addc_u32 s35, s5, 0
-; GCN2-NEXT: v_mov_b32_e32 v0, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s35
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[36:37], 0
-; GCN2-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v3, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v2, v0
-; GCN2-NEXT: v_mov_b32_e32 v4, s35
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN2-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_cbranch_execnz .LBB7_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v0, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v3, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v2, v0
-; GCN3-NEXT: v_mov_b32_e32 v4, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB7_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
index d137f47..380ce7f 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
@@ -372,65 +372,6 @@ define amdgpu_gfx i64 @global_atomic_xchg_i64_ret_offset_scalar(ptr addrspace(1)
; ---------------------------------------------------------------------
define void @global_atomic_xchg_f64_noret(ptr addrspace(1) %ptr, double %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_noret:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_load_dword v3, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB0_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_noret:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_load_dword v3, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB0_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_noret:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v3, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB0_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -464,69 +405,6 @@ define void @global_atomic_xchg_f64_noret(ptr addrspace(1) %ptr, double %in) {
}
define void @global_atomic_xchg_f64_noret_offset(ptr addrspace(1) %out, double %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_noret_offset:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT:    flat_load_dword v3, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB1_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_noret_offset:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT:    flat_load_dword v3, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB1_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_noret_offset:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB1_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -563,71 +441,6 @@ define void @global_atomic_xchg_f64_noret_offset(ptr addrspace(1) %out, double %
}
define double @global_atomic_xchg_f64_ret(ptr addrspace(1) %ptr, double %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_ret:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB2_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v0, v4
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_ret:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB2_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v0, v4
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_ret:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v4, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB2_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v4
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -663,73 +476,6 @@ define double @global_atomic_xchg_f64_ret(ptr addrspace(1) %ptr, double %in) {
}
define double @global_atomic_xchg_f64_ret_offset(ptr addrspace(1) %out, double %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_ret_offset:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_add_i32_e32 v4, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT:    flat_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v3, v0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[4:5], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB3_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_ret_offset:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT:    flat_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v3, v0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[4:5], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB3_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_ret_offset:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB3_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v4
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -768,80 +514,6 @@ define double @global_atomic_xchg_f64_ret_offset(ptr addrspace(1) %out, double %
}
define amdgpu_gfx void @global_atomic_xchg_f64_noret_scalar(ptr addrspace(1) inreg %ptr, double inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_noret_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v0, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s5
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[34:35], 0
-; GCN1-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v2, s4
-; GCN1-NEXT: v_mov_b32_e32 v0, s6
-; GCN1-NEXT: v_mov_b32_e32 v3, s5
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_cbranch_execnz .LBB4_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_noret_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v0, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s5
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[34:35], 0
-; GCN2-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v2, s4
-; GCN2-NEXT: v_mov_b32_e32 v0, s6
-; GCN2-NEXT: v_mov_b32_e32 v3, s5
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_cbranch_execnz .LBB4_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_noret_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v1, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: v_mov_b32_e32 v0, s6
-; GCN3-NEXT: v_mov_b32_e32 v3, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: v_mov_b32_e32 v1, v0
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB4_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -896,84 +568,6 @@ define amdgpu_gfx void @global_atomic_xchg_f64_noret_scalar(ptr addrspace(1) inr
}
define amdgpu_gfx void @global_atomic_xchg_f64_noret_offset_scalar(ptr addrspace(1) inreg %out, double inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: s_add_u32 s34, s4, 16
-; GCN1-NEXT: s_addc_u32 s35, s5, 0
-; GCN1-NEXT: v_mov_b32_e32 v0, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s35
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[36:37], 0
-; GCN1-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v2, s34
-; GCN1-NEXT: v_mov_b32_e32 v0, s6
-; GCN1-NEXT: v_mov_b32_e32 v3, s35
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_cbranch_execnz .LBB5_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: s_add_u32 s34, s4, 16
-; GCN2-NEXT: s_addc_u32 s35, s5, 0
-; GCN2-NEXT: v_mov_b32_e32 v0, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s35
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[36:37], 0
-; GCN2-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v2, s34
-; GCN2-NEXT: v_mov_b32_e32 v0, s6
-; GCN2-NEXT: v_mov_b32_e32 v3, s35
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_cbranch_execnz .LBB5_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v1, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: v_mov_b32_e32 v0, s6
-; GCN3-NEXT: v_mov_b32_e32 v3, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: v_mov_b32_e32 v1, v0
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB5_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1029,83 +623,6 @@ define amdgpu_gfx void @global_atomic_xchg_f64_noret_offset_scalar(ptr addrspace
}
define amdgpu_gfx double @global_atomic_xchg_f64_ret_scalar(ptr addrspace(1) inreg %ptr, double inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_ret_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v0, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s5
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[34:35], 0
-; GCN1-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v3, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v2, v0
-; GCN1-NEXT: v_mov_b32_e32 v4, s5
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN1-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_cbranch_execnz .LBB6_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_ret_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v0, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s5
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[34:35], 0
-; GCN2-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v3, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v2, v0
-; GCN2-NEXT: v_mov_b32_e32 v4, s5
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN2-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_cbranch_execnz .LBB6_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_ret_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v0, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v3, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v2, v0
-; GCN3-NEXT: v_mov_b32_e32 v4, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB6_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1160,87 +677,6 @@ define amdgpu_gfx double @global_atomic_xchg_f64_ret_scalar(ptr addrspace(1) inr
}
define amdgpu_gfx double @global_atomic_xchg_f64_ret_offset_scalar(ptr addrspace(1) inreg %out, double inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: s_add_u32 s34, s4, 16
-; GCN1-NEXT: s_addc_u32 s35, s5, 0
-; GCN1-NEXT: v_mov_b32_e32 v0, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s35
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[36:37], 0
-; GCN1-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v3, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v2, v0
-; GCN1-NEXT: v_mov_b32_e32 v4, s35
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN1-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_cbranch_execnz .LBB7_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: s_add_u32 s34, s4, 16
-; GCN2-NEXT: s_addc_u32 s35, s5, 0
-; GCN2-NEXT: v_mov_b32_e32 v0, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s35
-; GCN2-NEXT: global_load_dword v0, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[36:37], 0
-; GCN2-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v3, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v2, v0
-; GCN2-NEXT: v_mov_b32_e32 v4, s35
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN2-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_cbranch_execnz .LBB7_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v0, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v3, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v2, v0
-; GCN3-NEXT: v_mov_b32_e32 v4, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB7_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll
index fab24e1..86e3d93 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-- -amdgpu-atomic-optimizer-strategy=Iterative -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-ITERATIVE %s
-; RUN: opt -S -mtriple=amdgcn-- -amdgpu-atomic-optimizer-strategy=DPP -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-DPP %s
+; RUN: opt -S -mtriple=amdgcn-- -passes='amdgpu-atomic-optimizer<strategy=iterative>,verify<domtree>' %s | FileCheck -check-prefix=IR-ITERATIVE %s
+; RUN: opt -S -mtriple=amdgcn-- -passes='amdgpu-atomic-optimizer<strategy=dpp>,verify<domtree>' %s | FileCheck -check-prefix=IR-DPP %s
declare i32 @llvm.amdgcn.workitem.id.x()
define amdgpu_kernel void @global_atomic_fadd_uni_value(ptr addrspace(1) %ptr) #0 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_value(
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
index f87932b..b9234f4 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
@@ -1,55 +1,35 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -amdgpu-atomic-optimizer-strategy=Iterative -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-ITERATIVE %s
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -amdgpu-atomic-optimizer-strategy=DPP -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-DPP %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -passes='amdgpu-atomic-optimizer<strategy=iterative>,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-ITERATIVE %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -passes='amdgpu-atomic-optimizer<strategy=dpp>,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-DPP %s
+
+; Tests various combinations of uniform/divergent address and uniform/divergent value inputs of various types for atomic operations.
+; The optimization is the same for the Iterative and DPP strategies when the value is uniform; these scan/reduction
+; strategies are only relevant for divergent values. The optimization remains valid for divergent addresses. The tests also cover different scopes.
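+;
+; A minimal sketch (comments only, not FileCheck input) of the uniform-value
+; rewrite the checks below expect: count the active lanes, scale the uniform
+; value once, and let only the first active lane issue the atomic:
+;   %mask = call i64 @llvm.amdgcn.ballot.i64(i1 true)   ; active-lane mask
+;   %cnt  = call i64 @llvm.ctpop.i64(i64 %mask)         ; number of active lanes
+;   %lo   = trunc i64 %cnt to i32
+;   %cntf = uitofp i32 %lo to float
+;   %sum  = fmul float %val, %cntf                      ; val * lane count
+;   ; executed by the first active lane (mbcnt == 0) only:
+;   %old  = atomicrmw fadd ptr addrspace(1) %ptr, float %sum syncscope("agent") monotonic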
define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to float
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = fmul float [[VAL:%.*]], [[TMP11]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: br label [[TMP17]]
-; IR-ITERATIVE: 17:
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to float
-; IR-DPP-NEXT: [[TMP12:%.*]] = fmul float [[VAL:%.*]], [[TMP11]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: br label [[TMP17]]
-; IR-DPP: 17:
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
+; IR-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to float
+; IR-NEXT: [[TMP12:%.*]] = fmul float [[VAL:%.*]], [[TMP11]]
+; IR-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR: 14:
+; IR-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP16]]
+; IR: 16:
+; IR-NEXT: br label [[TMP17]]
+; IR: 17:
+; IR-NEXT: ret void
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic, align 4
ret void
@@ -325,7 +305,6 @@ define amdgpu_ps void @global_atomic_fsub_uni_address_uni_value_agent_scope_stri
ret void
}
-
define amdgpu_ps void @global_atomic_fsub_uni_address_div_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, float %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fsub_uni_address_div_value_agent_scope_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
@@ -409,45 +388,25 @@ define amdgpu_ps void @global_atomic_fsub_uni_address_div_value_agent_scope_stri
}
define amdgpu_ps void @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
-; IR-ITERATIVE: 10:
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP12]]
-; IR-ITERATIVE: 12:
-; IR-ITERATIVE-NEXT: br label [[TMP13]]
-; IR-ITERATIVE: 13:
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-DPP-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
-; IR-DPP: 10:
-; IR-DPP-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP12]]
-; IR-DPP: 12:
-; IR-DPP-NEXT: br label [[TMP13]]
-; IR-DPP: 13:
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR: 10:
+; IR-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP12]]
+; IR: 12:
+; IR-NEXT: br label [[TMP13]]
+; IR: 13:
+; IR-NEXT: ret void
;
%result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret void
@@ -797,161 +756,531 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_system_scope_str
ret void
}
-
define amdgpu_ps void @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic, align 4
ret void
}
define amdgpu_ps void @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr, float %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic, align 4
ret void
}
define amdgpu_ps void @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
ret void
}
define amdgpu_ps void @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
ret void
}
define amdgpu_ps void @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr, float inreg %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret void
}
-
define amdgpu_ps void @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(ptr addrspace(1) %ptr, float %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fsub ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret void
}
define amdgpu_ps void @global_atomic_fmin_div_address_uni_value_agent_scope(ptr addrspace(1) %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fmin_div_address_uni_value_agent_scope(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fmin_div_address_uni_value_agent_scope(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fmin_div_address_uni_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret void
}
define amdgpu_ps void @global_atomic_fmin_div_address_div_value_agent_scope(ptr addrspace(1) %ptr, float %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fmin_div_address_div_value_agent_scope(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fmin_div_address_div_value_agent_scope(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fmin_div_address_div_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret void
}
define amdgpu_ps void @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1{
+; IR-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(ptr addrspace(1) %ptr, float inreg %val) #2 {
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_div_address_div_value_system_scope_strictfp(ptr addrspace(1) %ptr, float %val) #2 {
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
+; IR-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to double
+; IR-NEXT: [[TMP12:%.*]] = fmul double [[VAL:%.*]], [[TMP11]]
+; IR-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR: 14:
+; IR-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP16]]
+; IR: 16:
+; IR-NEXT: br label [[TMP17]]
+; IR: 17:
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_scope_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_scope_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: br label [[TMP17]]
+; IR-ITERATIVE: 17:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: br label [[TMP17]]
+; IR-DPP: 17:
; IR-DPP-NEXT: ret void
;
- %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
ret void
}
-define amdgpu_ps void @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
+; IR-ITERATIVE-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: br label [[TMP17]]
+; IR-ITERATIVE: 17:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-DPP-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: br label [[TMP17]]
+; IR-DPP: 17:
; IR-DPP-NEXT: ret void
;
- %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
ret void
}
-define amdgpu_ps void @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(ptr addrspace(1) %ptr, float inreg %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+define amdgpu_ps void @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR: 10:
+; IR-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: br label [[TMP12]]
+; IR: 12:
+; IR-NEXT: br label [[TMP13]]
+; IR: 13:
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR-ITERATIVE: 10:
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP12]]
+; IR-ITERATIVE: 12:
+; IR-ITERATIVE-NEXT: br label [[TMP13]]
+; IR-ITERATIVE: 13:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+; IR-DPP-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR-DPP: 10:
+; IR-DPP-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP12]]
+; IR-DPP: 12:
+; IR-DPP-NEXT: br label [[TMP13]]
+; IR-DPP: 13:
; IR-DPP-NEXT: ret void
;
- %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic, align 4
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
ret void
}
-define amdgpu_ps void @global_atomic_fadd_div_address_div_value_system_scope_strictfp(ptr addrspace(1) %ptr, float %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_div_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+define amdgpu_ps void @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: br label [[TMP17]]
+; IR-ITERATIVE: 17:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_div_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: br label [[TMP17]]
+; IR-DPP: 17:
; IR-DPP-NEXT: ret void
;
- %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic, align 4
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fsub_double_div_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr, double inreg %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_div_address_uni_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fsub_double_div_address_div_value_agent_scope_strictfp(ptr addrspace(1) %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_div_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmin_double_div_address_uni_value_agent_scope(ptr addrspace(1) %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_div_address_uni_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmin_double_div_address_div_value_agent_scope(ptr addrspace(1) %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_div_address_div_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_system_scope_strictfp(ptr addrspace(1) %ptr, double inreg %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_system_scope_strictfp(ptr addrspace(1) %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
index 96c615b..4f00d48 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
@@ -13,6 +13,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-atomic-optimizer-strategy=DPP -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX1132-DPP %s
declare float @div.float.value()
+declare double @div.double.value()
define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe:
@@ -5408,6 +5409,5583 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_defalut_scop
ret void
}
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB9_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX7LESS-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX7LESS-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[41:42]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2
+; GFX7LESS-NEXT: .LBB9_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB9_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_2
+; GFX9-NEXT: .LBB9_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB9_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1064-NEXT: .LBB9_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB9_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1032-NEXT: .LBB9_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB9_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1164-NEXT: .LBB9_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB9_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1132-NEXT: .LBB9_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX9-DPP-NEXT: .LBB9_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1064-DPP-NEXT: .LBB9_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1032-DPP-NEXT: .LBB9_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1164-DPP-NEXT: .LBB9_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1132-DPP-NEXT: .LBB9_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[40:41]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value()
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ ret void
+}
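+
+; Note on the function above: with `align 4` the 8-byte double atomic is
+; under-aligned, so the checks show it lowered to a retry loop around the
+; generic __atomic_compare_exchange libcall (expected/desired values spilled
+; to scratch, the returned success bit feeding the exec-mask loop exit).
+; A minimal IR-level sketch of that shape, with illustrative names only --
+; not the literal output of the expansion pass:
+;
+;   %old = load double, ptr addrspace(1) %ptr, align 4
+;   br label %atomicrmw.start
+; atomicrmw.start:
+;   %loaded = load double, ptr %expected.addr        ; loop-carried in memory
+;   %new = fadd double %loaded, %divValue
+;   %ok = call i1 @__atomic_compare_exchange(i64 8, ptr %ptr.cast,
+;                  ptr %expected.addr, ptr %new.addr, i32 0, i32 0)
+;   br i1 %ok, label %atomicrmw.end, label %atomicrmw.start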
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB11_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_2
+; GFX7LESS-NEXT: .LBB11_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB11_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_2
+; GFX9-NEXT: .LBB11_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB11_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1064-NEXT: .LBB11_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB11_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1032-NEXT: .LBB11_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB11_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1164-NEXT: .LBB11_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB11_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1132-NEXT: .LBB11_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX9-DPP-NEXT: .LBB11_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1064-DPP-NEXT: .LBB11_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1032-DPP-NEXT: .LBB11_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1164-DPP-NEXT: .LBB11_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1132-DPP-NEXT: .LBB11_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") monotonic
+ ret void
+}
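+
+; Note on the function above: for the uniform 4.0 operand the checks verify
+; the single-atomic optimization: one elected lane scales the constant by
+; the number of active lanes and issues a single inline cmpswap retry loop
+; (global_atomic_cmpswap_x2 / _b64) instead of one atomic per lane. The
+; lane count is converted to double with the usual 2^52 magic-number trick;
+; an illustrative sketch of that scalar math:
+;
+;   n  = popcount(exec)                                  ; s_bcnt1_i32
+;   dn = bits(0x43300000_00000000 | n) + bits(0xC3300000_00000000)
+;      = (2^52 + n) - 2^52 = (double)n                   ; v_add_f64
+;   addend = 4.0 * dn                                    ; v_mul_f64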
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB12_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value() strictfp
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("one-as") monotonic
+ ret void
+}
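+
+; Note on the function above: in contrast to the align-4 variant, this
+; atomicrmw is naturally aligned, so every run line expands it to the inline
+; cmpswap retry loop (buffer_atomic_cmpswap_x2 / global_atomic_cmpswap_x2 /
+; global_atomic_cmpswap_b64) rather than calling __atomic_compare_exchange.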
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB13_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB13_2
+; GFX7LESS-NEXT: .LBB13_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB13_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB13_2
+; GFX9-NEXT: .LBB13_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB13_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1064-NEXT: .LBB13_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB13_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1032-NEXT: .LBB13_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB13_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1164-NEXT: .LBB13_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB13_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1132-NEXT: .LBB13_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX9-DPP-NEXT: .LBB13_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1064-DPP-NEXT: .LBB13_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1032-DPP-NEXT: .LBB13_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1164-DPP-NEXT: .LBB13_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1132-DPP-NEXT: .LBB13_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic
+ ret void
+}
+
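+; Divergent operand at agent scope: the value comes from a call to
+; @div.double.value, so the uniform-value shortcut does not apply. Every
+; target lowers the fadd to a global load followed by a cmpxchg retry loop
+; (buffer/global_atomic_cmpswap_x2 or _b64) with the usual exec-mask exit.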
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB14_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic
+ ret void
+}
+
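+; Same divergent-operand pattern with attribute set #1 (the helper called
+; here is @div.float.value); the lowering is unchanged: call, load, then a
+; 64-bit cmpxchg retry loop until the swap succeeds.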
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB15_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value() strictfp
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic
+ ret void
+}
+
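+; The double atomicrmw below is only 4-byte aligned (under-aligned for an
+; 8-byte atomic), so on every target it is expanded to a retry loop around
+; the __atomic_compare_exchange libcall instead of an inline cmpxchg.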
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB16_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s1, 0x43300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[41:42]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB16_2
+; GFX7LESS-NEXT: .LBB16_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB16_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s1, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB16_2
+; GFX9-NEXT: .LBB16_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB16_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1064-NEXT: .LBB16_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB16_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1032-NEXT: .LBB16_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_bcnt1_i32_b64 s0, exec
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1164-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB16_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1164-NEXT: .LBB16_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1132-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB16_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1132-NEXT: .LBB16_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX9-DPP-NEXT: .LBB16_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1064-DPP-NEXT: .LBB16_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1032-DPP-NEXT: .LBB16_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1164-DPP-NEXT: .LBB16_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1132-DPP-NEXT: .LBB16_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ ret void
+}
+
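+; As above, but the input value is divergent (produced by the call to
+; @div.float.value), so the expansion again loops around the
+; __atomic_compare_exchange libcall with per-lane addends.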
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[40:41]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB17_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value() strictfp
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ ret void
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #1 = { strictfp "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #2 = { strictfp }
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
index 3cc5a4c..622be43 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
@@ -13,6 +13,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-atomic-optimizer-strategy=DPP -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX1132-DPP %s
declare float @div.float.value()
+declare double @div.double.value()
define amdgpu_kernel void @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe:
@@ -3550,6 +3551,3965 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_defalut_scop
ret void
}
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB6_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], 4.0
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB6_2
+; GFX7LESS-NEXT: .LBB6_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB6_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB6_2
+; GFX9-NEXT: .LBB6_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB6_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1064-NEXT: .LBB6_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB6_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1032-NEXT: .LBB6_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB6_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1164-NEXT: .LBB6_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB6_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1132-NEXT: .LBB6_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX9-DPP-NEXT: .LBB6_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1064-DPP-NEXT: .LBB6_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1032-DPP-NEXT: .LBB6_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1164-DPP-NEXT: .LBB6_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1132-DPP-NEXT: .LBB6_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB7_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB8_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v5
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB8_2
+; GFX7LESS-NEXT: .LBB8_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB8_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB8_2
+; GFX9-NEXT: .LBB8_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB8_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1064-NEXT: .LBB8_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s3, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB8_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1032-NEXT: .LBB8_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB8_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1164-NEXT: .LBB8_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB8_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1132-NEXT: .LBB8_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX9-DPP-NEXT: .LBB8_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1064-DPP-NEXT: .LBB8_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s3, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1032-DPP-NEXT: .LBB8_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1164-DPP-NEXT: .LBB8_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1132-DPP-NEXT: .LBB8_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue syncscope("one-as") monotonic
+ ret void
+}
+
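+; The two kernels that follow perform the atomicrmw with only 4-byte alignment
+; (see the "align 4" on the atomicrmw below), under the natural 8-byte
+; alignment of double, so the retry loop cannot use an inline cmpxchg and
+; instead calls the __atomic_compare_exchange runtime helper on every
+; iteration. A hedged sketch of that libcall, assuming the standard
+; atomic-library interface (parameter names are illustrative):
+;
+;   declare i1 @__atomic_compare_exchange(i64 %size, ptr %obj, ptr %expected,
+;                                         ptr %desired, i32 %success_order,
+;                                         i32 %failure_order)
+;
+; which is consistent with the size operand of 8 and the scratch stores of the
+; expected value (offset 0) and desired value (offset 8) in the checks below.
+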
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB10_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], 4.0
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_2
+; GFX7LESS-NEXT: .LBB10_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB10_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_2
+; GFX9-NEXT: .LBB10_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB10_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1064-NEXT: .LBB10_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB10_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1032-NEXT: .LBB10_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB10_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1164-NEXT: .LBB10_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB10_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1132-NEXT: .LBB10_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX9-DPP-NEXT: .LBB10_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1064-DPP-NEXT: .LBB10_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1032-DPP-NEXT: .LBB10_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1164-DPP-NEXT: .LBB10_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1132-DPP-NEXT: .LBB10_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ ret void
+}
+
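+; In the uni_address_uni_value kernels above, only one lane of the wavefront
+; performs the atomic sequence: the v_mbcnt_lo/v_mbcnt_hi over exec computes
+; each lane's index among the active lanes, and execution is then masked down
+; to the first active lane. A rough IR equivalent of that election, assuming
+; the amdgcn mbcnt intrinsics (a sketch, not update-script output; the wave32
+; checks use only the low half):
+;
+;   %lo    = call i32 @llvm.amdgcn.mbcnt.lo(i32 %exec.lo, i32 0)
+;   %lane  = call i32 @llvm.amdgcn.mbcnt.hi(i32 %exec.hi, i32 %lo)
+;   %first = icmp eq i32 %lane, 0
+;   br i1 %first, label %do.atomic, label %exit ; s_and_saveexec + s_cbranch_execz
+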
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ ret void
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
!llvm.module.flags = !{!0}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
index 314c52a..49d415c 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
@@ -13,6 +13,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-atomic-optimizer-strategy=DPP -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX1132-DPP %s
declare float @div.float.value()
+declare double @div.double.value()
define amdgpu_kernel void @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe:
@@ -3550,6 +3551,3965 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_defalut_scop
ret void
}
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB6_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[2:3], 4.0
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB6_2
+; GFX7LESS-NEXT: .LBB6_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB6_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB6_2
+; GFX9-NEXT: .LBB6_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB6_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1064-NEXT: .LBB6_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB6_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1032-NEXT: .LBB6_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB6_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1164-NEXT: .LBB6_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB6_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1132-NEXT: .LBB6_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX9-DPP-NEXT: .LBB6_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1064-DPP-NEXT: .LBB6_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1032-DPP-NEXT: .LBB6_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1164-DPP-NEXT: .LBB6_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1132-DPP-NEXT: .LBB6_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB7_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB8_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v5
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB8_2
+; GFX7LESS-NEXT: .LBB8_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB8_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB8_2
+; GFX9-NEXT: .LBB8_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB8_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1064-NEXT: .LBB8_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s3, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB8_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1032-NEXT: .LBB8_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB8_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1164-NEXT: .LBB8_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB8_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1132-NEXT: .LBB8_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX9-DPP-NEXT: .LBB8_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1064-DPP-NEXT: .LBB8_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s3, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1032-DPP-NEXT: .LBB8_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1164-DPP-NEXT: .LBB8_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1132-DPP-NEXT: .LBB8_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB10_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[2:3], 4.0
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_2
+; GFX7LESS-NEXT: .LBB10_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB10_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_2
+; GFX9-NEXT: .LBB10_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB10_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1064-NEXT: .LBB10_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB10_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1032-NEXT: .LBB10_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB10_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1164-NEXT: .LBB10_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB10_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1132-NEXT: .LBB10_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX9-DPP-NEXT: .LBB10_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1064-DPP-NEXT: .LBB10_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1032-DPP-NEXT: .LBB10_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1164-DPP-NEXT: .LBB10_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1132-DPP-NEXT: .LBB10_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ ret void
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
!llvm.module.flags = !{!0}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
index bc9125e..7a7ddbe 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
@@ -13,6 +13,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-atomic-optimizer-strategy=DPP -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX1132-DPP %s
declare float @div.float.value()
+declare double @div.double.value()
define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fsub_uni_address_uni_value_agent_scope_unsafe:
@@ -5616,6 +5617,5581 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_defalut_scop
ret void
}
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB9_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX7LESS-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX7LESS-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[41:42]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2
+; GFX7LESS-NEXT: .LBB9_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB9_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_2
+; GFX9-NEXT: .LBB9_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB9_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1064-NEXT: .LBB9_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB9_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1032-NEXT: .LBB9_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB9_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1164-NEXT: .LBB9_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB9_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1132-NEXT: .LBB9_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX9-DPP-NEXT: .LBB9_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1064-DPP-NEXT: .LBB9_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1032-DPP-NEXT: .LBB9_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1164-DPP-NEXT: .LBB9_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1132-DPP-NEXT: .LBB9_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ ret void
+}
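+
+; A minimal IR-level sketch (not part of the generated checks) of the CAS
+; expansion the checks above verify: because the f64 atomicrmw is only
+; align 4, below the natural 8-byte alignment, it is lowered to a
+; compare-exchange loop through the generic __atomic_compare_exchange
+; libcall instead of a native atomic. The function and value names below
+; are illustrative only, and the libcall prototype is assumed from the
+; standard atomic library ABI; monotonic ordering maps to the relaxed
+; memory-order value 0 passed twice.
+define void @fsub_f64_libcall_loop_sketch(ptr addrspace(1) %ptr) {
+entry:
+  %flat = addrspacecast ptr addrspace(1) %ptr to ptr
+  %expected = alloca double, align 8
+  %desired = alloca double, align 8
+  %init = load double, ptr %flat, align 4
+  br label %atomicrmw.start
+
+atomicrmw.start:                     ; the loop the .LBB*_2 blocks implement
+  %loaded = phi double [ %init, %entry ], [ %new, %atomicrmw.start ]
+  %sub = fsub double %loaded, 4.0   ; matches v_add_f64 ..., -v[41:42] above
+  store double %loaded, ptr %expected, align 8
+  store double %sub, ptr %desired, align 8
+  %ok = call zeroext i1 @__atomic_compare_exchange(i64 8, ptr %flat, ptr %expected, ptr %desired, i32 0, i32 0)
+  %new = load double, ptr %expected, align 8
+  br i1 %ok, label %atomicrmw.end, label %atomicrmw.start
+
+atomicrmw.end:
+  ret void
+}
+
+declare zeroext i1 @__atomic_compare_exchange(i64, ptr, ptr, ptr, i32, i32)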
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[40:41]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value()
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB11_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_2
+; GFX7LESS-NEXT: .LBB11_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB11_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_2
+; GFX9-NEXT: .LBB11_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB11_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1064-NEXT: .LBB11_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB11_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1032-NEXT: .LBB11_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB11_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1164-NEXT: .LBB11_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB11_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1132-NEXT: .LBB11_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX9-DPP-NEXT: .LBB11_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1064-DPP-NEXT: .LBB11_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1032-DPP-NEXT: .LBB11_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1164-DPP-NEXT: .LBB11_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1132-DPP-NEXT: .LBB11_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB12_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value() strictfp
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB13_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB13_2
+; GFX7LESS-NEXT: .LBB13_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB13_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB13_2
+; GFX9-NEXT: .LBB13_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB13_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1064-NEXT: .LBB13_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB13_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1032-NEXT: .LBB13_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB13_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1164-NEXT: .LBB13_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB13_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1132-NEXT: .LBB13_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX9-DPP-NEXT: .LBB13_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1064-DPP-NEXT: .LBB13_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1032-DPP-NEXT: .LBB13_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1164-DPP-NEXT: .LBB13_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1132-DPP-NEXT: .LBB13_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic
+ ret void
+}
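+; Reading aid for the uniform-value blocks above (plain comments, not FileCheck
+; input): only one lane performs the RMW, so %bb.1 first converts the
+; active-lane count to a double with the usual magic-number trick, roughly:
+;   %cnt  = s_bcnt1(exec)                            ; number of active lanes
+;   %bits = { lo: %cnt, hi: 0x43300000 }             ; bit pattern of 2^52 + %cnt
+;   %fcnt = bitcast_to_double(%bits) + (-0x1.0p+52)  ; == (double)%cnt
+;   %sub  = 4.0 * %fcnt                              ; uniform operand scaled once
+; GFX11 assembles the same bit pattern through scratch stores/loads instead of
+; an SGPR pair; the cmpswap retry loop that follows is the same on every target.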
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB14_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic
+ ret void
+}
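+; The retry loops above all come from the same CAS expansion of the fsub; a
+; minimal IR sketch of that pattern (value names hypothetical, not checked):
+;   atomicrmw.start:
+;     %loaded  = phi i64 [ %init, %0 ], [ %new.old, %atomicrmw.start ]
+;     %old.f   = bitcast i64 %loaded to double
+;     %new.f   = fsub double %old.f, %divValue
+;     %new.i   = bitcast double %new.f to i64
+;     %pair    = cmpxchg ptr addrspace(1) %ptr, i64 %loaded, i64 %new.i
+;                syncscope("agent") monotonic monotonic
+;     %new.old = extractvalue { i64, i1 } %pair, 0
+;     %ok      = extractvalue { i64, i1 } %pair, 1
+;     br i1 %ok, label %atomicrmw.end, label %atomicrmw.start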
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB15_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value() strictfp
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB16_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s1, 0x43300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[41:42]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB16_2
+; GFX7LESS-NEXT: .LBB16_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB16_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s1, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB16_2
+; GFX9-NEXT: .LBB16_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB16_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1064-NEXT: .LBB16_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB16_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1032-NEXT: .LBB16_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_bcnt1_i32_b64 s0, exec
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1164-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB16_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1164-NEXT: .LBB16_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1132-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB16_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1132-NEXT: .LBB16_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX9-DPP-NEXT: .LBB16_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1064-DPP-NEXT: .LBB16_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1032-DPP-NEXT: .LBB16_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1164-DPP-NEXT: .LBB16_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1132-DPP-NEXT: .LBB16_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[40:41]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB17_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value() strictfp
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ ret void
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #1 = { strictfp "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
 attributes #2 = { strictfp }
diff --git a/llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir b/llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir
index bdd89a9..dde84af 100644
--- a/llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir
+++ b/llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir
@@ -13,6 +13,7 @@
name: greedy_fail_alloc_sgpr1024_spill
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
explicitKernArgSize: 16
diff --git a/llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll b/llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll
index a5792bf..4c21f87 100644
--- a/llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll
+++ b/llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll
@@ -258,25 +258,25 @@ attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memo
;.
; V4: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; V4: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; V4: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V4: attributes #[[ATTR3]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V4: attributes #[[ATTR4]] = { "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V4: attributes #[[ATTR5]] = { "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR4]] = { "amdgpu-no-agpr" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR5]] = { "amdgpu-no-agpr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
;.
; V5: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; V5: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; V5: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V5: attributes #[[ATTR3]] = { "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V5: attributes #[[ATTR4]] = { "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V5: attributes #[[ATTR5]] = { "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR4]] = { "amdgpu-no-agpr" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR5]] = { "amdgpu-no-agpr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
;.
; V6: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; V6: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; V6: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V6: attributes #[[ATTR3]] = { "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V6: attributes #[[ATTR4]] = { "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V6: attributes #[[ATTR5]] = { "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR4]] = { "amdgpu-no-agpr" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR5]] = { "amdgpu-no-agpr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
;.
; V4: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 400}
;.
diff --git a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll
index e015095a..ab160ff 100644
--- a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll
+++ b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll
@@ -92,7 +92,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc(<4 x i32> inreg %a, <4 x i32> %b
; DAGISEL-GFX11-NEXT: $vgpr5 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr6 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr7 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -122,7 +121,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc(<4 x i32> inreg %a, <4 x i32> %b
; DAGISEL-GFX10-NEXT: $vgpr5 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr6 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr7 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -234,7 +232,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_ptr(ptr inreg %a, ptr %b, ptr ad
; DAGISEL-GFX11-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -272,7 +269,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_ptr(ptr inreg %a, ptr %b, ptr ad
; DAGISEL-GFX10-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -404,7 +400,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_struct( {ptr, i32, <4 x i32>} in
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr12 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr13 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF2:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -454,7 +449,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_struct( {ptr, i32, <4 x i32>} in
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr12 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr13 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF2:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -506,7 +500,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_float(float inreg %a, float %b)
; DAGISEL-GFX11-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
; DAGISEL-GFX11-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -524,7 +517,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_float(float inreg %a, float %b)
; DAGISEL-GFX10-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -576,7 +568,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_half(half inreg %a, half %b) {
; DAGISEL-GFX11-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
; DAGISEL-GFX11-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -594,7 +585,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_half(half inreg %a, half %b) {
; DAGISEL-GFX10-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -646,7 +636,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a, bfloat %
; DAGISEL-GFX11-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
; DAGISEL-GFX11-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -664,7 +653,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a, bfloat %
; DAGISEL-GFX10-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -716,7 +704,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_i16(i16 inreg %a, i16 %b) {
; DAGISEL-GFX11-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
; DAGISEL-GFX11-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -734,7 +721,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_i16(i16 inreg %a, i16 %b) {
; DAGISEL-GFX10-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -870,7 +856,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_v16i16(<16 x i16> inreg %a, <16
; DAGISEL-GFX11-NEXT: $vgpr13 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr14 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr15 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -916,7 +901,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_v16i16(<16 x i16> inreg %a, <16
; DAGISEL-GFX10-NEXT: $vgpr13 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr14 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr15 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -2480,7 +2464,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_many_regs(<36 x i32> inreg %a, <128
; DAGISEL-GFX11-NEXT: $vgpr29 = COPY [[COPY134]]
; DAGISEL-GFX11-NEXT: $vgpr30 = COPY [[COPY133]]
; DAGISEL-GFX11-NEXT: $vgpr31 = COPY [[COPY132]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 528, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -2827,7 +2810,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_many_regs(<36 x i32> inreg %a, <128
; DAGISEL-GFX10-NEXT: $vgpr29 = COPY [[COPY134]]
; DAGISEL-GFX10-NEXT: $vgpr30 = COPY [[COPY133]]
; DAGISEL-GFX10-NEXT: $vgpr31 = COPY [[COPY132]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 528, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
diff --git a/llvm/test/CodeGen/AMDGPU/itofp.i128.ll b/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
new file mode 100644
index 0000000..bfeb214
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
@@ -0,0 +1,1618 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GISEL %s
+
+define float @sitofp_i128_to_f32(i128 %x) {
+; SDAG-LABEL: sitofp_i128_to_f32:
+; SDAG: ; %bb.0: ; %itofp-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_or_b32_e32 v5, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v4, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v4, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB0_14
+; SDAG-NEXT: ; %bb.1: ; %itofp-if-end
+; SDAG-NEXT: v_ashrrev_i32_e32 v5, 31, v3
+; SDAG-NEXT: v_xor_b32_e32 v0, v5, v0
+; SDAG-NEXT: v_xor_b32_e32 v1, v5, v1
+; SDAG-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v5
+; SDAG-NEXT: v_xor_b32_e32 v2, v5, v2
+; SDAG-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v5, vcc
+; SDAG-NEXT: v_xor_b32_e32 v6, v5, v3
+; SDAG-NEXT: v_subb_co_u32_e32 v4, vcc, v2, v5, vcc
+; SDAG-NEXT: v_subb_co_u32_e32 v5, vcc, v6, v5, vcc
+; SDAG-NEXT: v_ffbh_u32_e32 v2, v4
+; SDAG-NEXT: v_add_u32_e32 v2, 32, v2
+; SDAG-NEXT: v_ffbh_u32_e32 v6, v5
+; SDAG-NEXT: v_min_u32_e32 v2, v2, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v6, v0
+; SDAG-NEXT: v_add_u32_e32 v6, 32, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v7, v1
+; SDAG-NEXT: v_min_u32_e32 v6, v6, v7
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_add_u32_e32 v6, 64, v6
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v6, v2, vcc
+; SDAG-NEXT: v_sub_u32_e32 v6, 0x80, v7
+; SDAG-NEXT: v_sub_u32_e32 v2, 0x7f, v7
+; SDAG-NEXT: v_cmp_gt_i32_e32 vcc, 25, v6
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
+; SDAG-NEXT: v_add_u32_e32 v4, 0xffffff98, v7
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v4, v[0:1]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
+; SDAG-NEXT: v_cndmask_b32_e32 v8, 0, v0, vcc
+; SDAG-NEXT: ; implicit-def: $vgpr6
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr7
+; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB0_13
+; SDAG-NEXT: ; %bb.4: ; %NodeBlock
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, 25, v6
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB0_8
+; SDAG-NEXT: ; %bb.5: ; %LeafBlock
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 26, v6
+; SDAG-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB0_7
+; SDAG-NEXT: ; %bb.6: ; %itofp-sw-default
+; SDAG-NEXT: v_sub_u32_e32 v12, 0x66, v7
+; SDAG-NEXT: v_sub_u32_e32 v10, 64, v12
+; SDAG-NEXT: v_lshrrev_b64 v[8:9], v12, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[10:11], v10, v[4:5]
+; SDAG-NEXT: v_sub_u32_e32 v13, 38, v7
+; SDAG-NEXT: v_or_b32_e32 v11, v9, v11
+; SDAG-NEXT: v_or_b32_e32 v10, v8, v10
+; SDAG-NEXT: v_lshrrev_b64 v[8:9], v13, v[4:5]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v12
+; SDAG-NEXT: v_add_u32_e32 v14, 26, v7
+; SDAG-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v12
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[10:11], v13, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[12:13], v14, v[4:5]
+; SDAG-NEXT: v_subrev_u32_e32 v7, 38, v7
+; SDAG-NEXT: v_cndmask_b32_e64 v15, v8, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[7:8], v7, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v9, v1, s[4:5]
+; SDAG-NEXT: v_or_b32_e32 v11, v13, v11
+; SDAG-NEXT: v_or_b32_e32 v10, v12, v10
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v14
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v14, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v8, v11, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v14
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v5, v8, v5, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v4, v7, v4, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; SDAG-NEXT: v_or_b32_e32 v1, v1, v5
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v4
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v8, v15, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, v8
+; SDAG-NEXT: v_mov_b32_e32 v1, v9
+; SDAG-NEXT: .LBB0_7: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB0_8: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.9: ; %itofp-sw-bb
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; SDAG-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: v_lshrrev_b32_e32 v4, 2, v0
+; SDAG-NEXT: v_and_or_b32 v0, v4, 1, v0
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT: v_and_b32_e32 v4, 0x4000000, v0
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; SDAG-NEXT: v_alignbit_b32 v8, v1, v0, 2
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: ; %bb.11: ; %itofp-if-then20
+; SDAG-NEXT: v_alignbit_b32 v8, v1, v0, 3
+; SDAG-NEXT: v_mov_b32_e32 v2, v6
+; SDAG-NEXT: ; %bb.12: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB0_13: ; %Flow4
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_and_b32_e32 v0, 0x80000000, v3
+; SDAG-NEXT: v_lshl_add_u32 v1, v2, 23, 1.0
+; SDAG-NEXT: v_and_b32_e32 v2, 0x7fffff, v8
+; SDAG-NEXT: v_or3_b32 v4, v2, v0, v1
+; SDAG-NEXT: .LBB0_14: ; %Flow5
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, v4
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: sitofp_i128_to_f32:
+; GISEL: ; %bb.0: ; %itofp-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_or_b32_e32 v4, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v5, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; GISEL-NEXT: s_mov_b32 s4, 0
+; GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB0_14
+; GISEL-NEXT: ; %bb.1: ; %itofp-if-end
+; GISEL-NEXT: v_ashrrev_i32_e32 v6, 31, v3
+; GISEL-NEXT: v_xor_b32_e32 v0, v6, v0
+; GISEL-NEXT: v_xor_b32_e32 v1, v6, v1
+; GISEL-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v6
+; GISEL-NEXT: v_xor_b32_e32 v2, v6, v2
+; GISEL-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v6, vcc
+; GISEL-NEXT: v_xor_b32_e32 v3, v6, v3
+; GISEL-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v6, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v0
+; GISEL-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v6, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v4, v1
+; GISEL-NEXT: v_add_u32_e32 v5, 32, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v7, v2
+; GISEL-NEXT: v_min_u32_e32 v4, v4, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v3
+; GISEL-NEXT: v_add_u32_e32 v7, 32, v7
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_add_u32_e32 v4, 64, v4
+; GISEL-NEXT: v_min_u32_e32 v5, v5, v7
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v5, v4, vcc
+; GISEL-NEXT: v_sub_u32_e32 v8, 0x80, v5
+; GISEL-NEXT: v_sub_u32_e32 v7, 0x7f, v5
+; GISEL-NEXT: v_cmp_ge_i32_e32 vcc, 24, v8
+; GISEL-NEXT: ; implicit-def: $vgpr4
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
+; GISEL-NEXT: v_add_u32_e32 v2, 0xffffff98, v5
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: ; implicit-def: $vgpr0
+; GISEL-NEXT: ; implicit-def: $vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr2
+; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB0_13
+; GISEL-NEXT: ; %bb.4: ; %NodeBlock
+; GISEL-NEXT: v_cmp_le_i32_e32 vcc, 26, v8
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB0_8
+; GISEL-NEXT: ; %bb.5: ; %LeafBlock
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 26, v8
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB0_7
+; GISEL-NEXT: ; %bb.6: ; %itofp-sw-default
+; GISEL-NEXT: v_sub_u32_e32 v4, 0x66, v5
+; GISEL-NEXT: v_sub_u32_e32 v11, 64, v4
+; GISEL-NEXT: v_lshrrev_b64 v[9:10], v4, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[11:12], v11, v[2:3]
+; GISEL-NEXT: v_subrev_u32_e32 v13, 64, v4
+; GISEL-NEXT: v_or_b32_e32 v11, v9, v11
+; GISEL-NEXT: v_or_b32_e32 v12, v10, v12
+; GISEL-NEXT: v_lshrrev_b64 v[9:10], v13, v[2:3]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
+; GISEL-NEXT: v_add_u32_e32 v5, 26, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GISEL-NEXT: v_sub_u32_e32 v11, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v13, v9, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v10, v1, vcc
+; GISEL-NEXT: v_lshrrev_b64 v[9:10], v5, -1
+; GISEL-NEXT: v_lshlrev_b64 v[11:12], v11, -1
+; GISEL-NEXT: v_subrev_u32_e32 v14, 64, v5
+; GISEL-NEXT: v_or_b32_e32 v15, v9, v11
+; GISEL-NEXT: v_or_b32_e32 v16, v10, v12
+; GISEL-NEXT: v_lshrrev_b64 v[11:12], v14, -1
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v11, v11, v15, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v12, v16, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v9, 0, v9, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v10, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v5, v11, -1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v11, v12, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v2, v9, v2
+; GISEL-NEXT: v_and_b32_e32 v3, v10, v3
+; GISEL-NEXT: v_and_or_b32 v0, v5, v0, v2
+; GISEL-NEXT: v_and_or_b32 v1, v11, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v3, v13, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GISEL-NEXT: v_mov_b32_e32 v1, v4
+; GISEL-NEXT: v_mov_b32_e32 v2, v5
+; GISEL-NEXT: v_mov_b32_e32 v3, v6
+; GISEL-NEXT: .LBB0_7: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: .LBB0_8: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; GISEL-NEXT: ; %bb.9: ; %itofp-sw-bb
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: v_bfe_u32 v2, v0, 2, 1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_and_b32_e32 v2, 0x4000000, v0
+; GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 2, v[0:1]
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v7, v8
+; GISEL-NEXT: ; %bb.12: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB0_13: ; %Flow4
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_and_b32_e32 v0, 0x80000000, v6
+; GISEL-NEXT: v_lshl_add_u32 v1, v7, 23, 1.0
+; GISEL-NEXT: v_and_b32_e32 v2, 0x7fffff, v4
+; GISEL-NEXT: v_or3_b32 v4, v2, v0, v1
+; GISEL-NEXT: .LBB0_14: ; %Flow5
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: v_mov_b32_e32 v0, v4
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = sitofp i128 %x to float
+ ret float %cvt
+}
+
+define float @uitofp_i128_to_f32(i128 %x) {
+; SDAG-LABEL: uitofp_i128_to_f32:
+; SDAG: ; %bb.0: ; %itofp-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_or_b32_e32 v5, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v4, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v4, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB1_14
+; SDAG-NEXT: ; %bb.1: ; %itofp-if-end
+; SDAG-NEXT: v_ffbh_u32_e32 v4, v2
+; SDAG-NEXT: v_add_u32_e32 v4, 32, v4
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v3
+; SDAG-NEXT: v_min_u32_e32 v4, v4, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v0
+; SDAG-NEXT: v_add_u32_e32 v5, 32, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v6, v1
+; SDAG-NEXT: v_min_u32_e32 v5, v5, v6
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; SDAG-NEXT: v_add_u32_e32 v5, 64, v5
+; SDAG-NEXT: v_cndmask_b32_e32 v6, v5, v4, vcc
+; SDAG-NEXT: v_sub_u32_e32 v5, 0x80, v6
+; SDAG-NEXT: v_sub_u32_e32 v4, 0x7f, v6
+; SDAG-NEXT: v_cmp_gt_i32_e32 vcc, 25, v5
+; SDAG-NEXT: ; implicit-def: $vgpr7
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
+; SDAG-NEXT: v_add_u32_e32 v2, 0xffffff98, v6
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; SDAG-NEXT: v_cndmask_b32_e32 v7, 0, v0, vcc
+; SDAG-NEXT: ; implicit-def: $vgpr5
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr6
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB1_13
+; SDAG-NEXT: ; %bb.4: ; %NodeBlock
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, 25, v5
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB1_8
+; SDAG-NEXT: ; %bb.5: ; %LeafBlock
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 26, v5
+; SDAG-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB1_7
+; SDAG-NEXT: ; %bb.6: ; %itofp-sw-default
+; SDAG-NEXT: v_sub_u32_e32 v11, 0x66, v6
+; SDAG-NEXT: v_sub_u32_e32 v9, 64, v11
+; SDAG-NEXT: v_lshrrev_b64 v[7:8], v11, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[9:10], v9, v[2:3]
+; SDAG-NEXT: v_sub_u32_e32 v12, 38, v6
+; SDAG-NEXT: v_or_b32_e32 v10, v8, v10
+; SDAG-NEXT: v_or_b32_e32 v9, v7, v9
+; SDAG-NEXT: v_lshrrev_b64 v[7:8], v12, v[2:3]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v11
+; SDAG-NEXT: v_add_u32_e32 v13, 26, v6
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v11
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[9:10], v12, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[11:12], v13, v[2:3]
+; SDAG-NEXT: v_subrev_u32_e32 v6, 38, v6
+; SDAG-NEXT: v_cndmask_b32_e64 v14, v7, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[6:7], v6, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v8, v1, s[4:5]
+; SDAG-NEXT: v_or_b32_e32 v10, v12, v10
+; SDAG-NEXT: v_or_b32_e32 v9, v11, v9
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v13
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v13, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v13
+; SDAG-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; SDAG-NEXT: v_or_b32_e32 v1, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v7, v14, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, v7
+; SDAG-NEXT: v_mov_b32_e32 v1, v8
+; SDAG-NEXT: .LBB1_7: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB1_8: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.9: ; %itofp-sw-bb
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; SDAG-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: v_lshrrev_b32_e32 v2, 2, v0
+; SDAG-NEXT: v_and_or_b32 v0, v2, 1, v0
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT: v_and_b32_e32 v2, 0x4000000, v0
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; SDAG-NEXT: v_alignbit_b32 v7, v1, v0, 2
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: ; %bb.11: ; %itofp-if-then20
+; SDAG-NEXT: v_alignbit_b32 v7, v1, v0, 3
+; SDAG-NEXT: v_mov_b32_e32 v4, v5
+; SDAG-NEXT: ; %bb.12: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB1_13: ; %Flow4
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_and_b32_e32 v0, 0x7fffff, v7
+; SDAG-NEXT: v_lshl_or_b32 v0, v4, 23, v0
+; SDAG-NEXT: v_add_u32_e32 v4, 1.0, v0
+; SDAG-NEXT: .LBB1_14: ; %Flow5
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, v4
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: uitofp_i128_to_f32:
+; GISEL: ; %bb.0: ; %itofp-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_or_b32_e32 v4, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v5, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; GISEL-NEXT: s_mov_b32 s4, 0
+; GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB1_14
+; GISEL-NEXT: ; %bb.1: ; %itofp-if-end
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v0
+; GISEL-NEXT: v_ffbh_u32_e32 v4, v1
+; GISEL-NEXT: v_add_u32_e32 v5, 32, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v6, v2
+; GISEL-NEXT: v_min_u32_e32 v4, v4, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v3
+; GISEL-NEXT: v_add_u32_e32 v6, 32, v6
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_add_u32_e32 v4, 64, v4
+; GISEL-NEXT: v_min_u32_e32 v5, v5, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v5, v4, vcc
+; GISEL-NEXT: v_sub_u32_e32 v7, 0x80, v5
+; GISEL-NEXT: v_sub_u32_e32 v6, 0x7f, v5
+; GISEL-NEXT: v_cmp_ge_i32_e32 vcc, 24, v7
+; GISEL-NEXT: ; implicit-def: $vgpr4
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
+; GISEL-NEXT: v_add_u32_e32 v2, 0xffffff98, v5
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
+; GISEL-NEXT: ; implicit-def: $vgpr7
+; GISEL-NEXT: ; implicit-def: $vgpr0
+; GISEL-NEXT: ; implicit-def: $vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr2
+; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB1_13
+; GISEL-NEXT: ; %bb.4: ; %NodeBlock
+; GISEL-NEXT: v_cmp_le_i32_e32 vcc, 26, v7
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB1_8
+; GISEL-NEXT: ; %bb.5: ; %LeafBlock
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 26, v7
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB1_7
+; GISEL-NEXT: ; %bb.6: ; %itofp-sw-default
+; GISEL-NEXT: v_sub_u32_e32 v4, 0x66, v5
+; GISEL-NEXT: v_sub_u32_e32 v10, 64, v4
+; GISEL-NEXT: v_lshrrev_b64 v[8:9], v4, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[10:11], v10, v[2:3]
+; GISEL-NEXT: v_subrev_u32_e32 v12, 64, v4
+; GISEL-NEXT: v_or_b32_e32 v10, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v11, v9, v11
+; GISEL-NEXT: v_lshrrev_b64 v[8:9], v12, v[2:3]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
+; GISEL-NEXT: v_add_u32_e32 v5, 26, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GISEL-NEXT: v_sub_u32_e32 v10, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v8, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v9, v1, vcc
+; GISEL-NEXT: v_lshrrev_b64 v[8:9], v5, -1
+; GISEL-NEXT: v_lshlrev_b64 v[10:11], v10, -1
+; GISEL-NEXT: v_subrev_u32_e32 v13, 64, v5
+; GISEL-NEXT: v_or_b32_e32 v14, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v15, v9, v11
+; GISEL-NEXT: v_lshrrev_b64 v[10:11], v13, -1
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v10, v10, v14, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v11, v11, v15, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v8, 0, v8, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v9, 0, v9, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v5, v10, -1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v10, v11, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v2, v8, v2
+; GISEL-NEXT: v_and_b32_e32 v3, v9, v3
+; GISEL-NEXT: v_and_or_b32 v0, v5, v0, v2
+; GISEL-NEXT: v_and_or_b32 v1, v10, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v3, v12, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GISEL-NEXT: v_mov_b32_e32 v1, v4
+; GISEL-NEXT: v_mov_b32_e32 v2, v5
+; GISEL-NEXT: v_mov_b32_e32 v3, v6
+; GISEL-NEXT: .LBB1_7: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: .LBB1_8: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; GISEL-NEXT: ; %bb.9: ; %itofp-sw-bb
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: v_bfe_u32 v2, v0, 2, 1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_and_b32_e32 v2, 0x4000000, v0
+; GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 2, v[0:1]
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v6, v7
+; GISEL-NEXT: ; %bb.12: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB1_13: ; %Flow4
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_add_u32 v0, v6, 23, 1.0
+; GISEL-NEXT: v_mov_b32_e32 v1, 0x7fffff
+; GISEL-NEXT: v_and_or_b32 v4, v4, v1, v0
+; GISEL-NEXT: .LBB1_14: ; %Flow5
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: v_mov_b32_e32 v0, v4
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = uitofp i128 %x to float
+ ret float %cvt
+}
+
+define double @sitofp_i128_to_f64(i128 %x) {
+; SDAG-LABEL: sitofp_i128_to_f64:
+; SDAG: ; %bb.0: ; %itofp-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_mov_b32_e32 v5, v1
+; SDAG-NEXT: v_mov_b32_e32 v4, v0
+; SDAG-NEXT: v_or_b32_e32 v1, v5, v3
+; SDAG-NEXT: v_or_b32_e32 v0, v4, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; SDAG-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB2_14
+; SDAG-NEXT: ; %bb.1: ; %itofp-if-end
+; SDAG-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; SDAG-NEXT: v_xor_b32_e32 v4, v0, v4
+; SDAG-NEXT: v_xor_b32_e32 v5, v0, v5
+; SDAG-NEXT: v_sub_co_u32_e32 v4, vcc, v4, v0
+; SDAG-NEXT: v_xor_b32_e32 v2, v0, v2
+; SDAG-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v0, vcc
+; SDAG-NEXT: v_xor_b32_e32 v1, v0, v3
+; SDAG-NEXT: v_subb_co_u32_e32 v6, vcc, v2, v0, vcc
+; SDAG-NEXT: v_subb_co_u32_e32 v7, vcc, v1, v0, vcc
+; SDAG-NEXT: v_ffbh_u32_e32 v0, v6
+; SDAG-NEXT: v_add_u32_e32 v0, 32, v0
+; SDAG-NEXT: v_ffbh_u32_e32 v1, v7
+; SDAG-NEXT: v_min_u32_e32 v0, v0, v1
+; SDAG-NEXT: v_ffbh_u32_e32 v1, v4
+; SDAG-NEXT: v_add_u32_e32 v1, 32, v1
+; SDAG-NEXT: v_ffbh_u32_e32 v2, v5
+; SDAG-NEXT: v_min_u32_e32 v1, v1, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; SDAG-NEXT: v_add_u32_e32 v1, 64, v1
+; SDAG-NEXT: v_cndmask_b32_e32 v9, v1, v0, vcc
+; SDAG-NEXT: v_sub_u32_e32 v8, 0x80, v9
+; SDAG-NEXT: v_sub_u32_e32 v2, 0x7f, v9
+; SDAG-NEXT: v_cmp_gt_i32_e32 vcc, 54, v8
+; SDAG-NEXT: ; implicit-def: $vgpr10
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
+; SDAG-NEXT: v_add_u32_e32 v6, 0xffffffb5, v9
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
+; SDAG-NEXT: v_cndmask_b32_e32 v10, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
+; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; SDAG-NEXT: ; implicit-def: $vgpr9
+; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB2_13
+; SDAG-NEXT: ; %bb.4: ; %NodeBlock
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, 54, v8
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB2_8
+; SDAG-NEXT: ; %bb.5: ; %LeafBlock
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 55, v8
+; SDAG-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB2_7
+; SDAG-NEXT: ; %bb.6: ; %itofp-sw-default
+; SDAG-NEXT: v_sub_u32_e32 v12, 0x49, v9
+; SDAG-NEXT: v_sub_u32_e32 v10, 64, v12
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v12, v[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[10:11], v10, v[6:7]
+; SDAG-NEXT: v_sub_u32_e32 v13, 9, v9
+; SDAG-NEXT: v_or_b32_e32 v11, v1, v11
+; SDAG-NEXT: v_or_b32_e32 v10, v0, v10
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v13, v[6:7]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v12
+; SDAG-NEXT: v_add_u32_e32 v16, 55, v9
+; SDAG-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v12
+; SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[10:11], v12, v[6:7]
+; SDAG-NEXT: v_lshrrev_b64 v[12:13], v13, v[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[14:15], v16, v[6:7]
+; SDAG-NEXT: v_add_u32_e32 v9, -9, v9
+; SDAG-NEXT: v_or_b32_e32 v15, v15, v13
+; SDAG-NEXT: v_or_b32_e32 v14, v14, v12
+; SDAG-NEXT: v_lshlrev_b64 v[12:13], v9, v[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v11, 0, v11, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v10, 0, v10, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v16
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v1, v5, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v0, v0, v4, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v9, v13, v15, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v16
+; SDAG-NEXT: v_lshlrev_b64 v[4:5], v16, v[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v7, v9, v7, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v9, v12, v14, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v6, v9, v6, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
+; SDAG-NEXT: v_or_b32_e32 v5, v5, v7
+; SDAG-NEXT: v_or_b32_e32 v4, v4, v6
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v6, v10
+; SDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v4
+; SDAG-NEXT: v_mov_b32_e32 v5, v1
+; SDAG-NEXT: v_mov_b32_e32 v4, v0
+; SDAG-NEXT: v_mov_b32_e32 v7, v11
+; SDAG-NEXT: .LBB2_7: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB2_8: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.9: ; %itofp-sw-bb
+; SDAG-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
+; SDAG-NEXT: v_lshrrev_b32_e32 v0, 31, v5
+; SDAG-NEXT: v_lshlrev_b64 v[4:5], 1, v[4:5]
+; SDAG-NEXT: v_or_b32_e32 v6, v6, v0
+; SDAG-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: v_lshrrev_b32_e32 v0, 2, v4
+; SDAG-NEXT: v_and_or_b32 v0, v0, 1, v4
+; SDAG-NEXT: v_add_co_u32_e32 v4, vcc, 1, v0
+; SDAG-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
+; SDAG-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], 2, v[4:5]
+; SDAG-NEXT: v_lshlrev_b32_e32 v7, 30, v6
+; SDAG-NEXT: v_or_b32_e32 v10, v1, v7
+; SDAG-NEXT: v_and_b32_e32 v1, 0x800000, v5
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: ; %bb.11: ; %itofp-if-then20
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], 3, v[4:5]
+; SDAG-NEXT: v_lshlrev_b32_e32 v2, 29, v6
+; SDAG-NEXT: v_or_b32_e32 v10, v1, v2
+; SDAG-NEXT: v_mov_b32_e32 v2, v8
+; SDAG-NEXT: ; %bb.12: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB2_13: ; %Flow4
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_and_b32_e32 v1, 0x80000000, v3
+; SDAG-NEXT: v_mov_b32_e32 v3, 0x3ff00000
+; SDAG-NEXT: v_lshl_add_u32 v2, v2, 20, v3
+; SDAG-NEXT: v_and_b32_e32 v3, 0xfffff, v10
+; SDAG-NEXT: v_or3_b32 v1, v3, v1, v2
+; SDAG-NEXT: .LBB2_14: ; %Flow5
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: sitofp_i128_to_f64:
+; GISEL: ; %bb.0: ; %itofp-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_mov_b32_e32 v4, v0
+; GISEL-NEXT: v_mov_b32_e32 v5, v1
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_or_b32_e32 v0, v4, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v5, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB2_14
+; GISEL-NEXT: ; %bb.1: ; %itofp-if-end
+; GISEL-NEXT: v_ashrrev_i32_e32 v6, 31, v3
+; GISEL-NEXT: v_xor_b32_e32 v0, v6, v4
+; GISEL-NEXT: v_xor_b32_e32 v1, v6, v5
+; GISEL-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v6
+; GISEL-NEXT: v_xor_b32_e32 v2, v6, v2
+; GISEL-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v6, vcc
+; GISEL-NEXT: v_xor_b32_e32 v3, v6, v3
+; GISEL-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v6, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v0
+; GISEL-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v6, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v4, v1
+; GISEL-NEXT: v_add_u32_e32 v5, 32, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v7, v2
+; GISEL-NEXT: v_min_u32_e32 v4, v4, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v3
+; GISEL-NEXT: v_add_u32_e32 v7, 32, v7
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_add_u32_e32 v4, 64, v4
+; GISEL-NEXT: v_min_u32_e32 v5, v5, v7
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v5, v4, vcc
+; GISEL-NEXT: v_sub_u32_e32 v8, 0x80, v9
+; GISEL-NEXT: v_sub_u32_e32 v7, 0x7f, v9
+; GISEL-NEXT: v_cmp_ge_i32_e32 vcc, 53, v8
+; GISEL-NEXT: ; implicit-def: $vgpr10
+; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
+; GISEL-NEXT: v_add_u32_e32 v2, 0xffffffb5, v9
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v1, vcc
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: ; implicit-def: $vgpr0
+; GISEL-NEXT: ; implicit-def: $vgpr9
+; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB2_13
+; GISEL-NEXT: ; %bb.4: ; %NodeBlock
+; GISEL-NEXT: v_cmp_le_i32_e32 vcc, 55, v8
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB2_8
+; GISEL-NEXT: ; %bb.5: ; %LeafBlock
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 55, v8
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB2_7
+; GISEL-NEXT: ; %bb.6: ; %itofp-sw-default
+; GISEL-NEXT: v_sub_u32_e32 v14, 0x49, v9
+; GISEL-NEXT: v_sub_u32_e32 v10, 64, v14
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], v14, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[10:11], v10, v[2:3]
+; GISEL-NEXT: v_subrev_u32_e32 v15, 64, v14
+; GISEL-NEXT: v_or_b32_e32 v10, v4, v10
+; GISEL-NEXT: v_or_b32_e32 v11, v5, v11
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], v15, v[2:3]
+; GISEL-NEXT: v_lshrrev_b64 v[12:13], v14, v[2:3]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v14
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v14
+; GISEL-NEXT: v_add_u32_e32 v14, 55, v9
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc
+; GISEL-NEXT: v_sub_u32_e32 v11, 64, v14
+; GISEL-NEXT: v_cndmask_b32_e64 v13, v4, v0, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v4, v5, v1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e32 v5, 0, v12, vcc
+; GISEL-NEXT: v_lshrrev_b64 v[9:10], v14, -1
+; GISEL-NEXT: v_lshlrev_b64 v[11:12], v11, -1
+; GISEL-NEXT: v_subrev_u32_e32 v15, 64, v14
+; GISEL-NEXT: v_or_b32_e32 v16, v9, v11
+; GISEL-NEXT: v_or_b32_e32 v17, v10, v12
+; GISEL-NEXT: v_lshrrev_b64 v[11:12], v15, -1
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v14
+; GISEL-NEXT: v_cndmask_b32_e32 v11, v11, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v12, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v14
+; GISEL-NEXT: v_cndmask_b32_e32 v9, 0, v9, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v10, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v11, v11, -1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v12, v12, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v2, v9, v2
+; GISEL-NEXT: v_and_b32_e32 v3, v10, v3
+; GISEL-NEXT: v_and_or_b32 v0, v11, v0, v2
+; GISEL-NEXT: v_and_or_b32 v1, v12, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v3, v13, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GISEL-NEXT: v_mov_b32_e32 v1, v4
+; GISEL-NEXT: v_mov_b32_e32 v2, v5
+; GISEL-NEXT: v_mov_b32_e32 v3, v6
+; GISEL-NEXT: .LBB2_7: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: .LBB2_8: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; GISEL-NEXT: ; %bb.9: ; %itofp-sw-bb
+; GISEL-NEXT: v_lshlrev_b64 v[9:10], 1, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
+; GISEL-NEXT: v_lshrrev_b32_e32 v0, 31, v1
+; GISEL-NEXT: v_or_b32_e32 v11, v2, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v9
+; GISEL-NEXT: v_mov_b32_e32 v1, v10
+; GISEL-NEXT: v_mov_b32_e32 v2, v11
+; GISEL-NEXT: v_mov_b32_e32 v3, v12
+; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: v_bfe_u32 v3, v0, 2, 1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v3
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 2, v[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v9, 0
+; GISEL-NEXT: v_and_b32_e32 v10, 0x800000, v1
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[9:10]
+; GISEL-NEXT: v_lshl_or_b32 v10, v2, 30, v5
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v7, v8
+; GISEL-NEXT: v_lshl_or_b32 v10, v2, 29, v5
+; GISEL-NEXT: ; %bb.12: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB2_13: ; %Flow4
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_and_b32_e32 v0, 0x80000000, v6
+; GISEL-NEXT: v_mov_b32_e32 v1, 0x3ff00000
+; GISEL-NEXT: v_mov_b32_e32 v2, 0xfffff
+; GISEL-NEXT: v_lshl_add_u32 v1, v7, 20, v1
+; GISEL-NEXT: v_and_or_b32 v2, v10, v2, v0
+; GISEL-NEXT: v_and_or_b32 v0, v4, -1, 0
+; GISEL-NEXT: v_or3_b32 v1, v2, v1, 0
+; GISEL-NEXT: .LBB2_14: ; %Flow5
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = sitofp i128 %x to double
+ ret double %cvt
+}
+
+define double @uitofp_i128_to_f64(i128 %x) {
+; SDAG-LABEL: uitofp_i128_to_f64:
+; SDAG: ; %bb.0: ; %itofp-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_or_b32_e32 v5, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v4, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v4, 0
+; SDAG-NEXT: v_mov_b32_e32 v5, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB3_14
+; SDAG-NEXT: ; %bb.1: ; %itofp-if-end
+; SDAG-NEXT: v_ffbh_u32_e32 v4, v2
+; SDAG-NEXT: v_add_u32_e32 v4, 32, v4
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v3
+; SDAG-NEXT: v_min_u32_e32 v4, v4, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v0
+; SDAG-NEXT: v_add_u32_e32 v5, 32, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v6, v1
+; SDAG-NEXT: v_min_u32_e32 v5, v5, v6
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; SDAG-NEXT: v_add_u32_e32 v5, 64, v5
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v5, v4, vcc
+; SDAG-NEXT: v_sub_u32_e32 v7, 0x80, v8
+; SDAG-NEXT: v_sub_u32_e32 v6, 0x7f, v8
+; SDAG-NEXT: v_cmp_gt_i32_e32 vcc, 54, v7
+; SDAG-NEXT: ; implicit-def: $vgpr9
+; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
+; SDAG-NEXT: v_add_u32_e32 v2, 0xffffffb5, v8
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; SDAG-NEXT: v_cndmask_b32_e32 v9, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
+; SDAG-NEXT: ; implicit-def: $vgpr7
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB3_13
+; SDAG-NEXT: ; %bb.4: ; %NodeBlock
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, 54, v7
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB3_8
+; SDAG-NEXT: ; %bb.5: ; %LeafBlock
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 55, v7
+; SDAG-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB3_7
+; SDAG-NEXT: ; %bb.6: ; %itofp-sw-default
+; SDAG-NEXT: v_sub_u32_e32 v11, 0x49, v8
+; SDAG-NEXT: v_sub_u32_e32 v9, 64, v11
+; SDAG-NEXT: v_lshrrev_b64 v[4:5], v11, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[9:10], v9, v[2:3]
+; SDAG-NEXT: v_sub_u32_e32 v12, 9, v8
+; SDAG-NEXT: v_or_b32_e32 v10, v5, v10
+; SDAG-NEXT: v_or_b32_e32 v9, v4, v9
+; SDAG-NEXT: v_lshrrev_b64 v[4:5], v12, v[2:3]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v11
+; SDAG-NEXT: v_add_u32_e32 v15, 55, v8
+; SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v10, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v11
+; SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[9:10], v11, v[2:3]
+; SDAG-NEXT: v_lshrrev_b64 v[11:12], v12, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[13:14], v15, v[2:3]
+; SDAG-NEXT: v_add_u32_e32 v8, -9, v8
+; SDAG-NEXT: v_or_b32_e32 v14, v14, v12
+; SDAG-NEXT: v_or_b32_e32 v13, v13, v11
+; SDAG-NEXT: v_lshlrev_b64 v[11:12], v8, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e32 v10, 0, v10, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v9, 0, v9, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v15
+; SDAG-NEXT: v_cndmask_b32_e64 v5, v5, v1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v4, v4, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v12, v14, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v15
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v15, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v3, v8, v3, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v11, v13, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v8, v2, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; SDAG-NEXT: v_or_b32_e32 v1, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; SDAG-NEXT: v_mov_b32_e32 v2, v9
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v4, v4, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, v4
+; SDAG-NEXT: v_mov_b32_e32 v1, v5
+; SDAG-NEXT: v_mov_b32_e32 v3, v10
+; SDAG-NEXT: .LBB3_7: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB3_8: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.9: ; %itofp-sw-bb
+; SDAG-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
+; SDAG-NEXT: v_lshrrev_b32_e32 v3, 31, v1
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; SDAG-NEXT: v_or_b32_e32 v2, v2, v3
+; SDAG-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: v_lshrrev_b32_e32 v3, 2, v0
+; SDAG-NEXT: v_and_or_b32 v0, v3, 1, v0
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[4:5], 2, v[0:1]
+; SDAG-NEXT: v_and_b32_e32 v3, 0x800000, v1
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
+; SDAG-NEXT: v_alignbit_b32 v9, v2, v1, 2
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: ; %bb.11: ; %itofp-if-then20
+; SDAG-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
+; SDAG-NEXT: v_alignbit_b32 v9, v2, v1, 3
+; SDAG-NEXT: v_mov_b32_e32 v6, v7
+; SDAG-NEXT: ; %bb.12: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB3_13: ; %Flow4
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_and_b32_e32 v0, 0xfffff, v9
+; SDAG-NEXT: v_lshl_or_b32 v0, v6, 20, v0
+; SDAG-NEXT: v_add_u32_e32 v5, 0x3ff00000, v0
+; SDAG-NEXT: .LBB3_14: ; %Flow5
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, v4
+; SDAG-NEXT: v_mov_b32_e32 v1, v5
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: uitofp_i128_to_f64:
+; GISEL: ; %bb.0: ; %itofp-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_or_b32_e32 v4, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v5, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB3_14
+; GISEL-NEXT: ; %bb.1: ; %itofp-if-end
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v0
+; GISEL-NEXT: v_ffbh_u32_e32 v4, v1
+; GISEL-NEXT: v_add_u32_e32 v5, 32, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v6, v2
+; GISEL-NEXT: v_min_u32_e32 v4, v4, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v3
+; GISEL-NEXT: v_add_u32_e32 v6, 32, v6
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_add_u32_e32 v4, 64, v4
+; GISEL-NEXT: v_min_u32_e32 v5, v5, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v8, v5, v4, vcc
+; GISEL-NEXT: v_sub_u32_e32 v7, 0x80, v8
+; GISEL-NEXT: v_sub_u32_e32 v6, 0x7f, v8
+; GISEL-NEXT: v_cmp_ge_i32_e32 vcc, 53, v7
+; GISEL-NEXT: ; implicit-def: $vgpr9
+; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
+; GISEL-NEXT: v_add_u32_e32 v2, 0xffffffb5, v8
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v9, 0, v1, vcc
+; GISEL-NEXT: ; implicit-def: $vgpr7
+; GISEL-NEXT: ; implicit-def: $vgpr0
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB3_13
+; GISEL-NEXT: ; %bb.4: ; %NodeBlock
+; GISEL-NEXT: v_cmp_le_i32_e32 vcc, 55, v7
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB3_8
+; GISEL-NEXT: ; %bb.5: ; %LeafBlock
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 55, v7
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB3_7
+; GISEL-NEXT: ; %bb.6: ; %itofp-sw-default
+; GISEL-NEXT: v_sub_u32_e32 v13, 0x49, v8
+; GISEL-NEXT: v_sub_u32_e32 v9, 64, v13
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], v13, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[9:10], v9, v[2:3]
+; GISEL-NEXT: v_subrev_u32_e32 v14, 64, v13
+; GISEL-NEXT: v_lshrrev_b64 v[11:12], v13, v[2:3]
+; GISEL-NEXT: v_or_b32_e32 v9, v4, v9
+; GISEL-NEXT: v_or_b32_e32 v10, v5, v10
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], v14, v[2:3]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v13
+; GISEL-NEXT: v_add_u32_e32 v8, 55, v8
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v5, v10, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v13
+; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v11, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v12, vcc
+; GISEL-NEXT: v_sub_u32_e32 v12, 64, v8
+; GISEL-NEXT: v_cndmask_b32_e64 v14, v4, v0, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v9, v5, v1, s[4:5]
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], v8, -1
+; GISEL-NEXT: v_lshlrev_b64 v[12:13], v12, -1
+; GISEL-NEXT: v_subrev_u32_e32 v15, 64, v8
+; GISEL-NEXT: v_or_b32_e32 v16, v4, v12
+; GISEL-NEXT: v_or_b32_e32 v17, v5, v13
+; GISEL-NEXT: v_lshrrev_b64 v[12:13], v15, -1
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v8
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v12, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v13, v13, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v8
+; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v8, v12, -1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v12, v13, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v2, v4, v2
+; GISEL-NEXT: v_and_b32_e32 v3, v5, v3
+; GISEL-NEXT: v_and_or_b32 v0, v8, v0, v2
+; GISEL-NEXT: v_and_or_b32 v1, v12, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v8, v14, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v8
+; GISEL-NEXT: v_mov_b32_e32 v1, v9
+; GISEL-NEXT: v_mov_b32_e32 v2, v10
+; GISEL-NEXT: v_mov_b32_e32 v3, v11
+; GISEL-NEXT: .LBB3_7: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: .LBB3_8: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; GISEL-NEXT: ; %bb.9: ; %itofp-sw-bb
+; GISEL-NEXT: v_lshlrev_b64 v[8:9], 1, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[10:11], 1, v[2:3]
+; GISEL-NEXT: v_lshrrev_b32_e32 v0, 31, v1
+; GISEL-NEXT: v_or_b32_e32 v10, v10, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v8
+; GISEL-NEXT: v_mov_b32_e32 v1, v9
+; GISEL-NEXT: v_mov_b32_e32 v2, v10
+; GISEL-NEXT: v_mov_b32_e32 v3, v11
+; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: v_bfe_u32 v4, v0, 2, 1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v4
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GISEL-NEXT: v_mov_b32_e32 v8, 0
+; GISEL-NEXT: v_and_b32_e32 v9, 0x800000, v1
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 2, v[0:1]
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GISEL-NEXT: v_lshlrev_b64 v[8:9], 30, v[2:3]
+; GISEL-NEXT: v_lshrrev_b32_e32 v5, 2, v1
+; GISEL-NEXT: v_or_b32_e32 v9, v5, v8
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
+; GISEL-NEXT: v_lshlrev_b64 v[2:3], 29, v[2:3]
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
+; GISEL-NEXT: v_lshrrev_b32_e32 v0, 3, v1
+; GISEL-NEXT: v_or_b32_e32 v9, v0, v2
+; GISEL-NEXT: v_mov_b32_e32 v6, v7
+; GISEL-NEXT: ; %bb.12: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB3_13: ; %Flow4
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x3ff00000
+; GISEL-NEXT: v_lshl_add_u32 v0, v6, 20, v0
+; GISEL-NEXT: v_and_b32_e32 v1, 0xfffff, v9
+; GISEL-NEXT: v_and_or_b32 v4, v4, -1, 0
+; GISEL-NEXT: v_or3_b32 v5, v1, v0, 0
+; GISEL-NEXT: .LBB3_14: ; %Flow5
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: v_mov_b32_e32 v0, v4
+; GISEL-NEXT: v_mov_b32_e32 v1, v5
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = uitofp i128 %x to double
+ ret double %cvt
+}
+
+define half @sitofp_i128_to_f16(i128 %x) {
+; SDAG-LABEL: sitofp_i128_to_f16:
+; SDAG: ; %bb.0: ; %itofp-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_or_b32_e32 v5, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v4, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v4, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB4_14
+; SDAG-NEXT: ; %bb.1: ; %itofp-if-end
+; SDAG-NEXT: v_ashrrev_i32_e32 v5, 31, v3
+; SDAG-NEXT: v_xor_b32_e32 v0, v5, v0
+; SDAG-NEXT: v_xor_b32_e32 v1, v5, v1
+; SDAG-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v5
+; SDAG-NEXT: v_xor_b32_e32 v2, v5, v2
+; SDAG-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v5, vcc
+; SDAG-NEXT: v_xor_b32_e32 v6, v5, v3
+; SDAG-NEXT: v_subb_co_u32_e32 v4, vcc, v2, v5, vcc
+; SDAG-NEXT: v_subb_co_u32_e32 v5, vcc, v6, v5, vcc
+; SDAG-NEXT: v_ffbh_u32_e32 v2, v4
+; SDAG-NEXT: v_add_u32_e32 v2, 32, v2
+; SDAG-NEXT: v_ffbh_u32_e32 v6, v5
+; SDAG-NEXT: v_min_u32_e32 v2, v2, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v6, v0
+; SDAG-NEXT: v_add_u32_e32 v6, 32, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v7, v1
+; SDAG-NEXT: v_min_u32_e32 v6, v6, v7
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_add_u32_e32 v6, 64, v6
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v6, v2, vcc
+; SDAG-NEXT: v_sub_u32_e32 v6, 0x80, v7
+; SDAG-NEXT: v_sub_u32_e32 v2, 0x7f, v7
+; SDAG-NEXT: v_cmp_gt_i32_e32 vcc, 25, v6
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
+; SDAG-NEXT: v_add_u32_e32 v4, 0xffffff98, v7
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v4, v[0:1]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
+; SDAG-NEXT: v_cndmask_b32_e32 v8, 0, v0, vcc
+; SDAG-NEXT: ; implicit-def: $vgpr6
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr7
+; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB4_13
+; SDAG-NEXT: ; %bb.4: ; %NodeBlock
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, 25, v6
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB4_8
+; SDAG-NEXT: ; %bb.5: ; %LeafBlock
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 26, v6
+; SDAG-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB4_7
+; SDAG-NEXT: ; %bb.6: ; %itofp-sw-default
+; SDAG-NEXT: v_sub_u32_e32 v12, 0x66, v7
+; SDAG-NEXT: v_sub_u32_e32 v10, 64, v12
+; SDAG-NEXT: v_lshrrev_b64 v[8:9], v12, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[10:11], v10, v[4:5]
+; SDAG-NEXT: v_sub_u32_e32 v13, 38, v7
+; SDAG-NEXT: v_or_b32_e32 v11, v9, v11
+; SDAG-NEXT: v_or_b32_e32 v10, v8, v10
+; SDAG-NEXT: v_lshrrev_b64 v[8:9], v13, v[4:5]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v12
+; SDAG-NEXT: v_add_u32_e32 v14, 26, v7
+; SDAG-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v12
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[10:11], v13, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[12:13], v14, v[4:5]
+; SDAG-NEXT: v_subrev_u32_e32 v7, 38, v7
+; SDAG-NEXT: v_cndmask_b32_e64 v15, v8, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[7:8], v7, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v9, v1, s[4:5]
+; SDAG-NEXT: v_or_b32_e32 v11, v13, v11
+; SDAG-NEXT: v_or_b32_e32 v10, v12, v10
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v14
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v14, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v8, v11, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v14
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v5, v8, v5, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v4, v7, v4, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; SDAG-NEXT: v_or_b32_e32 v1, v1, v5
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v4
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v8, v15, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, v8
+; SDAG-NEXT: v_mov_b32_e32 v1, v9
+; SDAG-NEXT: .LBB4_7: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB4_8: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.9: ; %itofp-sw-bb
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; SDAG-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: v_lshrrev_b32_e32 v4, 2, v0
+; SDAG-NEXT: v_and_or_b32 v0, v4, 1, v0
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT: v_and_b32_e32 v4, 0x4000000, v0
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; SDAG-NEXT: v_alignbit_b32 v8, v1, v0, 2
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: ; %bb.11: ; %itofp-if-then20
+; SDAG-NEXT: v_alignbit_b32 v8, v1, v0, 3
+; SDAG-NEXT: v_mov_b32_e32 v2, v6
+; SDAG-NEXT: ; %bb.12: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB4_13: ; %Flow4
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_and_b32_e32 v0, 0x80000000, v3
+; SDAG-NEXT: v_lshl_add_u32 v1, v2, 23, 1.0
+; SDAG-NEXT: v_and_b32_e32 v2, 0x7fffff, v8
+; SDAG-NEXT: v_or3_b32 v0, v2, v0, v1
+; SDAG-NEXT: v_cvt_f16_f32_e32 v4, v0
+; SDAG-NEXT: .LBB4_14: ; %Flow5
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, v4
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: sitofp_i128_to_f16:
+; GISEL: ; %bb.0: ; %itofp-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_or_b32_e32 v4, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v5, v1, v3
+; GISEL-NEXT: s_mov_b32 s4, 0
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB4_14
+; GISEL-NEXT: ; %bb.1: ; %itofp-if-end
+; GISEL-NEXT: v_ashrrev_i32_e32 v6, 31, v3
+; GISEL-NEXT: v_xor_b32_e32 v0, v6, v0
+; GISEL-NEXT: v_xor_b32_e32 v1, v6, v1
+; GISEL-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v6
+; GISEL-NEXT: v_xor_b32_e32 v2, v6, v2
+; GISEL-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v6, vcc
+; GISEL-NEXT: v_xor_b32_e32 v3, v6, v3
+; GISEL-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v6, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v0
+; GISEL-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v6, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v4, v1
+; GISEL-NEXT: v_add_u32_e32 v5, 32, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v7, v2
+; GISEL-NEXT: v_min_u32_e32 v4, v4, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v3
+; GISEL-NEXT: v_add_u32_e32 v7, 32, v7
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_add_u32_e32 v4, 64, v4
+; GISEL-NEXT: v_min_u32_e32 v5, v5, v7
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v5, v4, vcc
+; GISEL-NEXT: v_sub_u32_e32 v8, 0x80, v5
+; GISEL-NEXT: v_sub_u32_e32 v7, 0x7f, v5
+; GISEL-NEXT: v_cmp_ge_i32_e32 vcc, 24, v8
+; GISEL-NEXT: ; implicit-def: $vgpr4
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
+; GISEL-NEXT: v_add_u32_e32 v2, 0xffffff98, v5
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: ; implicit-def: $vgpr0
+; GISEL-NEXT: ; implicit-def: $vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr2
+; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB4_13
+; GISEL-NEXT: ; %bb.4: ; %NodeBlock
+; GISEL-NEXT: v_cmp_le_i32_e32 vcc, 26, v8
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB4_8
+; GISEL-NEXT: ; %bb.5: ; %LeafBlock
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 26, v8
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB4_7
+; GISEL-NEXT: ; %bb.6: ; %itofp-sw-default
+; GISEL-NEXT: v_sub_u32_e32 v4, 0x66, v5
+; GISEL-NEXT: v_sub_u32_e32 v11, 64, v4
+; GISEL-NEXT: v_lshrrev_b64 v[9:10], v4, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[11:12], v11, v[2:3]
+; GISEL-NEXT: v_subrev_u32_e32 v13, 64, v4
+; GISEL-NEXT: v_or_b32_e32 v11, v9, v11
+; GISEL-NEXT: v_or_b32_e32 v12, v10, v12
+; GISEL-NEXT: v_lshrrev_b64 v[9:10], v13, v[2:3]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
+; GISEL-NEXT: v_add_u32_e32 v5, 26, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GISEL-NEXT: v_sub_u32_e32 v11, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v13, v9, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v10, v1, vcc
+; GISEL-NEXT: v_lshrrev_b64 v[9:10], v5, -1
+; GISEL-NEXT: v_lshlrev_b64 v[11:12], v11, -1
+; GISEL-NEXT: v_subrev_u32_e32 v14, 64, v5
+; GISEL-NEXT: v_or_b32_e32 v15, v9, v11
+; GISEL-NEXT: v_or_b32_e32 v16, v10, v12
+; GISEL-NEXT: v_lshrrev_b64 v[11:12], v14, -1
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v11, v11, v15, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v12, v16, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v9, 0, v9, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v10, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v5, v11, -1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v11, v12, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v2, v9, v2
+; GISEL-NEXT: v_and_b32_e32 v3, v10, v3
+; GISEL-NEXT: v_and_or_b32 v0, v5, v0, v2
+; GISEL-NEXT: v_and_or_b32 v1, v11, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v3, v13, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GISEL-NEXT: v_mov_b32_e32 v1, v4
+; GISEL-NEXT: v_mov_b32_e32 v2, v5
+; GISEL-NEXT: v_mov_b32_e32 v3, v6
+; GISEL-NEXT: .LBB4_7: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: .LBB4_8: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; GISEL-NEXT: ; %bb.9: ; %itofp-sw-bb
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: v_bfe_u32 v2, v0, 2, 1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_and_b32_e32 v2, 0x4000000, v0
+; GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 2, v[0:1]
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v7, v8
+; GISEL-NEXT: ; %bb.12: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB4_13: ; %Flow4
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_and_b32_e32 v0, 0x80000000, v6
+; GISEL-NEXT: v_lshl_add_u32 v1, v7, 23, 1.0
+; GISEL-NEXT: v_and_b32_e32 v2, 0x7fffff, v4
+; GISEL-NEXT: v_or3_b32 v0, v2, v0, v1
+; GISEL-NEXT: v_cvt_f16_f32_e32 v4, v0
+; GISEL-NEXT: .LBB4_14: ; %Flow5
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: v_mov_b32_e32 v0, v4
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = sitofp i128 %x to half
+ ret half %cvt
+}
+
+define half @uitofp_i128_to_f16(i128 %x) {
+; SDAG-LABEL: uitofp_i128_to_f16:
+; SDAG: ; %bb.0: ; %itofp-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_or_b32_e32 v5, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v4, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v4, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB5_14
+; SDAG-NEXT: ; %bb.1: ; %itofp-if-end
+; SDAG-NEXT: v_ffbh_u32_e32 v4, v2
+; SDAG-NEXT: v_add_u32_e32 v4, 32, v4
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v3
+; SDAG-NEXT: v_min_u32_e32 v4, v4, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v0
+; SDAG-NEXT: v_add_u32_e32 v5, 32, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v6, v1
+; SDAG-NEXT: v_min_u32_e32 v5, v5, v6
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; SDAG-NEXT: v_add_u32_e32 v5, 64, v5
+; SDAG-NEXT: v_cndmask_b32_e32 v6, v5, v4, vcc
+; SDAG-NEXT: v_sub_u32_e32 v5, 0x80, v6
+; SDAG-NEXT: v_sub_u32_e32 v4, 0x7f, v6
+; SDAG-NEXT: v_cmp_gt_i32_e32 vcc, 25, v5
+; SDAG-NEXT: ; implicit-def: $vgpr7
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
+; SDAG-NEXT: v_add_u32_e32 v2, 0xffffff98, v6
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; SDAG-NEXT: v_cndmask_b32_e32 v7, 0, v0, vcc
+; SDAG-NEXT: ; implicit-def: $vgpr5
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr6
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB5_13
+; SDAG-NEXT: ; %bb.4: ; %NodeBlock
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, 25, v5
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB5_8
+; SDAG-NEXT: ; %bb.5: ; %LeafBlock
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 26, v5
+; SDAG-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB5_7
+; SDAG-NEXT: ; %bb.6: ; %itofp-sw-default
+; SDAG-NEXT: v_sub_u32_e32 v11, 0x66, v6
+; SDAG-NEXT: v_sub_u32_e32 v9, 64, v11
+; SDAG-NEXT: v_lshrrev_b64 v[7:8], v11, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[9:10], v9, v[2:3]
+; SDAG-NEXT: v_sub_u32_e32 v12, 38, v6
+; SDAG-NEXT: v_or_b32_e32 v10, v8, v10
+; SDAG-NEXT: v_or_b32_e32 v9, v7, v9
+; SDAG-NEXT: v_lshrrev_b64 v[7:8], v12, v[2:3]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v11
+; SDAG-NEXT: v_add_u32_e32 v13, 26, v6
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v11
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[9:10], v12, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[11:12], v13, v[2:3]
+; SDAG-NEXT: v_subrev_u32_e32 v6, 38, v6
+; SDAG-NEXT: v_cndmask_b32_e64 v14, v7, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[6:7], v6, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v8, v1, s[4:5]
+; SDAG-NEXT: v_or_b32_e32 v10, v12, v10
+; SDAG-NEXT: v_or_b32_e32 v9, v11, v9
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v13
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v13, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v13
+; SDAG-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; SDAG-NEXT: v_or_b32_e32 v1, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v7, v14, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, v7
+; SDAG-NEXT: v_mov_b32_e32 v1, v8
+; SDAG-NEXT: .LBB5_7: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB5_8: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.9: ; %itofp-sw-bb
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; SDAG-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: v_lshrrev_b32_e32 v2, 2, v0
+; SDAG-NEXT: v_and_or_b32 v0, v2, 1, v0
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT: v_and_b32_e32 v2, 0x4000000, v0
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; SDAG-NEXT: v_alignbit_b32 v7, v1, v0, 2
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: ; %bb.11: ; %itofp-if-then20
+; SDAG-NEXT: v_alignbit_b32 v7, v1, v0, 3
+; SDAG-NEXT: v_mov_b32_e32 v4, v5
+; SDAG-NEXT: ; %bb.12: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB5_13: ; %Flow4
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_and_b32_e32 v0, 0x7fffff, v7
+; SDAG-NEXT: v_lshl_or_b32 v0, v4, 23, v0
+; SDAG-NEXT: v_add_u32_e32 v0, 1.0, v0
+; SDAG-NEXT: v_cvt_f16_f32_e32 v4, v0
+; SDAG-NEXT: .LBB5_14: ; %Flow5
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, v4
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: uitofp_i128_to_f16:
+; GISEL: ; %bb.0: ; %itofp-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_or_b32_e32 v4, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v5, v1, v3
+; GISEL-NEXT: s_mov_b32 s4, 0
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB5_14
+; GISEL-NEXT: ; %bb.1: ; %itofp-if-end
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v0
+; GISEL-NEXT: v_ffbh_u32_e32 v4, v1
+; GISEL-NEXT: v_add_u32_e32 v5, 32, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v6, v2
+; GISEL-NEXT: v_min_u32_e32 v4, v4, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v3
+; GISEL-NEXT: v_add_u32_e32 v6, 32, v6
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_add_u32_e32 v4, 64, v4
+; GISEL-NEXT: v_min_u32_e32 v5, v5, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v5, v4, vcc
+; GISEL-NEXT: v_sub_u32_e32 v7, 0x80, v5
+; GISEL-NEXT: v_sub_u32_e32 v6, 0x7f, v5
+; GISEL-NEXT: v_cmp_ge_i32_e32 vcc, 24, v7
+; GISEL-NEXT: ; implicit-def: $vgpr4
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
+; GISEL-NEXT: v_add_u32_e32 v2, 0xffffff98, v5
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
+; GISEL-NEXT: ; implicit-def: $vgpr7
+; GISEL-NEXT: ; implicit-def: $vgpr0
+; GISEL-NEXT: ; implicit-def: $vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr2
+; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB5_13
+; GISEL-NEXT: ; %bb.4: ; %NodeBlock
+; GISEL-NEXT: v_cmp_le_i32_e32 vcc, 26, v7
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB5_8
+; GISEL-NEXT: ; %bb.5: ; %LeafBlock
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 26, v7
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB5_7
+; GISEL-NEXT: ; %bb.6: ; %itofp-sw-default
+; GISEL-NEXT: v_sub_u32_e32 v4, 0x66, v5
+; GISEL-NEXT: v_sub_u32_e32 v10, 64, v4
+; GISEL-NEXT: v_lshrrev_b64 v[8:9], v4, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[10:11], v10, v[2:3]
+; GISEL-NEXT: v_subrev_u32_e32 v12, 64, v4
+; GISEL-NEXT: v_or_b32_e32 v10, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v11, v9, v11
+; GISEL-NEXT: v_lshrrev_b64 v[8:9], v12, v[2:3]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
+; GISEL-NEXT: v_add_u32_e32 v5, 26, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GISEL-NEXT: v_sub_u32_e32 v10, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v8, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v9, v1, vcc
+; GISEL-NEXT: v_lshrrev_b64 v[8:9], v5, -1
+; GISEL-NEXT: v_lshlrev_b64 v[10:11], v10, -1
+; GISEL-NEXT: v_subrev_u32_e32 v13, 64, v5
+; GISEL-NEXT: v_or_b32_e32 v14, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v15, v9, v11
+; GISEL-NEXT: v_lshrrev_b64 v[10:11], v13, -1
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v10, v10, v14, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v11, v11, v15, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v8, 0, v8, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v9, 0, v9, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v5, v10, -1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v10, v11, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v2, v8, v2
+; GISEL-NEXT: v_and_b32_e32 v3, v9, v3
+; GISEL-NEXT: v_and_or_b32 v0, v5, v0, v2
+; GISEL-NEXT: v_and_or_b32 v1, v10, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v3, v12, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GISEL-NEXT: v_mov_b32_e32 v1, v4
+; GISEL-NEXT: v_mov_b32_e32 v2, v5
+; GISEL-NEXT: v_mov_b32_e32 v3, v6
+; GISEL-NEXT: .LBB5_7: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: .LBB5_8: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; GISEL-NEXT: ; %bb.9: ; %itofp-sw-bb
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: v_bfe_u32 v2, v0, 2, 1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_and_b32_e32 v2, 0x4000000, v0
+; GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 2, v[0:1]
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v6, v7
+; GISEL-NEXT: ; %bb.12: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB5_13: ; %Flow4
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_add_u32 v0, v6, 23, 1.0
+; GISEL-NEXT: v_mov_b32_e32 v1, 0x7fffff
+; GISEL-NEXT: v_and_or_b32 v0, v4, v1, v0
+; GISEL-NEXT: v_cvt_f16_f32_e32 v4, v0
+; GISEL-NEXT: .LBB5_14: ; %Flow5
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: v_mov_b32_e32 v0, v4
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = uitofp i128 %x to half
+ ret half %cvt
+}
+
+; FIXME: ExpandLargeFpConvert asserts on bfloat
+; define bfloat @sitofp_i128_to_bf16(i128 %x) {
+; %cvt = sitofp i128 %x to bfloat
+; ret bfloat %cvt
+; }
+
+; define bfloat @uitofp_i128_to_bf16(i128 %x) {
+; %cvt = uitofp i128 %x to bfloat
+; ret bfloat %cvt
+; }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GCN: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll b/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll
index 1acbb09..fbf2ee1 100644
--- a/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll
+++ b/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll
@@ -60,7 +60,6 @@ define amdgpu_kernel void @test_kernel(i32 %val) #0 {
; CHECK-NEXT: ; implicit-def: $sgpr15
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_or_saveexec_b64 s[34:35], -1
diff --git a/llvm/test/CodeGen/AMDGPU/lds-mixed-absolute-addresses-unused.ll b/llvm/test/CodeGen/AMDGPU/lds-mixed-absolute-addresses-unused.ll
new file mode 100644
index 0000000..d101d8d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/lds-mixed-absolute-addresses-unused.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds < %s 2>&1 | FileCheck %s
+; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s 2>&1 | FileCheck %s
+
+; This looks like a partially lowered module, but the non-lowered GV isn't used by any kernels.
+; In such cases, LowerModuleLDS is free to leave it in place and ignore it, and we want to make
+; sure LowerModuleLDS doesn't crash when it is re-run on such modules.
+@notLowered = addrspace(3) global i32 poison
+@lowered = addrspace(3) global i32 poison, !absolute_symbol !0
+
+@llvm.compiler.used = appending addrspace(1) global [1 x ptr] [ptr addrspacecast (ptr addrspace(3) @notLowered to ptr)], section "llvm.metadata"
+
+define amdgpu_kernel void @kern(i32 %val0) {
+; CHECK-LABEL: define amdgpu_kernel void @kern(
+; CHECK-SAME: i32 [[VAL0:%.*]]) {
+; CHECK-NEXT: [[VAL1:%.*]] = add i32 [[VAL0]], 4
+; CHECK-NEXT: store i32 [[VAL1]], ptr addrspace(3) @lowered, align 4
+; CHECK-NEXT: ret void
+;
+ %val1 = add i32 %val0, 4
+ store i32 %val1, ptr addrspace(3) @lowered
+ ret void
+}
+
+
+!0 = !{i32 0, i32 1}
diff --git a/llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll b/llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll
index b512a43..b1f4f2e 100644
--- a/llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll
+++ b/llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll
@@ -8,7 +8,7 @@
define amdgpu_kernel void @kern() {
%val0 = load i32, ptr addrspace(3) @var1
%val1 = add i32 %val0, 4
- store i32 %val1, ptr addrspace(3) @var1
+ store i32 %val1, ptr addrspace(3) @var2
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 5007f77..0ff5dd3 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -195,13 +195,13 @@
; GCN-O1-NEXT: Uniformity Analysis
; GCN-O1-NEXT: AMDGPU atomic optimizations
; GCN-O1-NEXT: Expand Atomic instructions
-; GCN-O1-NEXT: AMDGPU Promote Alloca
; GCN-O1-NEXT: Dominator Tree Construction
+; GCN-O1-NEXT: Natural Loop Information
+; GCN-O1-NEXT: AMDGPU Promote Alloca
; GCN-O1-NEXT: Cycle Info Analysis
; GCN-O1-NEXT: Uniformity Analysis
; GCN-O1-NEXT: AMDGPU IR optimizations
; GCN-O1-NEXT: Basic Alias Analysis (stateless AA impl)
-; GCN-O1-NEXT: Natural Loop Information
; GCN-O1-NEXT: Canonicalize natural loops
; GCN-O1-NEXT: Scalar Evolution Analysis
; GCN-O1-NEXT: Loop Pass Manager
@@ -470,9 +470,9 @@
; GCN-O1-OPTS-NEXT: Uniformity Analysis
; GCN-O1-OPTS-NEXT: AMDGPU atomic optimizations
; GCN-O1-OPTS-NEXT: Expand Atomic instructions
-; GCN-O1-OPTS-NEXT: AMDGPU Promote Alloca
; GCN-O1-OPTS-NEXT: Dominator Tree Construction
; GCN-O1-OPTS-NEXT: Natural Loop Information
+; GCN-O1-OPTS-NEXT: AMDGPU Promote Alloca
; GCN-O1-OPTS-NEXT: Canonicalize natural loops
; GCN-O1-OPTS-NEXT: Lazy Branch Probability Analysis
; GCN-O1-OPTS-NEXT: Lazy Block Frequency Analysis
@@ -775,9 +775,9 @@
; GCN-O2-NEXT: Uniformity Analysis
; GCN-O2-NEXT: AMDGPU atomic optimizations
; GCN-O2-NEXT: Expand Atomic instructions
-; GCN-O2-NEXT: AMDGPU Promote Alloca
; GCN-O2-NEXT: Dominator Tree Construction
; GCN-O2-NEXT: Natural Loop Information
+; GCN-O2-NEXT: AMDGPU Promote Alloca
; GCN-O2-NEXT: Split GEPs to a variadic base and a constant offset for better CSE
; GCN-O2-NEXT: Scalar Evolution Analysis
; GCN-O2-NEXT: Straight line strength reduction
@@ -1084,9 +1084,9 @@
; GCN-O3-NEXT: Uniformity Analysis
; GCN-O3-NEXT: AMDGPU atomic optimizations
; GCN-O3-NEXT: Expand Atomic instructions
-; GCN-O3-NEXT: AMDGPU Promote Alloca
; GCN-O3-NEXT: Dominator Tree Construction
; GCN-O3-NEXT: Natural Loop Information
+; GCN-O3-NEXT: AMDGPU Promote Alloca
; GCN-O3-NEXT: Split GEPs to a variadic base and a constant offset for better CSE
; GCN-O3-NEXT: Scalar Evolution Analysis
; GCN-O3-NEXT: Straight line strength reduction
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll
index b4415c1..f6197e0 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll
@@ -1,132 +1,44 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-SDAG-W32 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-GISEL-W32 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=GFX12 %s
-declare <2 x i32> @llvm.amdgcn.global.load.tr.v2i32.p1(ptr addrspace(1))
-declare <8 x i16> @llvm.amdgcn.global.load.tr.v8i16.p1(ptr addrspace(1))
-declare <8 x half> @llvm.amdgcn.global.load.tr.v8f16.p1(ptr addrspace(1))
-declare <8 x bfloat> @llvm.amdgcn.global.load.tr.v8bf16.p1(ptr addrspace(1))
+declare <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32.p1(ptr addrspace(1))
+declare <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16.p1(ptr addrspace(1))
define amdgpu_kernel void @global_load_tr_b64(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W32-LABEL: global_load_tr_b64:
-; GFX12-SDAG-W32: ; %bb.0: ; %entry
-; GFX12-SDAG-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W32-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-SDAG-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_load_tr_b64 v[0:1], v2, s[0:1] offset:32
-; GFX12-SDAG-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-SDAG-W32-NEXT: s_nop 0
-; GFX12-SDAG-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W32-NEXT: s_endpgm
-;
-; GFX12-GISEL-W32-LABEL: global_load_tr_b64:
-; GFX12-GISEL-W32: ; %bb.0: ; %entry
-; GFX12-GISEL-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W32-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-GISEL-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_load_tr_b64 v[0:1], v2, s[0:1] offset:32
-; GFX12-GISEL-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-GISEL-W32-NEXT: s_nop 0
-; GFX12-GISEL-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W32-NEXT: s_endpgm
+; GFX12-LABEL: global_load_tr_b64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_tr_b64 v[0:1], v2, s[0:1] offset:32
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <2 x i32> @llvm.amdgcn.global.load.tr.v2i32.p1(ptr addrspace(1) %gep)
+ %val = call <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32.p1(ptr addrspace(1) %gep)
store <2 x i32> %val, ptr addrspace(1) %use
ret void
}
-define amdgpu_kernel void @global_load_tr_b128_i16(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W32-LABEL: global_load_tr_b128_i16:
-; GFX12-SDAG-W32: ; %bb.0: ; %entry
-; GFX12-SDAG-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-SDAG-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-SDAG-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-SDAG-W32-NEXT: s_nop 0
-; GFX12-SDAG-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W32-NEXT: s_endpgm
-;
-; GFX12-GISEL-W32-LABEL: global_load_tr_b128_i16:
-; GFX12-GISEL-W32: ; %bb.0: ; %entry
-; GFX12-GISEL-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-GISEL-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-GISEL-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-GISEL-W32-NEXT: s_nop 0
-; GFX12-GISEL-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W32-NEXT: s_endpgm
+define amdgpu_kernel void @global_load_tr_b128(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX12-LABEL: global_load_tr_b128:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b128 v4, v[0:3], s[2:3]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <8 x i16> @llvm.amdgcn.global.load.tr.v8i16.p1(ptr addrspace(1) %gep)
+ %val = call <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16.p1(ptr addrspace(1) %gep)
store <8 x i16> %val, ptr addrspace(1) %use
ret void
}
-
-define amdgpu_kernel void @global_load_tr_b128_half(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W32-LABEL: global_load_tr_b128_half:
-; GFX12-SDAG-W32: ; %bb.0: ; %entry
-; GFX12-SDAG-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-SDAG-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-SDAG-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-SDAG-W32-NEXT: s_nop 0
-; GFX12-SDAG-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W32-NEXT: s_endpgm
-;
-; GFX12-GISEL-W32-LABEL: global_load_tr_b128_half:
-; GFX12-GISEL-W32: ; %bb.0: ; %entry
-; GFX12-GISEL-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-GISEL-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-GISEL-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-GISEL-W32-NEXT: s_nop 0
-; GFX12-GISEL-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W32-NEXT: s_endpgm
-entry:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <8 x half> @llvm.amdgcn.global.load.tr.v8f16.p1(ptr addrspace(1) %gep)
- store <8 x half> %val, ptr addrspace(1) %use
- ret void
-}
-
-define amdgpu_kernel void @global_load_tr_b128_bfloat(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W32-LABEL: global_load_tr_b128_bfloat:
-; GFX12-SDAG-W32: ; %bb.0: ; %entry
-; GFX12-SDAG-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-SDAG-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-SDAG-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-SDAG-W32-NEXT: s_nop 0
-; GFX12-SDAG-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W32-NEXT: s_endpgm
-;
-; GFX12-GISEL-W32-LABEL: global_load_tr_b128_bfloat:
-; GFX12-GISEL-W32: ; %bb.0: ; %entry
-; GFX12-GISEL-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-GISEL-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-GISEL-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-GISEL-W32-NEXT: s_nop 0
-; GFX12-GISEL-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W32-NEXT: s_endpgm
-entry:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <8 x bfloat> @llvm.amdgcn.global.load.tr.v8bf16.p1(ptr addrspace(1) %gep)
- store <8 x bfloat> %val, ptr addrspace(1) %use
- ret void
-}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll
index 7ad1416..a2dc366 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll
@@ -1,132 +1,44 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-SDAG-W64 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-GISEL-W64 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefix=GFX12 %s
-declare i32 @llvm.amdgcn.global.load.tr.i32.p1(ptr addrspace(1))
-declare <4 x i16> @llvm.amdgcn.global.load.tr.v4i16.p1(ptr addrspace(1))
-declare <4 x half> @llvm.amdgcn.global.load.tr.v4f16.p1(ptr addrspace(1))
-declare <4 x bfloat> @llvm.amdgcn.global.load.tr.v4bf16.p1(ptr addrspace(1))
+declare i32 @llvm.amdgcn.global.load.tr.b64.i32.p1(ptr addrspace(1))
+declare <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16.p1(ptr addrspace(1))
define amdgpu_kernel void @global_load_tr_b64(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W64-LABEL: global_load_tr_b64:
-; GFX12-SDAG-W64: ; %bb.0: ; %entry
-; GFX12-SDAG-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W64-NEXT: v_mov_b32_e32 v0, 0
-; GFX12-SDAG-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_load_tr_b64 v1, v0, s[0:1] offset:32
-; GFX12-SDAG-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_store_b32 v0, v1, s[2:3]
-; GFX12-SDAG-W64-NEXT: s_nop 0
-; GFX12-SDAG-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W64-NEXT: s_endpgm
-;
-; GFX12-GISEL-W64-LABEL: global_load_tr_b64:
-; GFX12-GISEL-W64: ; %bb.0: ; %entry
-; GFX12-GISEL-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W64-NEXT: v_mov_b32_e32 v0, 0
-; GFX12-GISEL-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_load_tr_b64 v1, v0, s[0:1] offset:32
-; GFX12-GISEL-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_store_b32 v0, v1, s[2:3]
-; GFX12-GISEL-W64-NEXT: s_nop 0
-; GFX12-GISEL-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W64-NEXT: s_endpgm
+; GFX12-LABEL: global_load_tr_b64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_tr_b64 v1, v0, s[0:1] offset:32
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b32 v0, v1, s[2:3]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call i32 @llvm.amdgcn.global.load.tr.i32.p1(ptr addrspace(1) %gep)
+ %val = call i32 @llvm.amdgcn.global.load.tr.b64.i32.p1(ptr addrspace(1) %gep)
store i32 %val, ptr addrspace(1) %use
ret void
}
-define amdgpu_kernel void @global_load_tr_b128_i16(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W64-LABEL: global_load_tr_b128_i16:
-; GFX12-SDAG-W64: ; %bb.0: ; %entry
-; GFX12-SDAG-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-SDAG-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-SDAG-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-SDAG-W64-NEXT: s_nop 0
-; GFX12-SDAG-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W64-NEXT: s_endpgm
-;
-; GFX12-GISEL-W64-LABEL: global_load_tr_b128_i16:
-; GFX12-GISEL-W64: ; %bb.0: ; %entry
-; GFX12-GISEL-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-GISEL-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-GISEL-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-GISEL-W64-NEXT: s_nop 0
-; GFX12-GISEL-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W64-NEXT: s_endpgm
+define amdgpu_kernel void @global_load_tr_b128(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX12-LABEL: global_load_tr_b128:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <4 x i16> @llvm.amdgcn.global.load.tr.v4i16.p1(ptr addrspace(1) %gep)
+ %val = call <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16.p1(ptr addrspace(1) %gep)
store <4 x i16> %val, ptr addrspace(1) %use
ret void
}
-
-define amdgpu_kernel void @global_load_tr_b128_half(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W64-LABEL: global_load_tr_b128_half:
-; GFX12-SDAG-W64: ; %bb.0: ; %entry
-; GFX12-SDAG-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-SDAG-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-SDAG-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-SDAG-W64-NEXT: s_nop 0
-; GFX12-SDAG-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W64-NEXT: s_endpgm
-;
-; GFX12-GISEL-W64-LABEL: global_load_tr_b128_half:
-; GFX12-GISEL-W64: ; %bb.0: ; %entry
-; GFX12-GISEL-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-GISEL-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-GISEL-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-GISEL-W64-NEXT: s_nop 0
-; GFX12-GISEL-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W64-NEXT: s_endpgm
-entry:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <4 x half> @llvm.amdgcn.global.load.tr.v4f16.p1(ptr addrspace(1) %gep)
- store <4 x half> %val, ptr addrspace(1) %use
- ret void
-}
-
-define amdgpu_kernel void @global_load_tr_b128_bfloat(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W64-LABEL: global_load_tr_b128_bfloat:
-; GFX12-SDAG-W64: ; %bb.0: ; %entry
-; GFX12-SDAG-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-SDAG-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-SDAG-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-SDAG-W64-NEXT: s_nop 0
-; GFX12-SDAG-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W64-NEXT: s_endpgm
-;
-; GFX12-GISEL-W64-LABEL: global_load_tr_b128_bfloat:
-; GFX12-GISEL-W64: ; %bb.0: ; %entry
-; GFX12-GISEL-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-GISEL-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-GISEL-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-GISEL-W64-NEXT: s_nop 0
-; GFX12-GISEL-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W64-NEXT: s_endpgm
-entry:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <4 x bfloat> @llvm.amdgcn.global.load.tr.v4bf16.p1(ptr addrspace(1) %gep)
- store <4 x bfloat> %val, ptr addrspace(1) %use
- ret void
-}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir
index 091b29c..e93595b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir
@@ -4,6 +4,8 @@
--- |
define amdgpu_kernel void @single-wave-phase-2b(ptr addrspace(3) noalias %in0, ptr addrspace(3) noalias %in1, ptr addrspace(3) noalias %in2, ptr addrspace(3) noalias %in3, ptr addrspace(3) noalias %in4, ptr addrspace(3) noalias %in5, ptr addrspace(3) noalias %in6, ptr addrspace(3) noalias %in7, ptr addrspace(3) noalias %in8, ptr addrspace(3) noalias %in9, ptr addrspace(3) noalias %in10, ptr addrspace(3) noalias %in11, ptr addrspace(7) noalias %in12, ptr addrspace(7) noalias %in13, ptr addrspace(7) noalias %in14, ptr addrspace(7) noalias %in15, ptr addrspace(7) noalias %in16, ptr addrspace(7) noalias %in17, ptr addrspace(7) noalias %in18, ptr addrspace(7) noalias %in19, ptr addrspace(7) noalias %in20, ptr addrspace(7) noalias %in21, ptr addrspace(7) noalias %in22, ptr addrspace(7) noalias %in23, ptr addrspace(7) noalias %in24, ptr addrspace(7) noalias %in25, ptr addrspace(7) noalias %in26, ptr addrspace(7) noalias %in27, ptr addrspace(7) noalias %in28, ptr addrspace(7) noalias %in29) #0 { ret void }
+ attributes #0 = { nounwind "amdgpu-waves-per-eu"="1,1" "amdgpu-flat-work-group-size"="1,256" }
+
!0 = distinct !{!0}
!1 = !{!1, !0}
...
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll
index 1348315..7b1f55e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll
@@ -22,18 +22,36 @@ main_body:
define amdgpu_ps <4 x float> @load_2dmsaa_both(<8 x i32> inreg %rsrc, ptr addrspace(1) inreg %out, i32 %s, i32 %t, i32 %fragid) {
; GFX11-LABEL: load_2dmsaa_both:
; GFX11: ; %bb.0: ; %main_body
-; GFX11-NEXT: image_msaa_load v[0:4], v[0:2], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe lwe ; encoding: [0x98,0x02,0x60,0xf0,0x00,0x00,0x60,0x00]
-; GFX11-NEXT: v_mov_b32_e32 v5, 0 ; encoding: [0x80,0x02,0x0a,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x08,0x05]
+; GFX11-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x06,0x07]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4) ; encoding: [0x42,0x02,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v9, v8 ; encoding: [0x08,0x03,0x12,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v10, v8 ; encoding: [0x08,0x03,0x14,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v11, v8 ; encoding: [0x08,0x03,0x16,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v12, v8 ; encoding: [0x08,0x03,0x18,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v0, v8 :: v_dual_mov_b32 v1, v9 ; encoding: [0x08,0x01,0x10,0xca,0x09,0x01,0x00,0x00]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; encoding: [0x93,0x01,0x87,0xbf]
+; GFX11-NEXT: v_dual_mov_b32 v2, v10 :: v_dual_mov_b32 v3, v11 ; encoding: [0x0a,0x01,0x10,0xca,0x0b,0x01,0x02,0x02]
+; GFX11-NEXT: v_mov_b32_e32 v4, v12 ; encoding: [0x0c,0x03,0x08,0x7e]
+; GFX11-NEXT: image_msaa_load v[0:4], v[5:7], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe lwe ; encoding: [0x98,0x02,0x60,0xf0,0x05,0x00,0x60,0x00]
; GFX11-NEXT: s_waitcnt vmcnt(0) ; encoding: [0xf7,0x03,0x89,0xbf]
-; GFX11-NEXT: global_store_b32 v5, v4, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x05,0x04,0x08,0x00]
+; GFX11-NEXT: global_store_b32 v8, v4, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x08,0x04,0x08,0x00]
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: load_2dmsaa_both:
; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: image_msaa_load v[0:4], [v0, v1, v2], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe lwe ; encoding: [0x0e,0x20,0x86,0xe4,0x00,0x01,0x00,0x00,0x00,0x01,0x02,0x00]
-; GFX12-NEXT: v_mov_b32_e32 v5, 0 ; encoding: [0x80,0x02,0x0a,0x7e]
+; GFX12-NEXT: v_dual_mov_b32 v7, v0 :: v_dual_mov_b32 v8, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x08,0x07]
+; GFX12-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v6, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x06,0x05]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; encoding: [0x22,0x01,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v9, v8 :: v_dual_mov_b32 v10, v8 ; encoding: [0x08,0x01,0x10,0xca,0x08,0x01,0x0a,0x09]
+; GFX12-NEXT: v_dual_mov_b32 v11, v8 :: v_dual_mov_b32 v12, v8 ; encoding: [0x08,0x01,0x10,0xca,0x08,0x01,0x0c,0x0b]
+; GFX12-NEXT: v_dual_mov_b32 v0, v8 :: v_dual_mov_b32 v1, v9 ; encoding: [0x08,0x01,0x10,0xca,0x09,0x01,0x00,0x00]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; encoding: [0x92,0x01,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v2, v10 :: v_dual_mov_b32 v3, v11 ; encoding: [0x0a,0x01,0x10,0xca,0x0b,0x01,0x02,0x02]
+; GFX12-NEXT: v_mov_b32_e32 v4, v12 ; encoding: [0x0c,0x03,0x08,0x7e]
+; GFX12-NEXT: image_msaa_load v[0:4], [v7, v6, v5], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe lwe ; encoding: [0x0e,0x20,0x86,0xe4,0x00,0x01,0x00,0x00,0x07,0x06,0x05,0x00]
; GFX12-NEXT: s_wait_loadcnt 0x0 ; encoding: [0x00,0x00,0xc0,0xbf]
-; GFX12-NEXT: global_store_b32 v5, v4, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x02,0x05,0x00,0x00,0x00]
+; GFX12-NEXT: global_store_b32 v8, v4, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x02,0x08,0x00,0x00,0x00]
; GFX12-NEXT: ; return to shader part epilog
main_body:
%v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.2dmsaa.v4f32i32.i32(i32 2, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 3, i32 0)
@@ -63,18 +81,37 @@ main_body:
define amdgpu_ps <4 x float> @load_2darraymsaa_tfe(<8 x i32> inreg %rsrc, ptr addrspace(1) inreg %out, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
; GFX11-LABEL: load_2darraymsaa_tfe:
; GFX11: ; %bb.0: ; %main_body
-; GFX11-NEXT: image_msaa_load v[0:4], v[0:3], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe ; encoding: [0x9c,0x08,0x60,0xf0,0x00,0x00,0x20,0x00]
-; GFX11-NEXT: v_mov_b32_e32 v5, 0 ; encoding: [0x80,0x02,0x0a,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v9, 0 :: v_dual_mov_b32 v8, v3 ; encoding: [0x80,0x00,0x10,0xca,0x03,0x01,0x08,0x09]
+; GFX11-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x06,0x07]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4) ; encoding: [0x42,0x02,0x87,0xbf]
+; GFX11-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v10, v9 ; encoding: [0x00,0x01,0x10,0xca,0x09,0x01,0x0a,0x05]
+; GFX11-NEXT: v_mov_b32_e32 v11, v9 ; encoding: [0x09,0x03,0x16,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v12, v9 ; encoding: [0x09,0x03,0x18,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v13, v9 ; encoding: [0x09,0x03,0x1a,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v0, v9 :: v_dual_mov_b32 v1, v10 ; encoding: [0x09,0x01,0x10,0xca,0x0a,0x01,0x00,0x00]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; encoding: [0x93,0x01,0x87,0xbf]
+; GFX11-NEXT: v_dual_mov_b32 v2, v11 :: v_dual_mov_b32 v3, v12 ; encoding: [0x0b,0x01,0x10,0xca,0x0c,0x01,0x02,0x02]
+; GFX11-NEXT: v_mov_b32_e32 v4, v13 ; encoding: [0x0d,0x03,0x08,0x7e]
+; GFX11-NEXT: image_msaa_load v[0:4], v[5:8], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe ; encoding: [0x9c,0x08,0x60,0xf0,0x05,0x00,0x20,0x00]
; GFX11-NEXT: s_waitcnt vmcnt(0) ; encoding: [0xf7,0x03,0x89,0xbf]
-; GFX11-NEXT: global_store_b32 v5, v4, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x05,0x04,0x08,0x00]
+; GFX11-NEXT: global_store_b32 v9, v4, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x09,0x04,0x08,0x00]
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: load_2darraymsaa_tfe:
; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: image_msaa_load v[0:4], [v0, v1, v2, v3], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe ; encoding: [0x0f,0x20,0x06,0xe6,0x00,0x00,0x00,0x00,0x00,0x01,0x02,0x03]
-; GFX12-NEXT: v_mov_b32_e32 v5, 0 ; encoding: [0x80,0x02,0x0a,0x7e]
+; GFX12-NEXT: v_mov_b32_e32 v9, 0 ; encoding: [0x80,0x02,0x12,0x7e]
+; GFX12-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v6, v2 ; encoding: [0x03,0x01,0x10,0xca,0x02,0x01,0x06,0x05]
+; GFX12-NEXT: v_dual_mov_b32 v7, v1 :: v_dual_mov_b32 v8, v0 ; encoding: [0x01,0x01,0x10,0xca,0x00,0x01,0x08,0x07]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; encoding: [0x23,0x01,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v10, v9 :: v_dual_mov_b32 v11, v9 ; encoding: [0x09,0x01,0x10,0xca,0x09,0x01,0x0a,0x0a]
+; GFX12-NEXT: v_dual_mov_b32 v12, v9 :: v_dual_mov_b32 v13, v9 ; encoding: [0x09,0x01,0x10,0xca,0x09,0x01,0x0c,0x0c]
+; GFX12-NEXT: v_dual_mov_b32 v0, v9 :: v_dual_mov_b32 v1, v10 ; encoding: [0x09,0x01,0x10,0xca,0x0a,0x01,0x00,0x00]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; encoding: [0x92,0x01,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v2, v11 :: v_dual_mov_b32 v3, v12 ; encoding: [0x0b,0x01,0x10,0xca,0x0c,0x01,0x02,0x02]
+; GFX12-NEXT: v_mov_b32_e32 v4, v13 ; encoding: [0x0d,0x03,0x08,0x7e]
+; GFX12-NEXT: image_msaa_load v[0:4], [v8, v7, v6, v5], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe ; encoding: [0x0f,0x20,0x06,0xe6,0x00,0x00,0x00,0x00,0x08,0x07,0x06,0x05]
; GFX12-NEXT: s_wait_loadcnt 0x0 ; encoding: [0x00,0x00,0xc0,0xbf]
-; GFX12-NEXT: global_store_b32 v5, v4, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x02,0x05,0x00,0x00,0x00]
+; GFX12-NEXT: global_store_b32 v9, v4, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x02,0x09,0x00,0x00,0x00]
; GFX12-NEXT: ; return to shader part epilog
main_body:
%v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.2darraymsaa.v4f32i32.i32(i32 8, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
@@ -155,18 +192,31 @@ main_body:
define amdgpu_ps <4 x half> @load_2dmsaa_tfe_d16(<8 x i32> inreg %rsrc, ptr addrspace(1) inreg %out, i32 %s, i32 %t, i32 %fragid) {
; GFX11-LABEL: load_2dmsaa_tfe_d16:
; GFX11: ; %bb.0: ; %main_body
-; GFX11-NEXT: image_msaa_load v[0:2], v[0:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe d16 ; encoding: [0x98,0x01,0x62,0xf0,0x00,0x00,0x20,0x00]
-; GFX11-NEXT: v_mov_b32_e32 v3, 0 ; encoding: [0x80,0x02,0x06,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v6, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x06,0x03]
+; GFX11-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v4, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x04,0x05]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; encoding: [0x22,0x01,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v7, v6 ; encoding: [0x06,0x03,0x0e,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v8, v6 ; encoding: [0x06,0x03,0x10,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 ; encoding: [0x06,0x01,0x10,0xca,0x07,0x01,0x00,0x00]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; encoding: [0x02,0x00,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v2, v8 ; encoding: [0x08,0x03,0x04,0x7e]
+; GFX11-NEXT: image_msaa_load v[0:2], v[3:5], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe d16 ; encoding: [0x98,0x01,0x62,0xf0,0x03,0x00,0x20,0x00]
; GFX11-NEXT: s_waitcnt vmcnt(0) ; encoding: [0xf7,0x03,0x89,0xbf]
-; GFX11-NEXT: global_store_b32 v3, v2, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x03,0x02,0x08,0x00]
+; GFX11-NEXT: global_store_b32 v6, v2, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x06,0x02,0x08,0x00]
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: load_2dmsaa_tfe_d16:
; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: image_msaa_load v[0:2], [v0, v1, v2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe d16 ; encoding: [0x2e,0x20,0x46,0xe4,0x00,0x00,0x00,0x00,0x00,0x01,0x02,0x00]
-; GFX12-NEXT: v_mov_b32_e32 v3, 0 ; encoding: [0x80,0x02,0x06,0x7e]
+; GFX12-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v6, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x06,0x05]
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x04,0x03]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; encoding: [0x92,0x00,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v8, v6 ; encoding: [0x06,0x01,0x10,0xca,0x06,0x01,0x08,0x07]
+; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 ; encoding: [0x06,0x01,0x10,0xca,0x07,0x01,0x00,0x00]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) ; encoding: [0x02,0x00,0x87,0xbf]
+; GFX12-NEXT: v_mov_b32_e32 v2, v8 ; encoding: [0x08,0x03,0x04,0x7e]
+; GFX12-NEXT: image_msaa_load v[0:2], [v5, v4, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe d16 ; encoding: [0x2e,0x20,0x46,0xe4,0x00,0x00,0x00,0x00,0x05,0x04,0x03,0x00]
; GFX12-NEXT: s_wait_loadcnt 0x0 ; encoding: [0x00,0x00,0xc0,0xbf]
-; GFX12-NEXT: global_store_b32 v3, v2, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x03,0x00,0x00,0x00]
+; GFX12-NEXT: global_store_b32 v6, v2, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x06,0x00,0x00,0x00]
; GFX12-NEXT: ; return to shader part epilog
main_body:
%v = call {<4 x half>,i32} @llvm.amdgcn.image.msaa.load.2dmsaa.v4f16i32.i32(i32 1, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
@@ -196,18 +246,31 @@ main_body:
define amdgpu_ps <4 x half> @load_2darraymsaa_tfe_d16(<8 x i32> inreg %rsrc, ptr addrspace(1) inreg %out, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
; GFX11-LABEL: load_2darraymsaa_tfe_d16:
; GFX11: ; %bb.0: ; %main_body
-; GFX11-NEXT: image_msaa_load v[0:2], v[0:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe d16 ; encoding: [0x9c,0x01,0x62,0xf0,0x00,0x00,0x20,0x00]
-; GFX11-NEXT: v_mov_b32_e32 v3, 0 ; encoding: [0x80,0x02,0x06,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v6, v0 :: v_dual_mov_b32 v7, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x06,0x06]
+; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v5, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x04,0x04]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; encoding: [0x22,0x01,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v8, v7 ; encoding: [0x07,0x03,0x10,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v9, v7 ; encoding: [0x07,0x03,0x12,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 ; encoding: [0x07,0x01,0x10,0xca,0x08,0x01,0x00,0x00]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; encoding: [0x02,0x00,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v2, v9 ; encoding: [0x09,0x03,0x04,0x7e]
+; GFX11-NEXT: image_msaa_load v[0:2], [v6, v5, v4, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe d16 ; encoding: [0x9d,0x01,0x62,0xf0,0x06,0x00,0x20,0x00,0x05,0x04,0x03,0x00]
; GFX11-NEXT: s_waitcnt vmcnt(0) ; encoding: [0xf7,0x03,0x89,0xbf]
-; GFX11-NEXT: global_store_b32 v3, v2, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x03,0x02,0x08,0x00]
+; GFX11-NEXT: global_store_b32 v7, v2, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x07,0x02,0x08,0x00]
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: load_2darraymsaa_tfe_d16:
; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: image_msaa_load v[0:2], [v0, v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe d16 ; encoding: [0x2f,0x20,0x46,0xe4,0x00,0x00,0x00,0x00,0x00,0x01,0x02,0x03]
-; GFX12-NEXT: v_mov_b32_e32 v3, 0 ; encoding: [0x80,0x02,0x06,0x7e]
+; GFX12-NEXT: v_dual_mov_b32 v6, v0 :: v_dual_mov_b32 v7, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x06,0x06]
+; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v5, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x04,0x04]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; encoding: [0x92,0x00,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v8, v7 :: v_dual_mov_b32 v9, v7 ; encoding: [0x07,0x01,0x10,0xca,0x07,0x01,0x08,0x08]
+; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 ; encoding: [0x07,0x01,0x10,0xca,0x08,0x01,0x00,0x00]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) ; encoding: [0x02,0x00,0x87,0xbf]
+; GFX12-NEXT: v_mov_b32_e32 v2, v9 ; encoding: [0x09,0x03,0x04,0x7e]
+; GFX12-NEXT: image_msaa_load v[0:2], [v6, v5, v4, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe d16 ; encoding: [0x2f,0x20,0x46,0xe4,0x00,0x00,0x00,0x00,0x06,0x05,0x04,0x03]
; GFX12-NEXT: s_wait_loadcnt 0x0 ; encoding: [0x00,0x00,0xc0,0xbf]
-; GFX12-NEXT: global_store_b32 v3, v2, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x03,0x00,0x00,0x00]
+; GFX12-NEXT: global_store_b32 v7, v2, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x07,0x00,0x00,0x00]
; GFX12-NEXT: ; return to shader part epilog
main_body:
%v = call {<4 x half>,i32} @llvm.amdgcn.image.msaa.load.2darraymsaa.v4f16i32.i32(i32 1, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll
index 429528e..e3dd036 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll
@@ -147,6 +147,34 @@ main_body:
ret half %res
}
+define amdgpu_ps half @v_interp_rtz_f16(float inreg %i, float inreg %j, i32 inreg %m0) #0 {
+; GCN-LABEL: v_interp_rtz_f16:
+; GCN: ; %bb.0: ; %main_body
+; GCN-NEXT: s_mov_b32 s3, exec_lo
+; GCN-NEXT: s_wqm_b32 exec_lo, exec_lo
+; GCN-NEXT: s_mov_b32 m0, s2
+; GCN-NEXT: lds_param_load v1, attr0.x wait_vdst:15
+; GCN-NEXT: s_mov_b32 exec_lo, s3
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v2, s1
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GCN-NEXT: v_interp_p10_rtz_f16_f32 v3, v1, v0, v1 wait_exp:0
+; GCN-NEXT: v_interp_p10_rtz_f16_f32 v0, v1, v0, v1 op_sel:[1,0,1,0] wait_exp:7
+; GCN-NEXT: v_interp_p2_rtz_f16_f32 v3, v1, v2, v3 wait_exp:7
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GCN-NEXT: v_interp_p2_rtz_f16_f32 v0, v1, v2, v0 op_sel:[1,0,0,0] wait_exp:7
+; GCN-NEXT: v_add_f16_e32 v0, v3, v0
+; GCN-NEXT: ; return to shader part epilog
+main_body:
+ %p0 = call float @llvm.amdgcn.lds.param.load(i32 0, i32 0, i32 %m0)
+ %l_p0 = call float @llvm.amdgcn.interp.p10.rtz.f16(float %p0, float %i, float %p0, i1 0)
+ %l_p1 = call half @llvm.amdgcn.interp.p2.rtz.f16(float %p0, float %j, float %l_p0, i1 0)
+ %h_p0 = call float @llvm.amdgcn.interp.p10.rtz.f16(float %p0, float %i, float %p0, i1 1)
+ %h_p1 = call half @llvm.amdgcn.interp.p2.rtz.f16(float %p0, float %j, float %h_p0, i1 1)
+ %res = fadd half %l_p1, %h_p1
+ ret half %res
+}
+
define amdgpu_ps half @v_interp_f16_imm_params(float inreg %i, float inreg %j) #0 {
; GCN-LABEL: v_interp_f16_imm_params:
; GCN: ; %bb.0: ; %main_body
@@ -172,6 +200,8 @@ declare float @llvm.amdgcn.interp.inreg.p10(float, float, float) #0
declare float @llvm.amdgcn.interp.inreg.p2(float, float, float) #0
declare float @llvm.amdgcn.interp.inreg.p10.f16(float, float, float, i1) #0
declare half @llvm.amdgcn.interp.inreg.p2.f16(float, float, float, i1) #0
+declare float @llvm.amdgcn.interp.p10.rtz.f16(float, float, float, i1) #0
+declare half @llvm.amdgcn.interp.p2.rtz.f16(float, float, float, i1) #0
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare void @llvm.amdgcn.exp.f16(i32, i32, float, float, float, float, i1, i1) #0
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll
new file mode 100644
index 0000000..fdcb177
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll
@@ -0,0 +1,333 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -misched-cluster=0 < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -misched-cluster=0 -amdgpu-igrouplp-exact-solver-max-branches=250000 < %s | FileCheck -check-prefix=EXACTCUTOFF %s
+
+declare <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half>, <16 x half>, <8 x half>, i16)
+
+define amdgpu_kernel void @test_sched_group_barrier_pipeline_SWMMAC_cluster(ptr addrspace(3) noalias %in, ptr addrspace(3) noalias %out) #0 {
+; GCN-LABEL: test_sched_group_barrier_pipeline_SWMMAC_cluster:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GCN-NEXT: v_lshlrev_b32_e32 v28, 4, v0
+; GCN-NEXT: v_mov_b32_e32 v48, 0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v28
+; GCN-NEXT: v_dual_mov_b32 v50, s1 :: v_dual_add_nc_u32 v49, s1, v28
+; GCN-NEXT: ds_load_b128 v[8:11], v0
+; GCN-NEXT: ds_load_b128 v[12:15], v0 offset:512
+; GCN-NEXT: ds_load_b128 v[16:19], v0 offset:1536
+; GCN-NEXT: ds_load_b128 v[20:23], v0 offset:3072
+; GCN-NEXT: ds_load_b128 v[24:27], v0 offset:5120
+; GCN-NEXT: ds_load_b128 v[4:7], v0 offset:11280
+; GCN-NEXT: ds_load_b128 v[0:3], v0 offset:11264
+; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(7) SyncID(0)
+; GCN-NEXT: s_wait_dscnt 0x6
+; GCN-NEXT: v_mov_b32_e32 v31, v11
+; GCN-NEXT: s_wait_dscnt 0x5
+; GCN-NEXT: v_mov_b32_e32 v35, v15
+; GCN-NEXT: s_wait_dscnt 0x4
+; GCN-NEXT: v_mov_b32_e32 v39, v19
+; GCN-NEXT: s_wait_dscnt 0x3
+; GCN-NEXT: v_mov_b32_e32 v43, v23
+; GCN-NEXT: s_wait_dscnt 0x2
+; GCN-NEXT: v_dual_mov_b32 v47, v27 :: v_dual_mov_b32 v30, v10
+; GCN-NEXT: v_dual_mov_b32 v29, v9 :: v_dual_mov_b32 v28, v8
+; GCN-NEXT: v_dual_mov_b32 v34, v14 :: v_dual_mov_b32 v33, v13
+; GCN-NEXT: v_mov_b32_e32 v32, v12
+; GCN-NEXT: v_dual_mov_b32 v38, v18 :: v_dual_mov_b32 v37, v17
+; GCN-NEXT: v_mov_b32_e32 v36, v16
+; GCN-NEXT: v_dual_mov_b32 v42, v22 :: v_dual_mov_b32 v41, v21
+; GCN-NEXT: v_mov_b32_e32 v40, v20
+; GCN-NEXT: v_dual_mov_b32 v46, v26 :: v_dual_mov_b32 v45, v25
+; GCN-NEXT: v_mov_b32_e32 v44, v24
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[28:31], v[8:11], v[0:7], v48
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[32:35], v[12:15], v[0:7], v48
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[36:39], v[16:19], v[0:7], v48
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[40:43], v[20:23], v[0:7], v48
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[44:47], v[24:27], v[0:7], v48
+; GCN-NEXT: ds_store_b128 v49, v[28:31]
+; GCN-NEXT: ds_store_b128 v50, v[32:35] offset:512
+; GCN-NEXT: ds_store_b128 v50, v[36:39] offset:1024
+; GCN-NEXT: ds_store_b128 v50, v[40:43] offset:1536
+; GCN-NEXT: ds_store_b128 v50, v[44:47] offset:2048
+; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(5) SyncID(0)
+; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(5) SyncID(0)
+; GCN-NEXT: s_endpgm
+;
+; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_SWMMAC_cluster:
+; EXACTCUTOFF: ; %bb.0: ; %entry
+; EXACTCUTOFF-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v28, 4, v0
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v48, 0
+; EXACTCUTOFF-NEXT: s_wait_kmcnt 0x0
+; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; EXACTCUTOFF-NEXT: v_add_nc_u32_e32 v0, s0, v28
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v50, s1 :: v_dual_add_nc_u32 v49, s1, v28
+; EXACTCUTOFF-NEXT: ds_load_b128 v[8:11], v0
+; EXACTCUTOFF-NEXT: ds_load_b128 v[12:15], v0 offset:512
+; EXACTCUTOFF-NEXT: ds_load_b128 v[16:19], v0 offset:1536
+; EXACTCUTOFF-NEXT: ds_load_b128 v[20:23], v0 offset:3072
+; EXACTCUTOFF-NEXT: ds_load_b128 v[24:27], v0 offset:5120
+; EXACTCUTOFF-NEXT: ds_load_b128 v[4:7], v0 offset:11280
+; EXACTCUTOFF-NEXT: ds_load_b128 v[0:3], v0 offset:11264
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(7) SyncID(0)
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x6
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v31, v11
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x5
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v35, v15
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x4
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v39, v19
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x3
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v43, v23
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x2
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v47, v27 :: v_dual_mov_b32 v30, v10
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v29, v9 :: v_dual_mov_b32 v28, v8
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v34, v14 :: v_dual_mov_b32 v33, v13
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v32, v12
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v38, v18 :: v_dual_mov_b32 v37, v17
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v36, v16
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v42, v22 :: v_dual_mov_b32 v41, v21
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v40, v20
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v46, v26 :: v_dual_mov_b32 v45, v25
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v44, v24
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[28:31], v[8:11], v[0:7], v48
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[32:35], v[12:15], v[0:7], v48
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[36:39], v[16:19], v[0:7], v48
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[40:43], v[20:23], v[0:7], v48
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[44:47], v[24:27], v[0:7], v48
+; EXACTCUTOFF-NEXT: ds_store_b128 v49, v[28:31]
+; EXACTCUTOFF-NEXT: ds_store_b128 v50, v[32:35] offset:512
+; EXACTCUTOFF-NEXT: ds_store_b128 v50, v[36:39] offset:1024
+; EXACTCUTOFF-NEXT: ds_store_b128 v50, v[40:43] offset:1536
+; EXACTCUTOFF-NEXT: ds_store_b128 v50, v[44:47] offset:2048
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(5) SyncID(0)
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(5) SyncID(0)
+; EXACTCUTOFF-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %load.0.addr = getelementptr <8 x half>, ptr addrspace(3) %in, i32 %idx
+ %load.0 = load <8 x half>, ptr addrspace(3) %load.0.addr
+ %load.1.addr = getelementptr <8 x half>, ptr addrspace(3) %load.0.addr, i32 32
+ %load.1 = load <8 x half>, ptr addrspace(3) %load.1.addr
+ %load.2.addr = getelementptr <8 x half>, ptr addrspace(3) %load.1.addr, i32 64
+ %load.2 = load <8 x half>, ptr addrspace(3) %load.2.addr
+ %load.3.addr = getelementptr <8 x half>, ptr addrspace(3) %load.2.addr, i32 96
+ %load.3 = load <8 x half>, ptr addrspace(3) %load.3.addr
+ %load.4.addr = getelementptr <8 x half>, ptr addrspace(3) %load.3.addr, i32 128
+ %load.4 = load <8 x half>, ptr addrspace(3) %load.4.addr
+ %load.b.addr = getelementptr <16 x half>, ptr addrspace(3) %load.4.addr, i32 192
+ %load.b = load <16 x half>, ptr addrspace(3) %load.b.addr
+ %mai.0 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.0, <16 x half> %load.b, <8 x half> %load.0, i16 0)
+ %mai.1 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.1, <16 x half> %load.b, <8 x half> %load.1, i16 0)
+ %mai.2 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.2, <16 x half> %load.b, <8 x half> %load.2, i16 0)
+ %mai.3 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.3, <16 x half> %load.b, <8 x half> %load.3, i16 0)
+ %mai.4 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.4, <16 x half> %load.b, <8 x half> %load.4, i16 0)
+ %store.0.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 %idx
+ store <8 x half> %mai.0, ptr addrspace(3) %store.0.addr
+ %store.1.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 32
+ store <8 x half> %mai.1, ptr addrspace(3) %store.1.addr
+ %store.2.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 64
+ store <8 x half> %mai.2, ptr addrspace(3) %store.2.addr
+ %store.3.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 96
+ store <8 x half> %mai.3, ptr addrspace(3) %store.3.addr
+ %store.4.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 128
+ store <8 x half> %mai.4, ptr addrspace(3) %store.4.addr
+ ; 7 DS read
+ call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 7, i32 0)
+ ; 5 SWMMAC
+ call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 5, i32 0)
+ ; 5 DS write
+ call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 5, i32 0)
+ ret void
+}
+
+define amdgpu_kernel void @test_sched_group_barrier_pipeline_SWMMAC_interleaved(ptr addrspace(3) noalias %in, ptr addrspace(3) noalias %out) #0 {
+; GCN-LABEL: test_sched_group_barrier_pipeline_SWMMAC_interleaved:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v18, 0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_lshl_add_u32 v17, v0, 5, s0
+; GCN-NEXT: v_lshl_add_u32 v0, v0, 4, s1
+; GCN-NEXT: ds_load_b128 v[9:12], v17 offset:1024
+; GCN-NEXT: ds_load_b128 v[1:4], v17
+; GCN-NEXT: ds_load_b128 v[5:8], v17 offset:16
+; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(3) SyncID(0)
+; GCN-NEXT: s_wait_dscnt 0x2
+; GCN-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT: ds_store_b128 v0, v[13:16]
+; GCN-NEXT: ds_load_b128 v[9:12], v17 offset:2560
+; GCN-NEXT: v_mov_b32_e32 v0, s1
+; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT: ds_store_b128 v0, v[13:16] offset:512
+; GCN-NEXT: ds_load_b128 v[9:12], v17 offset:4608
+; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT: ds_store_b128 v0, v[13:16] offset:1024
+; GCN-NEXT: ds_load_b128 v[9:12], v17 offset:7168
+; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT: ds_store_b128 v0, v[13:16] offset:1536
+; GCN-NEXT: ds_load_b128 v[9:12], v17 offset:10240
+; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT: ds_store_b128 v0, v[13:16] offset:2048
+; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT: s_endpgm
+;
+; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_SWMMAC_interleaved:
+; EXACTCUTOFF: ; %bb.0: ; %entry
+; EXACTCUTOFF-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v18, 0
+; EXACTCUTOFF-NEXT: s_wait_kmcnt 0x0
+; EXACTCUTOFF-NEXT: v_lshl_add_u32 v17, v0, 5, s0
+; EXACTCUTOFF-NEXT: v_lshl_add_u32 v0, v0, 4, s1
+; EXACTCUTOFF-NEXT: ds_load_b128 v[9:12], v17 offset:1024
+; EXACTCUTOFF-NEXT: ds_load_b128 v[1:4], v17
+; EXACTCUTOFF-NEXT: ds_load_b128 v[5:8], v17 offset:16
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(3) SyncID(0)
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x2
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ds_store_b128 v0, v[13:16]
+; EXACTCUTOFF-NEXT: ds_load_b128 v[9:12], v17 offset:2560
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v0, s1
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ds_store_b128 v0, v[13:16] offset:512
+; EXACTCUTOFF-NEXT: ds_load_b128 v[9:12], v17 offset:4608
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ds_store_b128 v0, v[13:16] offset:1024
+; EXACTCUTOFF-NEXT: ds_load_b128 v[9:12], v17 offset:7168
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ds_store_b128 v0, v[13:16] offset:1536
+; EXACTCUTOFF-NEXT: ds_load_b128 v[9:12], v17 offset:10240
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ds_store_b128 v0, v[13:16] offset:2048
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %load.b.addr = getelementptr <16 x half>, ptr addrspace(3) %in, i32 %idx
+ %load.b = load <16 x half>, ptr addrspace(3) %load.b.addr
+ %load.0.addr = getelementptr <8 x half>, ptr addrspace(3) %load.b.addr, i32 64
+ %load.0 = load <8 x half>, ptr addrspace(3) %load.0.addr
+ %load.1.addr = getelementptr <8 x half>, ptr addrspace(3) %load.0.addr, i32 96
+ %load.1 = load <8 x half>, ptr addrspace(3) %load.1.addr
+ %load.2.addr = getelementptr <8 x half>, ptr addrspace(3) %load.1.addr, i32 128
+ %load.2 = load <8 x half>, ptr addrspace(3) %load.2.addr
+ %load.3.addr = getelementptr <8 x half>, ptr addrspace(3) %load.2.addr, i32 160
+ %load.3 = load <8 x half>, ptr addrspace(3) %load.3.addr
+ %load.4.addr = getelementptr <8 x half>, ptr addrspace(3) %load.3.addr, i32 192
+ %load.4 = load <8 x half>, ptr addrspace(3) %load.4.addr
+ %mai.0 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.0, <16 x half> %load.b, <8 x half> %load.0, i16 0)
+ %mai.1 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.1, <16 x half> %load.b, <8 x half> %load.1, i16 0)
+ %mai.2 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.2, <16 x half> %load.b, <8 x half> %load.2, i16 0)
+ %mai.3 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.3, <16 x half> %load.b, <8 x half> %load.3, i16 0)
+ %mai.4 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.4, <16 x half> %load.b, <8 x half> %load.4, i16 0)
+ %store.0.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 %idx
+ store <8 x half> %mai.0, ptr addrspace(3) %store.0.addr
+ %store.1.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 32
+ store <8 x half> %mai.1, ptr addrspace(3) %store.1.addr
+ %store.2.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 64
+ store <8 x half> %mai.2, ptr addrspace(3) %store.2.addr
+ %store.3.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 96
+ store <8 x half> %mai.3, ptr addrspace(3) %store.3.addr
+ %store.4.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 128
+ store <8 x half> %mai.4, ptr addrspace(3) %store.4.addr
+ ; 3 DS read
+ call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 3, i32 0)
+ ; 1 SWMMAC
+ call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+ ; 1 DS write
+ call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+ ; 1 DS read
+ call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
+ ; 1 SWMMAC
+ call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+ ; 1 DS write
+ call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+ ; 1 DS read
+ call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
+ ; 1 SWMMAC
+ call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+ ; 1 DS write
+ call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+ ; 1 DS read
+ call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
+ ; 1 SWMMAC
+ call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+ ; 1 DS write
+ call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+ ; 1 DS read
+ call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
+ ; 1 SWMMAC
+ call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+ ; 1 DS write
+ call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll
index 00be32b..ba3d306 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll
@@ -2,6 +2,7 @@
;RUN: llc < %s -mtriple=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefixes=GFX6 %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefixes=GFX8PLUS %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs | FileCheck --check-prefixes=GFX11 %s
+;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-enable-prt-strict-null -verify-machineinstrs | FileCheck --check-prefixes=NOPRT %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs | FileCheck --check-prefixes=GFX12,GFX12-SDAG %s
;RUN: llc < %s -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs | FileCheck --check-prefixes=GFX12,GFX12-GISEL %s
@@ -34,6 +35,16 @@ define amdgpu_ps {<4 x float>, <4 x float>, <4 x float>} @buffer_load(<4 x i32>
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v8, 0
+; NOPRT-NEXT: s_clause 0x2
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v8, s[0:3], 0 idxen
+; NOPRT-NEXT: buffer_load_format_xyzw v[4:7], v8, s[0:3], 0 idxen glc
+; NOPRT-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], 0 idxen slc
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v8, 0
@@ -75,6 +86,13 @@ define amdgpu_ps <4 x float> @buffer_load_immoffs(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_immoffs:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:42
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_immoffs:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -146,6 +164,25 @@ define amdgpu_ps <4 x float> @buffer_load_immoffs_large(<4 x i32> inreg) {
; GFX11-NEXT: v_add_f32_e32 v2, v10, v2
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_immoffs_large:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v8, 0
+; NOPRT-NEXT: s_movk_i32 s4, 0x7ffc
+; NOPRT-NEXT: s_clause 0x1
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v8, s[0:3], 60 idxen offset:4092
+; NOPRT-NEXT: buffer_load_format_xyzw v[4:7], v8, s[0:3], s4 idxen offset:4092
+; NOPRT-NEXT: s_mov_b32 s4, 0x8ffc
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: v_add_f32_e32 v1, v1, v5
+; NOPRT-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], s4 idxen offset:4
+; NOPRT-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v3, v3, v7
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v1, v9, v1
+; NOPRT-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; NOPRT-NEXT: v_dual_add_f32 v0, v8, v0 :: v_dual_add_f32 v3, v11, v3
+; NOPRT-NEXT: v_add_f32_e32 v2, v10, v2
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_immoffs_large:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v8, 0
@@ -196,6 +233,13 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_12bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_12bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_voffset_large_12bit:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -235,6 +279,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_13bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_13bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0x1000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_voffset_large_13bit:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -274,6 +327,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_16bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_16bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0xf000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_voffset_large_16bit:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -313,6 +375,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_23bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_23bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0x7ff000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_voffset_large_23bit:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -352,6 +423,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_24bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_24bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0xfff000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-SDAG-LABEL: buffer_load_voffset_large_24bit:
; GFX12-SDAG: ; %bb.0: ; %main_body
; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 0x800000 :: v_dual_mov_b32 v0, 0
@@ -389,6 +469,12 @@ define amdgpu_ps <4 x float> @buffer_load_idx(<4 x i32> inreg, i32) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_idx:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_idx:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], null idxen
@@ -427,6 +513,15 @@ define amdgpu_ps <4 x float> @buffer_load_ofs(<4 x i32> inreg, i32) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_ofs:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_ofs:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, 0
@@ -466,6 +561,15 @@ define amdgpu_ps <4 x float> @buffer_load_ofs_imm(<4 x i32> inreg, i32) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_ofs_imm:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:60
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_ofs_imm:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, 0
@@ -497,6 +601,12 @@ define amdgpu_ps <4 x float> @buffer_load_both(<4 x i32> inreg, i32, i32) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_both:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_both:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], null idxen offen
@@ -529,6 +639,13 @@ define amdgpu_ps <4 x float> @buffer_load_both_reversed(<4 x i32> inreg, i32, i3
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_both_reversed:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v2, v0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[1:2], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_both_reversed:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v2, v0
@@ -562,6 +679,13 @@ define amdgpu_ps float @buffer_load_x(<4 x i32> inreg %rsrc) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_x:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_x:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -595,6 +719,13 @@ define amdgpu_ps float @buffer_load_x_i32(<4 x i32> inreg %rsrc) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_x_i32:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_x_i32:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -629,6 +760,13 @@ define amdgpu_ps <2 x float> @buffer_load_xy(<4 x i32> inreg %rsrc) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_xy:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xy v[0:1], v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_xy:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -644,7 +782,12 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v4i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
-; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
+; GFX6-NEXT: v_mov_b32_e32 v7, 2
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
+; GFX6-NEXT: v_mov_b32_e32 v6, v2
+; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v7, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: s_mov_b32 s0, s2
@@ -658,7 +801,12 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v4i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
-; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
+; GFX8PLUS-NEXT: v_mov_b32_e32 v7, 2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v6, v2
+; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v7, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
; GFX8PLUS-NEXT: v_mov_b32_e32 v0, v6
@@ -667,22 +815,40 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
;
; GFX11-LABEL: buffer_load_v4i32_tfe:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mov_b32_e32 v2, 0
-; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
+; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v7, 2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
+; GFX11-NEXT: v_mov_b32_e32 v6, v2
+; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v7, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX11-NEXT: v_mov_b32_e32 v0, v6
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v4i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v2, 2
+; NOPRT-NEXT: v_mov_b32_e32 v6, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b128 v[0:1], v[2:5], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v6
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v4i32_tfe:
; GFX12: ; %bb.0:
-; GFX12-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], null idxen tfe
+; GFX12-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v7, 2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v6, v2
+; GFX12-NEXT: buffer_load_format_xyzw v[2:6], v7, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX12-NEXT: v_mov_b32_e32 v0, v6
; GFX12-NEXT: ; return to shader part epilog
- %load = call { <4 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.format.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %load = call { <4 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.format.sl_v4i32i32s(<4 x i32> %rsrc, i32 2, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x i32>, i32 } %load, 0
store <4 x i32> %data, ptr addrspace(1) %out
%status = extractvalue { <4 x i32>, i32 } %load, 1
@@ -694,6 +860,10 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v4f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
+; GFX6-NEXT: v_mov_b32_e32 v6, v2
; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -708,6 +878,10 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v4f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v6, v2
; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -718,15 +892,32 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v4f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
+; GFX11-NEXT: v_mov_b32_e32 v6, v2
; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX11-NEXT: v_mov_b32_e32 v0, v6
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v4f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v6, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[2:6], v6, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b128 v[0:1], v[2:5], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v6
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v4f32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v6, v2
; GFX12-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b128 v[0:1], v[2:5], off
@@ -744,6 +935,9 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v3i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -759,6 +953,9 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v3i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
; GFX8PLUS-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx3 v[0:1], v[2:4]
@@ -769,15 +966,31 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v3i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
; GFX11-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b96 v[0:1], v[2:4], off
; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v3i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v5, 0
+; NOPRT-NEXT: buffer_load_format_xyz v[2:5], v5, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b96 v[0:1], v[2:4], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v5
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v3i32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: v_mov_b32_e32 v5, v2
; GFX12-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b96 v[0:1], v[2:4], off
@@ -795,6 +1008,9 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v3f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -810,6 +1026,9 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v3f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
; GFX8PLUS-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx3 v[0:1], v[2:4]
@@ -820,15 +1039,31 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v3f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
; GFX11-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b96 v[0:1], v[2:4], off
; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v3f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v5, 0
+; NOPRT-NEXT: buffer_load_format_xyz v[2:5], v5, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b96 v[0:1], v[2:4], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v5
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v3f32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: v_mov_b32_e32 v5, v2
; GFX12-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b96 v[0:1], v[2:4], off
@@ -846,6 +1081,9 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v2i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -860,6 +1098,8 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v2i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
; GFX8PLUS-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
@@ -870,15 +1110,29 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v2i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
; GFX11-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: v_mov_b32_e32 v0, v4
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v2i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v4, 0
+; NOPRT-NEXT: buffer_load_format_xy v[2:4], v4, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b64 v[0:1], v[2:3], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v4
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v2i32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
; GFX12-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
@@ -896,6 +1150,9 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v2f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -910,6 +1167,8 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v2f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
; GFX8PLUS-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
@@ -920,15 +1179,29 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v2f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
; GFX11-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: v_mov_b32_e32 v0, v4
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v2f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v4, 0
+; NOPRT-NEXT: buffer_load_format_xy v[2:4], v4, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b64 v[0:1], v[2:3], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v4
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v2f32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
; GFX12-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
@@ -946,6 +1219,7 @@ define amdgpu_cs float @buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX6-LABEL: buffer_load_i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
; GFX6-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -960,6 +1234,7 @@ define amdgpu_cs float @buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX8PLUS-LABEL: buffer_load_i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
; GFX8PLUS-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dword v[0:1], v2
@@ -970,15 +1245,28 @@ define amdgpu_cs float @buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX11-LABEL: buffer_load_i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
; GFX11-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v3, 0
+; NOPRT-NEXT: buffer_load_format_x v[2:3], v3, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b32 v[0:1], v2, off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v3
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_i32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
; GFX12-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b32 v[0:1], v2, off
@@ -996,6 +1284,7 @@ define amdgpu_cs float @buffer_load_f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX6-LABEL: buffer_load_f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
; GFX6-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -1010,6 +1299,7 @@ define amdgpu_cs float @buffer_load_f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX8PLUS-LABEL: buffer_load_f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
; GFX8PLUS-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dword v[0:1], v2
@@ -1020,15 +1310,28 @@ define amdgpu_cs float @buffer_load_f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX11-LABEL: buffer_load_f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
; GFX11-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v3, 0
+; NOPRT-NEXT: buffer_load_format_x v[2:3], v3, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b32 v[0:1], v2, off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v3
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_f32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
; GFX12-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b32 v[0:1], v2, off
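The NOPRT run line added above exercises -mattr=-enable-prt-strict-null. Under strict-null PRT handling (the default), the result lanes of a TFE load must read back as zero when the fetch does not write them, which appears to be why the GFX6/GFX8PLUS/GFX11/GFX12 checks now pre-initialize the tied destination registers with v_mov_b32 before each buffer_load_format ... tfe, while the NOPRT output omits that initialization. A minimal sketch of the IR shape driving these checks, mirroring the tests (function name hypothetical):

declare { <4 x float>, i32 } @llvm.amdgcn.struct.buffer.load.format.sl_v4f32i32s(<4 x i32>, i32, i32, i32, i32)

define amdgpu_cs float @tfe_status(<4 x i32> inreg %rsrc, ptr addrspace(1) %out) {
  %load = call { <4 x float>, i32 } @llvm.amdgcn.struct.buffer.load.format.sl_v4f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
  %data = extractvalue { <4 x float>, i32 } %load, 0
  store <4 x float> %data, ptr addrspace(1) %out
  ; The extra dword past the data lanes is the TFE status; return it to keep it live.
  %status = extractvalue { <4 x float>, i32 } %load, 1
  %f = bitcast i32 %status to float
  ret float %f
}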
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll
index b0bd4e4..c5202b8 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll
@@ -2,6 +2,7 @@
;RUN: llc < %s -mtriple=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefixes=GFX6 %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefixes=GFX8PLUS %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs | FileCheck --check-prefixes=GFX11 %s
+;RUN: llc < %s -mtriple=amdgcn -mattr=-enable-prt-strict-null -mcpu=gfx1100 -verify-machineinstrs | FileCheck --check-prefixes=NOPRT %s
define amdgpu_ps {<4 x float>, <4 x float>, <4 x float>} @buffer_load(ptr addrspace(8) inreg) {
; GFX6-LABEL: buffer_load:
@@ -31,6 +32,16 @@ define amdgpu_ps {<4 x float>, <4 x float>, <4 x float>} @buffer_load(ptr addrsp
; GFX11-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], 0 idxen slc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v8, 0
+; NOPRT-NEXT: s_clause 0x2
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v8, s[0:3], 0 idxen
+; NOPRT-NEXT: buffer_load_format_xyzw v[4:7], v8, s[0:3], 0 idxen glc
+; NOPRT-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], 0 idxen slc
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 0, i32 0, i32 0)
%data_glc = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 0, i32 0, i32 1)
@@ -62,6 +73,13 @@ define amdgpu_ps <4 x float> @buffer_load_immoffs(ptr addrspace(8) inreg) {
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:42
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_immoffs:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:42
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 42, i32 0, i32 0)
ret <4 x float> %data
@@ -126,6 +144,25 @@ define amdgpu_ps <4 x float> @buffer_load_immoffs_large(ptr addrspace(8) inreg)
; GFX11-NEXT: v_dual_add_f32 v0, v8, v0 :: v_dual_add_f32 v3, v11, v3
; GFX11-NEXT: v_add_f32_e32 v2, v10, v2
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_immoffs_large:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v8, 0
+; NOPRT-NEXT: s_movk_i32 s4, 0x7ffc
+; NOPRT-NEXT: s_clause 0x1
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v8, s[0:3], 60 idxen offset:4092
+; NOPRT-NEXT: buffer_load_format_xyzw v[4:7], v8, s[0:3], s4 idxen offset:4092
+; NOPRT-NEXT: s_mov_b32 s4, 0x8ffc
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: v_add_f32_e32 v1, v1, v5
+; NOPRT-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], s4 idxen offset:4
+; NOPRT-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v3, v3, v7
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v1, v9, v1
+; NOPRT-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; NOPRT-NEXT: v_dual_add_f32 v0, v8, v0 :: v_dual_add_f32 v3, v11, v3
+; NOPRT-NEXT: v_add_f32_e32 v2, v10, v2
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%d.0 = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 4092, i32 60, i32 0)
%d.1 = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 4092, i32 32764, i32 0)
@@ -156,6 +193,13 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_12bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_12bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 4092, i32 0, i32 0)
ret <4 x float> %data
@@ -188,6 +232,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_13bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_13bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0x1000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 8188, i32 0, i32 0)
ret <4 x float> %data
@@ -220,6 +273,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_16bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_16bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0xf000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 65532, i32 0, i32 0)
ret <4 x float> %data
@@ -252,6 +314,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_23bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_23bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0x7ff000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 8388604, i32 0, i32 0)
ret <4 x float> %data
@@ -284,6 +355,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_24bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_24bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0xfff000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 16777212, i32 0, i32 0)
ret <4 x float> %data
@@ -307,6 +387,12 @@ define amdgpu_ps <4 x float> @buffer_load_idx(ptr addrspace(8) inreg, i32) {
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_idx:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 %1, i32 0, i32 0, i32 0)
ret <4 x float> %data
@@ -339,6 +425,15 @@ define amdgpu_ps <4 x float> @buffer_load_ofs(ptr addrspace(8) inreg, i32) {
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_ofs:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 %1, i32 0, i32 0)
ret <4 x float> %data
@@ -371,6 +466,15 @@ define amdgpu_ps <4 x float> @buffer_load_ofs_imm(ptr addrspace(8) inreg, i32) {
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:60
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_ofs_imm:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:60
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%ofs = add i32 %1, 60
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 %ofs, i32 0, i32 0)
@@ -395,6 +499,12 @@ define amdgpu_ps <4 x float> @buffer_load_both(ptr addrspace(8) inreg, i32, i32)
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_both:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 %1, i32 %2, i32 0, i32 0)
ret <4 x float> %data
@@ -421,6 +531,13 @@ define amdgpu_ps <4 x float> @buffer_load_both_reversed(ptr addrspace(8) inreg,
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[1:2], s[0:3], 0 idxen offen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_both_reversed:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v2, v0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[1:2], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 %2, i32 %1, i32 0, i32 0)
ret <4 x float> %data
@@ -447,6 +564,13 @@ define amdgpu_ps float @buffer_load_x(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_x:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call float @llvm.amdgcn.struct.ptr.buffer.load.format.f32(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
ret float %data
@@ -473,6 +597,13 @@ define amdgpu_ps float @buffer_load_x_i32(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_x_i32:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call i32 @llvm.amdgcn.struct.ptr.buffer.load.format.i32(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%fdata = bitcast i32 %data to float
@@ -500,6 +631,13 @@ define amdgpu_ps <2 x float> @buffer_load_xy(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_format_xy v[0:1], v0, s[0:3], 0 idxen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_xy:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xy v[0:1], v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <2 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v2f32(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
ret <2 x float> %data
@@ -509,6 +647,10 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v4i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
+; GFX6-NEXT: v_mov_b32_e32 v6, v2
; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -523,6 +665,10 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v4i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v6, v2
; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -533,11 +679,25 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v4i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
+; GFX11-NEXT: v_mov_b32_e32 v6, v2
; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX11-NEXT: v_mov_b32_e32 v0, v6
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v4i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v6, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[2:6], v6, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b128 v[0:1], v[2:5], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v6
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <4 x i32>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v4i32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x i32>, i32 } %load, 0
store <4 x i32> %data, ptr addrspace(1) %out
@@ -550,6 +710,10 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v4f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
+; GFX6-NEXT: v_mov_b32_e32 v6, v2
; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -564,6 +728,10 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v4f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v6, v2
; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -574,11 +742,25 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v4f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
+; GFX11-NEXT: v_mov_b32_e32 v6, v2
; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX11-NEXT: v_mov_b32_e32 v0, v6
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v4f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v6, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[2:6], v6, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b128 v[0:1], v[2:5], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v6
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <4 x float>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v4f32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x float>, i32 } %load, 0
store <4 x float> %data, ptr addrspace(1) %out
@@ -591,6 +773,9 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v3i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -606,6 +791,9 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v3i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
; GFX8PLUS-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx3 v[0:1], v[2:4]
@@ -616,11 +804,24 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v3i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
; GFX11-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b96 v[0:1], v[2:4], off
; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v3i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v5, 0
+; NOPRT-NEXT: buffer_load_format_xyz v[2:5], v5, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b96 v[0:1], v[2:4], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v5
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <3 x i32>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v3i32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <3 x i32>, i32 } %load, 0
store <3 x i32> %data, ptr addrspace(1) %out
@@ -633,6 +834,9 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v3f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -648,6 +852,9 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v3f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
; GFX8PLUS-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx3 v[0:1], v[2:4]
@@ -658,11 +865,24 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v3f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
; GFX11-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b96 v[0:1], v[2:4], off
; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v3f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v5, 0
+; NOPRT-NEXT: buffer_load_format_xyz v[2:5], v5, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b96 v[0:1], v[2:4], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v5
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <3 x float>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v3f32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <3 x float>, i32 } %load, 0
store <3 x float> %data, ptr addrspace(1) %out
@@ -675,6 +895,9 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v2i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -689,6 +912,8 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v2i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
; GFX8PLUS-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
@@ -699,11 +924,23 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v2i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
; GFX11-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: v_mov_b32_e32 v0, v4
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v2i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v4, 0
+; NOPRT-NEXT: buffer_load_format_xy v[2:4], v4, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b64 v[0:1], v[2:3], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v4
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <2 x i32>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v2i32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <2 x i32>, i32 } %load, 0
store <2 x i32> %data, ptr addrspace(1) %out
@@ -716,6 +953,9 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v2f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -730,6 +970,8 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v2f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
; GFX8PLUS-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
@@ -740,11 +982,23 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v2f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
; GFX11-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: v_mov_b32_e32 v0, v4
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v2f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v4, 0
+; NOPRT-NEXT: buffer_load_format_xy v[2:4], v4, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b64 v[0:1], v[2:3], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v4
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <2 x float>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v2f32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <2 x float>, i32 } %load, 0
store <2 x float> %data, ptr addrspace(1) %out
@@ -757,6 +1011,7 @@ define amdgpu_cs float @buffer_load_i32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX6-LABEL: buffer_load_i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
; GFX6-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -771,6 +1026,7 @@ define amdgpu_cs float @buffer_load_i32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX8PLUS-LABEL: buffer_load_i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
; GFX8PLUS-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dword v[0:1], v2
@@ -781,11 +1037,22 @@ define amdgpu_cs float @buffer_load_i32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX11-LABEL: buffer_load_i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
; GFX11-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v3, 0
+; NOPRT-NEXT: buffer_load_format_x v[2:3], v3, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b32 v[0:1], v2, off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v3
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { i32, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_i32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { i32, i32 } %load, 0
store i32 %data, ptr addrspace(1) %out
@@ -798,6 +1065,7 @@ define amdgpu_cs float @buffer_load_f32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX6-LABEL: buffer_load_f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
; GFX6-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -812,6 +1080,7 @@ define amdgpu_cs float @buffer_load_f32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX8PLUS-LABEL: buffer_load_f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
; GFX8PLUS-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dword v[0:1], v2
@@ -822,11 +1091,22 @@ define amdgpu_cs float @buffer_load_f32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX11-LABEL: buffer_load_f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
; GFX11-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v3, 0
+; NOPRT-NEXT: buffer_load_format_x v[2:3], v3, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b32 v[0:1], v2, off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v3
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { float, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_f32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { float, i32 } %load, 0
store float %data, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
index ab7ab4d..d056a97 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
@@ -32,8 +32,6 @@ define amdgpu_kernel void @maxnum_f16(
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_max_f32_e32 v0, v0, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
@@ -170,7 +168,6 @@ define amdgpu_kernel void @maxnum_f16_imm_a(
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0x40400000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
@@ -279,7 +276,6 @@ define amdgpu_kernel void @maxnum_f16_imm_b(
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 4.0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
@@ -384,21 +380,17 @@ define amdgpu_kernel void @maxnum_v2f16(
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s1, s2, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s0
-; SI-NEXT: s_lshr_b32 s0, s0, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s0
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s1
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_max_f32_e32 v2, v3, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: s_lshr_b32 s3, s0, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s1
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s3
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s2
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s0
; SI-NEXT: v_max_f32_e32 v0, v0, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_max_f32_e32 v1, v2, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
@@ -497,20 +489,18 @@ define amdgpu_kernel void @maxnum_v2f16_imm_a(
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s2, s[2:3], 0x0
-; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
-; SI-NEXT: s_lshr_b32 s2, s2, 16
+; SI-NEXT: s_lshr_b32 s3, s2, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s3
; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
+; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_max_f32_e32 v0, 0x40400000, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_max_f32_e32 v1, 4.0, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_max_f32_e32 v0, 4.0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_max_f32_e32 v1, 0x40400000, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -589,20 +579,18 @@ define amdgpu_kernel void @maxnum_v2f16_imm_b(
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s2, s[2:3], 0x0
-; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
-; SI-NEXT: s_lshr_b32 s2, s2, 16
+; SI-NEXT: s_lshr_b32 s3, s2, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s3
; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
+; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_max_f32_e32 v0, 4.0, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_max_f32_e32 v1, 0x40400000, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_max_f32_e32 v0, 0x40400000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_max_f32_e32 v1, 4.0, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -688,27 +676,21 @@ define amdgpu_kernel void @maxnum_v3f16(
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, s3
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
-; SI-NEXT: s_lshr_b32 s2, s2, 16
-; SI-NEXT: s_lshr_b32 s3, s0, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s3
+; SI-NEXT: s_lshr_b32 s3, s2, 16
+; SI-NEXT: s_lshr_b32 s8, s0, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s3
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s8
; SI-NEXT: v_cvt_f32_f16_e32 v3, s2
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s0
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; SI-NEXT: v_max_f32_e32 v2, v3, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v5
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_max_f32_e32 v1, v1, v3
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v4
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT: v_max_f32_e32 v0, v0, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s0
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s1
+; SI-NEXT: v_max_f32_e32 v1, v1, v2
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_max_f32_e32 v2, v3, v4
+; SI-NEXT: v_max_f32_e32 v0, v0, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_or_b32_e32 v1, v1, v2
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0 offset:4
; SI-NEXT: buffer_store_dword v1, off, s[4:7], 0
; SI-NEXT: s_endpgm
@@ -837,25 +819,17 @@ define amdgpu_kernel void @maxnum_v4f16(
; SI-NEXT: v_cvt_f32_f16_e32 v2, s6
; SI-NEXT: s_lshr_b32 s6, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v3, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
; SI-NEXT: s_lshr_b32 s6, s5, 16
+; SI-NEXT: s_lshr_b32 s4, s4, 16
; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
; SI-NEXT: v_cvt_f32_f16_e32 v1, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
-; SI-NEXT: s_lshr_b32 s4, s4, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s5
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
-; SI-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s5
; SI-NEXT: v_max_f32_e32 v3, v3, v5
-; SI-NEXT: v_mul_f32_e32 v5, 1.0, v7
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_max_f32_e32 v1, v1, v5
-; SI-NEXT: v_mul_f32_e32 v5, 1.0, v6
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_max_f32_e32 v2, v2, v5
-; SI-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; SI-NEXT: v_max_f32_e32 v2, v2, v7
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_max_f32_e32 v1, v1, v6
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_max_f32_e32 v0, v0, v4
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -986,20 +960,16 @@ define amdgpu_kernel void @fmax_v4f16_imm_a(
; SI-NEXT: v_cvt_f32_f16_e32 v1, s5
; SI-NEXT: s_lshr_b32 s5, s5, 16
; SI-NEXT: v_cvt_f32_f16_e32 v0, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s5
; SI-NEXT: s_lshr_b32 s4, s4, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s5
; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_max_f32_e32 v2, 4.0, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
; SI-NEXT: v_max_f32_e32 v1, 0x40400000, v1
+; SI-NEXT: v_max_f32_e32 v0, 0x41000000, v0
+; SI-NEXT: v_max_f32_e32 v2, 4.0, v2
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_max_f32_e32 v3, 2.0, v3
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_max_f32_e32 v0, 0x41000000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v1, v1, v2
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
index b7370ce..f934a2d 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
@@ -32,8 +32,6 @@ define amdgpu_kernel void @minnum_f16_ieee(
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_min_f32_e32 v0, v0, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
@@ -197,7 +195,6 @@ define amdgpu_kernel void @minnum_f16_imm_a(
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, 0x40400000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
@@ -305,7 +302,6 @@ define amdgpu_kernel void @minnum_f16_imm_b(
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, 4.0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
@@ -409,21 +405,17 @@ define amdgpu_kernel void @minnum_v2f16_ieee(
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s1, s2, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s0
-; SI-NEXT: s_lshr_b32 s0, s0, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s0
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s1
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_min_f32_e32 v2, v3, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: s_lshr_b32 s3, s0, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s1
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s3
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s2
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s0
; SI-NEXT: v_min_f32_e32 v0, v0, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_min_f32_e32 v1, v2, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
@@ -556,20 +548,18 @@ define amdgpu_kernel void @minnum_v2f16_imm_a(
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s2, s[2:3], 0x0
-; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
-; SI-NEXT: s_lshr_b32 s2, s2, 16
+; SI-NEXT: s_lshr_b32 s3, s2, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s3
; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
+; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_min_f32_e32 v0, 0x40400000, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_min_f32_e32 v1, 4.0, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_min_f32_e32 v0, 4.0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_min_f32_e32 v1, 0x40400000, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -647,20 +637,18 @@ define amdgpu_kernel void @minnum_v2f16_imm_b(
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s2, s[2:3], 0x0
-; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
-; SI-NEXT: s_lshr_b32 s2, s2, 16
+; SI-NEXT: s_lshr_b32 s3, s2, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s3
; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
+; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_min_f32_e32 v0, 4.0, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_min_f32_e32 v1, 0x40400000, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_min_f32_e32 v0, 0x40400000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_min_f32_e32 v1, 4.0, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -745,27 +733,21 @@ define amdgpu_kernel void @minnum_v3f16(
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, s3
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
-; SI-NEXT: s_lshr_b32 s2, s2, 16
-; SI-NEXT: s_lshr_b32 s3, s0, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s3
+; SI-NEXT: s_lshr_b32 s3, s2, 16
+; SI-NEXT: s_lshr_b32 s8, s0, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s3
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s8
; SI-NEXT: v_cvt_f32_f16_e32 v3, s2
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s0
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; SI-NEXT: v_min_f32_e32 v2, v3, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v5
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_min_f32_e32 v1, v1, v3
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v4
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT: v_min_f32_e32 v0, v0, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s0
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s1
+; SI-NEXT: v_min_f32_e32 v1, v1, v2
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_min_f32_e32 v2, v3, v4
+; SI-NEXT: v_min_f32_e32 v0, v0, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_or_b32_e32 v1, v1, v2
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0 offset:4
; SI-NEXT: buffer_store_dword v1, off, s[4:7], 0
; SI-NEXT: s_endpgm
@@ -893,25 +875,17 @@ define amdgpu_kernel void @minnum_v4f16(
; SI-NEXT: v_cvt_f32_f16_e32 v2, s6
; SI-NEXT: s_lshr_b32 s6, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v3, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
; SI-NEXT: s_lshr_b32 s6, s5, 16
+; SI-NEXT: s_lshr_b32 s4, s4, 16
; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
; SI-NEXT: v_cvt_f32_f16_e32 v1, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
-; SI-NEXT: s_lshr_b32 s4, s4, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s5
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
-; SI-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s5
; SI-NEXT: v_min_f32_e32 v3, v3, v5
-; SI-NEXT: v_mul_f32_e32 v5, 1.0, v7
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_min_f32_e32 v1, v1, v5
-; SI-NEXT: v_mul_f32_e32 v5, 1.0, v6
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_min_f32_e32 v2, v2, v5
-; SI-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; SI-NEXT: v_min_f32_e32 v2, v2, v7
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_min_f32_e32 v1, v1, v6
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_min_f32_e32 v0, v0, v4
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -1041,20 +1015,16 @@ define amdgpu_kernel void @fmin_v4f16_imm_a(
; SI-NEXT: v_cvt_f32_f16_e32 v1, s5
; SI-NEXT: s_lshr_b32 s5, s5, 16
; SI-NEXT: v_cvt_f32_f16_e32 v0, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s5
; SI-NEXT: s_lshr_b32 s4, s4, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s5
; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_min_f32_e32 v2, 4.0, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
; SI-NEXT: v_min_f32_e32 v1, 0x40400000, v1
+; SI-NEXT: v_min_f32_e32 v0, 0x41000000, v0
+; SI-NEXT: v_min_f32_e32 v2, 4.0, v2
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_min_f32_e32 v3, 2.0, v3
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_min_f32_e32 v0, 0x41000000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v1, v1, v2
diff --git a/llvm/test/CodeGen/AMDGPU/lto-lower-module-lds.ll b/llvm/test/CodeGen/AMDGPU/lto-lower-module-lds.ll
new file mode 100644
index 0000000..f1d9463
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/lto-lower-module-lds.ll
@@ -0,0 +1,47 @@
+
+; Default O0
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -O0 -cg-opt-level 0 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Unified O0
+; RUN: opt -unified-lto -thinlto-split-lto-unit -thinlto-bc -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -unified-lto=full -O0 -cg-opt-level 0 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Default O1
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -O1 -cg-opt-level 1 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Unified O1
+; RUN: opt -unified-lto -thinlto-split-lto-unit -thinlto-bc -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -unified-lto=full -O1 -cg-opt-level 1 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Default O2
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -O2 -cg-opt-level 2 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Unified O2
+; RUN: opt -unified-lto -thinlto-split-lto-unit -thinlto-bc -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -unified-lto=full -O2 -cg-opt-level 2 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Default O3
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -O3 -cg-opt-level 3 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Unified O3
+; RUN: opt -unified-lto -thinlto-split-lto-unit -thinlto-bc -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -unified-lto=full -O3 -cg-opt-level 3 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; The first print comes from the new pass manager during the full LTO pipeline.
+; The second print comes from the legacy pass manager during the codegen pipeline.
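+; The same CHECK lines are shared by all of the RUN lines above: the module
+; LDS lowering is expected to run at every optimization level, in both the
+; default and the unified LTO flows.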
+
+; CHECK: Running pass: AMDGPULowerModuleLDSPass on [module]
+; CHECK: ModulePass Manager
+; CHECK: Lower uses of LDS variables from non-kernel functions
+
+@lds = internal unnamed_addr addrspace(3) global i32 poison, align 4
+
+define amdgpu_kernel void @test() {
+entry:
+ store i32 1, ptr addrspace(3) @lds
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-lo.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-lo.ll
index fb3e79b..5b7f0e7 100644
--- a/llvm/test/CodeGen/AMDGPU/mad-mix-lo.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad-mix-lo.ll
@@ -951,56 +951,70 @@ define <3 x half> @v_mad_mix_v3f32_clamp_postcvt(<3 x half> %src0, <3 x half> %s
; SDAG-GFX1100-LABEL: v_mad_mix_v3f32_clamp_postcvt:
; SDAG-GFX1100: ; %bb.0:
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX1100-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1]
; SDAG-GFX1100-NEXT: v_fma_mixlo_f16 v1, v1, v3, v5 op_sel_hi:[1,1,1]
-; SDAG-GFX1100-NEXT: v_fma_mixlo_f16 v3, v0, v2, v4 op_sel_hi:[1,1,1] clamp
; SDAG-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; SDAG-GFX1100-NEXT: v_pack_b32_f16 v1, v1, 0
-; SDAG-GFX1100-NEXT: v_fma_mixhi_f16 v3, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
+; SDAG-GFX1100-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX1100-NEXT: v_pack_b32_f16 v0, v1, 0
; SDAG-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; SDAG-GFX1100-NEXT: v_pk_max_f16 v1, v1, v1 clamp
-; SDAG-GFX1100-NEXT: v_mov_b32_e32 v0, v3
+; SDAG-GFX1100-NEXT: v_pk_max_f16 v1, v6, 0
+; SDAG-GFX1100-NEXT: v_pk_max_f16 v2, v0, 0
+; SDAG-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; SDAG-GFX1100-NEXT: v_pk_min_f16 v0, v1, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX1100-NEXT: v_pk_min_f16 v1, v2, 1.0 op_sel_hi:[1,0]
; SDAG-GFX1100-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-GFX900-LABEL: v_mad_mix_v3f32_clamp_postcvt:
; SDAG-GFX900: ; %bb.0:
; SDAG-GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX900-NEXT: v_mad_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1]
; SDAG-GFX900-NEXT: v_mad_mixlo_f16 v1, v1, v3, v5 op_sel_hi:[1,1,1]
-; SDAG-GFX900-NEXT: v_mad_mixlo_f16 v3, v0, v2, v4 op_sel_hi:[1,1,1] clamp
; SDAG-GFX900-NEXT: v_pack_b32_f16 v1, v1, 0
-; SDAG-GFX900-NEXT: v_mad_mixhi_f16 v3, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; SDAG-GFX900-NEXT: v_pk_max_f16 v1, v1, v1 clamp
-; SDAG-GFX900-NEXT: v_mov_b32_e32 v0, v3
+; SDAG-GFX900-NEXT: v_mad_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX900-NEXT: v_pk_max_f16 v1, v1, 0
+; SDAG-GFX900-NEXT: v_pk_max_f16 v0, v6, 0
+; SDAG-GFX900-NEXT: v_pk_min_f16 v0, v0, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX900-NEXT: v_pk_min_f16 v1, v1, 1.0 op_sel_hi:[1,0]
; SDAG-GFX900-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-GFX906-LABEL: v_mad_mix_v3f32_clamp_postcvt:
; SDAG-GFX906: ; %bb.0:
; SDAG-GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX906-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1]
; SDAG-GFX906-NEXT: v_fma_mixlo_f16 v1, v1, v3, v5 op_sel_hi:[1,1,1]
-; SDAG-GFX906-NEXT: v_fma_mixlo_f16 v3, v0, v2, v4 op_sel_hi:[1,1,1] clamp
; SDAG-GFX906-NEXT: v_pack_b32_f16 v1, v1, 0
-; SDAG-GFX906-NEXT: v_fma_mixhi_f16 v3, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; SDAG-GFX906-NEXT: v_pk_max_f16 v1, v1, v1 clamp
-; SDAG-GFX906-NEXT: v_mov_b32_e32 v0, v3
+; SDAG-GFX906-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX906-NEXT: v_pk_max_f16 v1, v1, 0
+; SDAG-GFX906-NEXT: v_pk_max_f16 v0, v6, 0
+; SDAG-GFX906-NEXT: v_pk_min_f16 v0, v0, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX906-NEXT: v_pk_min_f16 v1, v1, 1.0 op_sel_hi:[1,0]
; SDAG-GFX906-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-VI-LABEL: v_mad_mix_v3f32_clamp_postcvt:
; SDAG-VI: ; %bb.0:
; SDAG-VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v8, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SDAG-VI-NEXT: v_mac_f32_e32 v8, v6, v7
; SDAG-VI-NEXT: v_mac_f32_e32 v4, v0, v2
-; SDAG-VI-NEXT: v_cvt_f16_f32_sdwa v0, v8 clamp dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; SDAG-VI-NEXT: v_cvt_f16_f32_e64 v2, v4 clamp
; SDAG-VI-NEXT: v_mac_f32_e32 v5, v1, v3
-; SDAG-VI-NEXT: v_cvt_f16_f32_e64 v1, v5 clamp
+; SDAG-VI-NEXT: v_cvt_f16_f32_e32 v0, v8
+; SDAG-VI-NEXT: v_cvt_f16_f32_e32 v1, v4
+; SDAG-VI-NEXT: v_cvt_f16_f32_e32 v2, v5
+; SDAG-VI-NEXT: v_max_f16_e32 v0, 0, v0
+; SDAG-VI-NEXT: v_max_f16_e32 v3, 0, v1
+; SDAG-VI-NEXT: v_max_f16_e32 v1, 0, v2
+; SDAG-VI-NEXT: v_mov_b32_e32 v2, 0x3c00
+; SDAG-VI-NEXT: v_min_f16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; SDAG-VI-NEXT: v_min_f16_e32 v2, 1.0, v3
+; SDAG-VI-NEXT: v_min_f16_e32 v1, 1.0, v1
; SDAG-VI-NEXT: v_or_b32_e32 v0, v2, v0
; SDAG-VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1139,63 +1153,80 @@ define <3 x half> @v_mad_mix_v3f32_clamp_postcvt(<3 x half> %src0, <3 x half> %s
}
define <4 x half> @v_mad_mix_v4f32_clamp_postcvt(<4 x half> %src0, <4 x half> %src1, <4 x half> %src2) #0 {
-; GFX1100-LABEL: v_mad_mix_v4f32_clamp_postcvt:
-; GFX1100: ; %bb.0:
-; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp
-; GFX1100-NEXT: v_fma_mixlo_f16 v7, v1, v3, v5 op_sel_hi:[1,1,1] clamp
-; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1100-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; GFX1100-NEXT: v_fma_mixhi_f16 v7, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1100-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
-; GFX1100-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX1100-LABEL: v_mad_mix_v4f32_clamp_postcvt:
+; SDAG-GFX1100: ; %bb.0:
+; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX1100-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1]
+; SDAG-GFX1100-NEXT: v_fma_mixlo_f16 v7, v1, v3, v5 op_sel_hi:[1,1,1]
+; SDAG-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; SDAG-GFX1100-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX1100-NEXT: v_fma_mixhi_f16 v7, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; SDAG-GFX1100-NEXT: v_pk_max_f16 v0, v6, 0
+; SDAG-GFX1100-NEXT: v_pk_max_f16 v1, v7, 0
+; SDAG-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; SDAG-GFX1100-NEXT: v_pk_min_f16 v0, v0, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX1100-NEXT: v_pk_min_f16 v1, v1, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX1100-NEXT: s_setpc_b64 s[30:31]
;
-; GFX900-LABEL: v_mad_mix_v4f32_clamp_postcvt:
-; GFX900: ; %bb.0:
-; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-NEXT: v_mad_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp
-; GFX900-NEXT: v_mad_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; GFX900-NEXT: v_mad_mixlo_f16 v2, v1, v3, v5 op_sel_hi:[1,1,1] clamp
-; GFX900-NEXT: v_mad_mixhi_f16 v2, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; GFX900-NEXT: v_mov_b32_e32 v0, v6
-; GFX900-NEXT: v_mov_b32_e32 v1, v2
-; GFX900-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX900-LABEL: v_mad_mix_v4f32_clamp_postcvt:
+; SDAG-GFX900: ; %bb.0:
+; SDAG-GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX900-NEXT: v_mad_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1]
+; SDAG-GFX900-NEXT: v_mad_mixlo_f16 v7, v1, v3, v5 op_sel_hi:[1,1,1]
+; SDAG-GFX900-NEXT: v_mad_mixhi_f16 v7, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX900-NEXT: v_mad_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX900-NEXT: v_pk_max_f16 v1, v7, 0
+; SDAG-GFX900-NEXT: v_pk_max_f16 v0, v6, 0
+; SDAG-GFX900-NEXT: v_pk_min_f16 v0, v0, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX900-NEXT: v_pk_min_f16 v1, v1, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX900-NEXT: s_setpc_b64 s[30:31]
;
-; GFX906-LABEL: v_mad_mix_v4f32_clamp_postcvt:
-; GFX906: ; %bb.0:
-; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp
-; GFX906-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; GFX906-NEXT: v_fma_mixlo_f16 v2, v1, v3, v5 op_sel_hi:[1,1,1] clamp
-; GFX906-NEXT: v_fma_mixhi_f16 v2, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; GFX906-NEXT: v_mov_b32_e32 v0, v6
-; GFX906-NEXT: v_mov_b32_e32 v1, v2
-; GFX906-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX906-LABEL: v_mad_mix_v4f32_clamp_postcvt:
+; SDAG-GFX906: ; %bb.0:
+; SDAG-GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX906-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1]
+; SDAG-GFX906-NEXT: v_fma_mixlo_f16 v7, v1, v3, v5 op_sel_hi:[1,1,1]
+; SDAG-GFX906-NEXT: v_fma_mixhi_f16 v7, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX906-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX906-NEXT: v_pk_max_f16 v1, v7, 0
+; SDAG-GFX906-NEXT: v_pk_max_f16 v0, v6, 0
+; SDAG-GFX906-NEXT: v_pk_min_f16 v0, v0, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX906-NEXT: v_pk_min_f16 v1, v1, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX906-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-VI-LABEL: v_mad_mix_v4f32_clamp_postcvt:
; SDAG-VI: ; %bb.0:
; SDAG-VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v7, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v9, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v9, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v10, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v11, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v10, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v11, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SDAG-VI-NEXT: v_mac_f32_e32 v10, v7, v9
; SDAG-VI-NEXT: v_mac_f32_e32 v11, v6, v8
-; SDAG-VI-NEXT: v_mac_f32_e32 v5, v1, v3
; SDAG-VI-NEXT: v_mac_f32_e32 v4, v0, v2
-; SDAG-VI-NEXT: v_cvt_f16_f32_sdwa v0, v11 clamp dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; SDAG-VI-NEXT: v_cvt_f16_f32_sdwa v1, v10 clamp dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; SDAG-VI-NEXT: v_cvt_f16_f32_e64 v2, v4 clamp
-; SDAG-VI-NEXT: v_cvt_f16_f32_e64 v3, v5 clamp
+; SDAG-VI-NEXT: v_mac_f32_e32 v5, v1, v3
+; SDAG-VI-NEXT: v_cvt_f16_f32_e32 v0, v10
+; SDAG-VI-NEXT: v_cvt_f16_f32_e32 v1, v11
+; SDAG-VI-NEXT: v_cvt_f16_f32_e32 v2, v4
+; SDAG-VI-NEXT: v_cvt_f16_f32_e32 v3, v5
+; SDAG-VI-NEXT: v_max_f16_e32 v0, 0, v0
+; SDAG-VI-NEXT: v_max_f16_e32 v1, 0, v1
+; SDAG-VI-NEXT: v_max_f16_e32 v2, 0, v2
+; SDAG-VI-NEXT: v_max_f16_e32 v3, 0, v3
+; SDAG-VI-NEXT: v_mov_b32_e32 v4, 0x3c00
+; SDAG-VI-NEXT: v_min_f16_sdwa v1, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; SDAG-VI-NEXT: v_min_f16_sdwa v0, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; SDAG-VI-NEXT: v_min_f16_e32 v3, 1.0, v3
+; SDAG-VI-NEXT: v_min_f16_e32 v2, 1.0, v2
; SDAG-VI-NEXT: v_or_b32_e32 v0, v2, v0
; SDAG-VI-NEXT: v_or_b32_e32 v1, v3, v1
; SDAG-VI-NEXT: s_setpc_b64 s[30:31]
@@ -1241,6 +1272,40 @@ define <4 x half> @v_mad_mix_v4f32_clamp_postcvt(<4 x half> %src0, <4 x half> %s
; SDAG-CI-NEXT: v_cvt_f32_f16_e64 v3, v3 clamp
; SDAG-CI-NEXT: s_setpc_b64 s[30:31]
;
+; GISEL-GFX1100-LABEL: v_mad_mix_v4f32_clamp_postcvt:
+; GISEL-GFX1100: ; %bb.0:
+; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX1100-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp
+; GISEL-GFX1100-NEXT: v_fma_mixlo_f16 v7, v1, v3, v5 op_sel_hi:[1,1,1] clamp
+; GISEL-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GISEL-GFX1100-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
+; GISEL-GFX1100-NEXT: v_fma_mixhi_f16 v7, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
+; GISEL-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-GFX1100-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GISEL-GFX1100-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX900-LABEL: v_mad_mix_v4f32_clamp_postcvt:
+; GISEL-GFX900: ; %bb.0:
+; GISEL-GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX900-NEXT: v_mad_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp
+; GISEL-GFX900-NEXT: v_mad_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
+; GISEL-GFX900-NEXT: v_mad_mixlo_f16 v2, v1, v3, v5 op_sel_hi:[1,1,1] clamp
+; GISEL-GFX900-NEXT: v_mad_mixhi_f16 v2, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
+; GISEL-GFX900-NEXT: v_mov_b32_e32 v0, v6
+; GISEL-GFX900-NEXT: v_mov_b32_e32 v1, v2
+; GISEL-GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX906-LABEL: v_mad_mix_v4f32_clamp_postcvt:
+; GISEL-GFX906: ; %bb.0:
+; GISEL-GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX906-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp
+; GISEL-GFX906-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
+; GISEL-GFX906-NEXT: v_fma_mixlo_f16 v2, v1, v3, v5 op_sel_hi:[1,1,1] clamp
+; GISEL-GFX906-NEXT: v_fma_mixhi_f16 v2, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
+; GISEL-GFX906-NEXT: v_mov_b32_e32 v0, v6
+; GISEL-GFX906-NEXT: v_mov_b32_e32 v1, v2
+; GISEL-GFX906-NEXT: s_setpc_b64 s[30:31]
+;
; GISEL-VI-LABEL: v_mad_mix_v4f32_clamp_postcvt:
; GISEL-VI: ; %bb.0:
; GISEL-VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/merge-buffer-gfx12.mir b/llvm/test/CodeGen/AMDGPU/merge-buffer-gfx12.mir
new file mode 100644
index 0000000..d7f5d1a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/merge-buffer-gfx12.mir
@@ -0,0 +1,1154 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck -check-prefixes=GFX12 %s
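+#
+# These tests exercise buffer load/store merging in si-load-store-opt: accesses
+# at contiguous offsets (e.g. a DWORD at offset 4 followed by a DWORDX3 at
+# offset 8) are expected to merge into one wider access (a DWORDX4 at offset 4).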
+
+---
+name: buffer_load_dword_dwordx3
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx3_dword
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx3_dword
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_96 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub0_sub1_sub2
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub0_sub1
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dword
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dword
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub0_sub1
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+
+name: buffer_load_dword_dword
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_32
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_32
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub0_sub1_sub2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub3
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[COPY6]].sub0_sub1
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY killed [[COPY6]].sub2
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY8]].sub0
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY killed [[COPY8]].sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 36, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub0_sub1
+ ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub2
+ ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY12]].sub0
+ ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY killed [[COPY12]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %10:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 20, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %11:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 24, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %12:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 28, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %13:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 36, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %14:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 40, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %15:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 44, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+#
+# buffer_store_dword
+#
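+# The store tests mirror the loads: adjacent stores are expected to merge into
+# a single wider store, with a REG_SEQUENCE gathering the source registers.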
+
+name: buffer_store_dword_xyz
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dword_xyz
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[REG_SEQUENCE1]], %subreg.sub1_sub2_sub3
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_96 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1, %6:vgpr_32, %subreg.sub2
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact %14:vreg_96, %13:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx3_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dwordx3_dword
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[COPY]], %subreg.sub3
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_96 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1, %6:vgpr_32, %subreg.sub2
+ BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact %14:vreg_96, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx2_dwordx2
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dwordx2_dwordx2
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1, [[REG_SEQUENCE2]], %subreg.sub2_sub3
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE3]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ %15:vreg_64 = REG_SEQUENCE %6:vgpr_32, %subreg.sub0, %7:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact %14:vreg_64, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact %15:vreg_64, %13:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_dwordx2
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dword_dwordx2
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+    ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[REG_SEQUENCE1]], %subreg.sub1_sub2
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+    BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact %14:vreg_64, %13:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx2_dword
+body: |
+ bb.0.entry:
+    liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dwordx2_dword
+    ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1, [[COPY]], %subreg.sub2
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact %14:vreg_64, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
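+# Two adjacent dword stores merge into a single dwordx2 store.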
+name: buffer_store_dword_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dword_dword
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE1]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %6:vgpr_32, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
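+# Dword stores are merged greedily within each contiguous run: offsets 4-11 become a dwordx2, 16-31 a dwordx4, and 36-47 a dwordx3.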
+name: buffer_store_dword_32
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
+ ; GFX12-LABEL: name: buffer_store_dword_32
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr8
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr7
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr6
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY11]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY9]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE1]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE killed [[REG_SEQUENCE2]], %subreg.sub0_sub1, [[COPY4]], %subreg.sub2
+ ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE killed [[REG_SEQUENCE3]], %subreg.sub0_sub1_sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE4]], [[REG_SEQUENCE]], $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:vreg_96 = REG_SEQUENCE killed [[REG_SEQUENCE5]], %subreg.sub0_sub1, [[COPY]], %subreg.sub2
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE6]], [[REG_SEQUENCE]], $sgpr_null, 36, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %12:vgpr_32 = COPY $vgpr8
+ %11:vgpr_32 = COPY $vgpr7
+ %10:vgpr_32 = COPY $vgpr6
+ %9:vgpr_32 = COPY $vgpr5
+ %8:vgpr_32 = COPY $vgpr4
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %4:vgpr_32, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %5:vgpr_32, %13:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %6:vgpr_32, %13:sgpr_128, $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 20, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %8:vgpr_32, %13:sgpr_128, $sgpr_null, 24, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %9:vgpr_32, %13:sgpr_128, $sgpr_null, 28, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %10:vgpr_32, %13:sgpr_128, $sgpr_null, 36, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %11:vgpr_32, %13:sgpr_128, $sgpr_null, 40, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %12:vgpr_32, %13:sgpr_128, $sgpr_null, 44, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
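+# Loads are not merged when either access has the swizzle (swz) bit set.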
+name: buffer_load_dword_not_merged_swizzled_0
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_not_merged_swizzled_0
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_not_merged_swizzled_1
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_not_merged_swizzled_1
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 8, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
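+# Two non-swizzled loads (offsets 4 and 8) still merge when an unrelated swizzled load sits between them.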
+name: buffer_load_dword_merge_across_swizzle
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_merge_across_swizzle
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 12, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %5:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %4:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %4:sgpr_128, $sgpr_null, 12, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %4:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
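+# A swizzled store between two loads blocks the merge.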
+name: buffer_load_dword_not_merge_across_swizzled_store
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_not_merge_across_swizzled_store
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 6, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %4:vgpr_32, %5:sgpr_128, $sgpr_null, 6, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
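+# Loads at offsets 4 and 8 merge across a swizzled store to a disjoint offset (12).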
+name: buffer_load_dword_merge_across_swizzled_store
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_merge_across_swizzled_store
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 12, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %4:vgpr_32, %5:sgpr_128, $sgpr_null, 12, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
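+# IDXEN (indexed) buffer loads with the same vaddr merge just like the plain offset forms.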
+name: buffer_load_dword_dword_idxen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_idxen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_idxen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2_idxen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_idxen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_idxen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3_idxen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
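+# BOTHEN (index + offset) buffer loads merge in the same way.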
+name: buffer_load_dword_dword_bothen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_bothen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_bothen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2_bothen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_bothen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_bothen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3_bothen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
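+# The _exact opcode variants are merged in the same way as their non-exact counterparts.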
+name: buffer_load_dword_dword_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
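+# Three consecutive dword loads merge into a single dwordx3 load.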
+name: buffer_load_dword_dword_dword_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_dword_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact]].sub2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]].sub0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY killed [[COPY5]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
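+# When the first of three loads is swizzled, only the remaining two are merged.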
+name: buffer_load_dword_dword_dword_idxen_exact_swizzled_0
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_dword_idxen_exact_swizzled_0
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_bothen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_bothen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_bothen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_bothen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_dword_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact]].sub2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]].sub0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY killed [[COPY5]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_bothen_exact_swizzled_0
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_dword_bothen_exact_swizzled_0
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
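+# Loads with different vaddr operands are not merged.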
+name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_vaddr
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_vaddr
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE]], $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:vreg_64 = COPY $vgpr1
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %4, %6:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %5, %6:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
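+# Loads with different resource descriptors (srsrc) are not merged.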
+name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_srsrc
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_srsrc
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE1]], $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_32 = COPY $sgpr4
+ %5:vreg_64 = COPY $vgpr0
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:sgpr_128 = REG_SEQUENCE %1:sgpr_32, %subreg.sub0, %2:sgpr_32, %subreg.sub1, %3:sgpr_32, %subreg.sub2, %4:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %5, %6:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %5, %7:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_vaddr
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_vaddr
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY5]], [[REG_SEQUENCE]], $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:vgpr_32 = COPY $vgpr1
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %4, %6:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %5, %6:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_srsrc
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_srsrc
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY5]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY5]], [[REG_SEQUENCE1]], $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_32 = COPY $sgpr4
+ %5:vgpr_32 = COPY $vgpr0
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:sgpr_128 = REG_SEQUENCE %1:sgpr_32, %subreg.sub0, %2:sgpr_32, %subreg.sub1, %3:sgpr_32, %subreg.sub2, %4:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %5, %6:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %5, %7:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
diff --git a/llvm/test/CodeGen/AMDGPU/merge-buffer.mir b/llvm/test/CodeGen/AMDGPU/merge-buffer.mir
new file mode 100644
index 0000000..1c6d429
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/merge-buffer.mir
@@ -0,0 +1,1130 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck -check-prefixes=GCN %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck -check-prefixes=GCN %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck -check-prefixes=GCN %s
+
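+# These tests check that the si-load-store-opt pass merges adjacent buffer loads
+# and stores into wider instructions, splitting merged load results back into the
+# original registers with subregister copies.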
+---
+name: buffer_load_dword_dwordx3
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_OFFSET]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx3_dword
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx3_dword
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_96 = COPY [[BUFFER_LOAD_DWORDX4_OFFSET]].sub0_sub1_sub2
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX4_OFFSET]].sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_OFFSET]].sub0_sub1
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_OFFSET]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_OFFSET]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dword
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dword
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_OFFSET]].sub0_sub1
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_OFFSET]].sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_OFFSET]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
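+# A run of nine dword loads merges into dwordx2 + dwordx4 + dwordx3 groups,
+# split at the offset gaps (8 -> 16 and 28 -> 36).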
+name: buffer_load_dword_32
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_32
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_OFFSET]].sub1
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE]], 0, 16, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY [[BUFFER_LOAD_DWORDX4_OFFSET]].sub0_sub1_sub2
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX4_OFFSET]].sub3
+ ; GCN-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[COPY6]].sub0_sub1
+ ; GCN-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY killed [[COPY6]].sub2
+ ; GCN-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY8]].sub0
+ ; GCN-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY killed [[COPY8]].sub1
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET [[REG_SEQUENCE]], 0, 36, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_OFFSET]].sub0_sub1
+ ; GCN-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_OFFSET]].sub2
+ ; GCN-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY12]].sub0
+ ; GCN-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY killed [[COPY12]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %10:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 20, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %11:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 24, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %12:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 28, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %13:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 36, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %14:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 40, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %15:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 44, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+#
+# buffer_store_dword
+#
+
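+# Adjacent dword and dwordx3 stores at offsets 4 and 8 merge into a single dwordx4 store.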
+name: buffer_store_dword_xyz
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dword_xyz
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[REG_SEQUENCE1]], %subreg.sub1_sub2_sub3
+ ; GCN-NEXT: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_96 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1, %6:vgpr_32, %subreg.sub2
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX3_OFFSET_exact %14:vreg_96, %13:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx3_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dwordx3_dword
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[COPY]], %subreg.sub3
+ ; GCN-NEXT: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_96 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1, %6:vgpr_32, %subreg.sub2
+ BUFFER_STORE_DWORDX3_OFFSET_exact %14:vreg_96, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx2_dwordx2
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dwordx2_dwordx2
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1, [[REG_SEQUENCE2]], %subreg.sub2_sub3
+ ; GCN-NEXT: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[REG_SEQUENCE3]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ %15:vreg_64 = REG_SEQUENCE %6:vgpr_32, %subreg.sub0, %7:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORDX2_OFFSET_exact %14:vreg_64, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX2_OFFSET_exact %15:vreg_64, %13:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_dwordx2
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dword_dwordx2
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[REG_SEQUENCE1]], %subreg.sub1_sub2
+ ; GCN-NEXT: BUFFER_STORE_DWORDX3_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX2_OFFSET_exact %14:vreg_64, %13:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx2_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dwordx2_dword
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1, [[COPY]], %subreg.sub2
+ ; GCN-NEXT: BUFFER_STORE_DWORDX3_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORDX2_OFFSET_exact %14:vreg_64, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dword_dword
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GCN-NEXT: BUFFER_STORE_DWORDX2_OFFSET_exact killed [[REG_SEQUENCE1]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ BUFFER_STORE_DWORD_OFFSET_exact %6:vgpr_32, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_32
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
+ ; GCN-LABEL: name: buffer_store_dword_32
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr8
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr7
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr6
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY9:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY10:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY11:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY12:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY11]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY9]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GCN-NEXT: BUFFER_STORE_DWORDX2_OFFSET_exact killed [[REG_SEQUENCE1]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE killed [[REG_SEQUENCE2]], %subreg.sub0_sub1, [[COPY4]], %subreg.sub2
+ ; GCN-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE killed [[REG_SEQUENCE3]], %subreg.sub0_sub1_sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[REG_SEQUENCE4]], [[REG_SEQUENCE]], 0, 16, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:vreg_96 = REG_SEQUENCE killed [[REG_SEQUENCE5]], %subreg.sub0_sub1, [[COPY]], %subreg.sub2
+ ; GCN-NEXT: BUFFER_STORE_DWORDX3_OFFSET_exact killed [[REG_SEQUENCE6]], [[REG_SEQUENCE]], 0, 36, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %12:vgpr_32 = COPY $vgpr8
+ %11:vgpr_32 = COPY $vgpr7
+ %10:vgpr_32 = COPY $vgpr6
+ %9:vgpr_32 = COPY $vgpr5
+ %8:vgpr_32 = COPY $vgpr4
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ BUFFER_STORE_DWORD_OFFSET_exact %4:vgpr_32, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %5:vgpr_32, %13:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %6:vgpr_32, %13:sgpr_128, 0, 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 20, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %8:vgpr_32, %13:sgpr_128, 0, 24, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %9:vgpr_32, %13:sgpr_128, 0, 28, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %10:vgpr_32, %13:sgpr_128, 0, 36, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %11:vgpr_32, %13:sgpr_128, 0, 40, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %12:vgpr_32, %13:sgpr_128, 0, 44, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
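+# A load with the swizzle (swz) bit set must not be merged.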
+name: buffer_load_dword_not_merged_swizzled_0
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_not_merged_swizzled_0
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_not_merged_swizzled_1
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_not_merged_swizzled_1
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 8, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
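+# The loads at offsets 4 and 8 still merge into a dwordx2 even though a swizzled
+# load at offset 12 sits between them in program order.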
+name: buffer_load_dword_merge_across_swizzle
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_merge_across_swizzle
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_OFFSET]].sub1
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 12, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %5:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %4:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %4:sgpr_128, 0, 12, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %4:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
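+# An intervening swizzled store blocks merging of the surrounding loads, since
+# the store may alias them.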
+name: buffer_load_dword_merge_across_swizzled_store
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_merge_across_swizzled_store
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: BUFFER_STORE_DWORD_OFFSET_exact [[COPY4]], [[REG_SEQUENCE]], 0, 6, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %4:vgpr_32, %5:sgpr_128, 0, 6, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
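+# The same merging is applied to the IDXEN, BOTHEN, and *_exact addressing variants below.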
+name: buffer_load_dword_dword_idxen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_idxen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_IDXEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_IDXEN]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_idxen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2_idxen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_IDXEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_IDXEN]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_idxen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_IDXEN]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_IDXEN]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_idxen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3_idxen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_IDXEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_IDXEN]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_bothen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_bothen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_BOTHEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_BOTHEN]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_bothen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2_bothen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_BOTHEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_BOTHEN]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_bothen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_BOTHEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_bothen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3_bothen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_BOTHEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
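+# The _exact variants, which must access exactly the listed components, merge in
+# the same way when the combined access covers both operands exactly.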
+name: buffer_load_dword_dword_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_IDXEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_IDXEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_IDXEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_IDXEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_IDXEN_exact]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_IDXEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_IDXEN_exact]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_IDXEN_exact]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_IDXEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_IDXEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_IDXEN_exact]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
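+# Three consecutive dword loads merge into a single dwordx3.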
+name: buffer_load_dword_dword_dword_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_dword_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_IDXEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_IDXEN_exact]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_IDXEN_exact]].sub2
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]].sub0
+ ; GCN-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY killed [[COPY5]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
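+# With the first of three loads swizzled, only the remaining two merge into a dwordx2.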
+name: buffer_load_dword_dword_dword_idxen_exact_swizzled_0
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_dword_idxen_exact_swizzled_0
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_IDXEN_exact:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_IDXEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_IDXEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_bothen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_BOTHEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_BOTHEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_bothen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_BOTHEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_BOTHEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_BOTHEN_exact]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_BOTHEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN_exact]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_BOTHEN_exact]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_bothen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_BOTHEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_BOTHEN_exact]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_bothen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_dword_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_BOTHEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_BOTHEN_exact]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_BOTHEN_exact]].sub2
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]].sub0
+ ; GCN-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY killed [[COPY5]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_bothen_exact_swizzled_0
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_dword_bothen_exact_swizzled_0
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_BOTHEN_exact:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_BOTHEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_BOTHEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
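+# Negative test: the two loads use different vaddr registers, so they must
+# not be merged (the checks expect two separate DWORDX2 loads).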
+name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_vaddr
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_vaddr
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE]], 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:vreg_64 = COPY $vgpr1
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %4, %6:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %5, %6:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
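+# Negative test: the two loads use different srsrc descriptors, so they must
+# not be merged.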
+name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_srsrc
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_srsrc
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE1]], 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_32 = COPY $sgpr4
+ %5:vreg_64 = COPY $vgpr0
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:sgpr_128 = REG_SEQUENCE %1:sgpr_32, %subreg.sub0, %2:sgpr_32, %subreg.sub1, %3:sgpr_32, %subreg.sub2, %4:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %5, %6:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %5, %7:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
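+# IDXEN variants of the negative tests above: a differing vaddr or srsrc
+# operand still blocks merging.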
+name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_vaddr
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_vaddr
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY5]], [[REG_SEQUENCE]], 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:vgpr_32 = COPY $vgpr1
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %4, %6:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %5, %6:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_srsrc
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_srsrc
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY5]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY5]], [[REG_SEQUENCE1]], 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_32 = COPY $sgpr4
+ %5:vgpr_32 = COPY $vgpr0
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:sgpr_128 = REG_SEQUENCE %1:sgpr_32, %subreg.sub0, %2:sgpr_32, %subreg.sub1, %3:sgpr_32, %subreg.sub2, %4:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %5, %6:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %5, %7:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
diff --git a/llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir b/llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir
index c86b5ad..9766b42 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir
@@ -7,9 +7,37 @@
# GFX9 tests
#
+---
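+# On GFX9 the X and XYZ loads below are merged into a single XYZW load; on
+# GFX10 and GFX11 they are left unmerged, as the checks verify.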
name: gfx9_tbuffer_load_x_xyz
body: |
bb.0.entry:
+ ; GFX9-LABEL: name: gfx9_tbuffer_load_x_xyz
+ ; GFX9: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX9-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX9-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX9-NEXT: [[TBUFFER_LOAD_FORMAT_XYZW_OFFSET:%[0-9]+]]:vreg_128 = TBUFFER_LOAD_FORMAT_XYZW_OFFSET [[REG_SEQUENCE]], 0, 4, 126, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX9-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[TBUFFER_LOAD_FORMAT_XYZW_OFFSET]].sub0
+ ; GFX9-NEXT: [[COPY5:%[0-9]+]]:vreg_96 = COPY killed [[TBUFFER_LOAD_FORMAT_XYZW_OFFSET]].sub1_sub2_sub3
+ ;
+ ; GFX10-LABEL: name: gfx9_tbuffer_load_x_xyz
+ ; GFX10: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX10-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX10-NEXT: [[TBUFFER_LOAD_FORMAT_X_OFFSET:%[0-9]+]]:vgpr_32 = TBUFFER_LOAD_FORMAT_X_OFFSET [[REG_SEQUENCE]], 0, 4, 116, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX10-NEXT: [[TBUFFER_LOAD_FORMAT_XYZ_OFFSET:%[0-9]+]]:vreg_96 = TBUFFER_LOAD_FORMAT_XYZ_OFFSET [[REG_SEQUENCE]], 0, 8, 125, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ;
+ ; GFX11-LABEL: name: gfx9_tbuffer_load_x_xyz
+ ; GFX11: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[TBUFFER_LOAD_FORMAT_X_OFFSET:%[0-9]+]]:vgpr_32 = TBUFFER_LOAD_FORMAT_X_OFFSET [[REG_SEQUENCE]], 0, 4, 116, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX11-NEXT: [[TBUFFER_LOAD_FORMAT_XYZ_OFFSET:%[0-9]+]]:vreg_96 = TBUFFER_LOAD_FORMAT_XYZ_OFFSET [[REG_SEQUENCE]], 0, 8, 125, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
%0:sgpr_32 = COPY $sgpr0
%1:sgpr_32 = COPY $sgpr1
%2:sgpr_32 = COPY $sgpr2
diff --git a/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll b/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
index cbdc7bb..69971bc 100644
--- a/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
+++ b/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
@@ -27,7 +27,7 @@ define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
; CHECK-LABEL: csr_vgpr_spill_fp_callee:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_mov_b32 s24, s33
+; CHECK-NEXT: s_mov_b32 s18, s33
; CHECK-NEXT: s_mov_b32 s33, s32
; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
; CHECK-NEXT: buffer_store_dword v1, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
@@ -43,7 +43,6 @@ define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
; CHECK-NEXT: s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; clobber csr v40
@@ -55,7 +54,7 @@ define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
; CHECK-NEXT: buffer_load_dword v1, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
; CHECK-NEXT: s_mov_b64 exec, s[4:5]
; CHECK-NEXT: s_add_i32 s32, s32, 0xfffffc00
-; CHECK-NEXT: s_mov_b32 s33, s24
+; CHECK-NEXT: s_mov_b32 s33, s18
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
bb:
@@ -88,7 +87,6 @@ define amdgpu_kernel void @kernel_call() {
; CHECK-NEXT: ; implicit-def: $sgpr15
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_endpgm
bb:
@@ -148,7 +146,6 @@ define amdgpu_kernel void @kernel_tailcall() {
; CHECK-NEXT: ; implicit-def: $sgpr15
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_endpgm
bb:
@@ -173,7 +170,7 @@ define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
; CHECK-LABEL: caller_save_vgpr_spill_fp_tail_call:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_mov_b32 s24, s33
+; CHECK-NEXT: s_mov_b32 s18, s33
; CHECK-NEXT: s_mov_b32 s33, s32
; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
; CHECK-NEXT: buffer_store_dword v1, off, s[0:3], s33 ; 4-byte Folded Spill
@@ -188,7 +185,6 @@ define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
; CHECK-NEXT: s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: v_readlane_b32 s31, v1, 1
; CHECK-NEXT: v_readlane_b32 s30, v1, 0
@@ -196,7 +192,7 @@ define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
; CHECK-NEXT: buffer_load_dword v1, off, s[0:3], s33 ; 4-byte Folded Reload
; CHECK-NEXT: s_mov_b64 exec, s[4:5]
; CHECK-NEXT: s_add_i32 s32, s32, 0xfffffc00
-; CHECK-NEXT: s_mov_b32 s33, s24
+; CHECK-NEXT: s_mov_b32 s33, s18
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
entry:
@@ -208,7 +204,7 @@ define hidden i32 @caller_save_vgpr_spill_fp() #0 {
; CHECK-LABEL: caller_save_vgpr_spill_fp:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_mov_b32 s25, s33
+; CHECK-NEXT: s_mov_b32 s19, s33
; CHECK-NEXT: s_mov_b32 s33, s32
; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
; CHECK-NEXT: buffer_store_dword v2, off, s[0:3], s33 ; 4-byte Folded Spill
@@ -223,7 +219,6 @@ define hidden i32 @caller_save_vgpr_spill_fp() #0 {
; CHECK-NEXT: s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: v_readlane_b32 s31, v2, 1
; CHECK-NEXT: v_readlane_b32 s30, v2, 0
@@ -231,7 +226,7 @@ define hidden i32 @caller_save_vgpr_spill_fp() #0 {
; CHECK-NEXT: buffer_load_dword v2, off, s[0:3], s33 ; 4-byte Folded Reload
; CHECK-NEXT: s_mov_b64 exec, s[4:5]
; CHECK-NEXT: s_add_i32 s32, s32, 0xfffffc00
-; CHECK-NEXT: s_mov_b32 s33, s25
+; CHECK-NEXT: s_mov_b32 s33, s19
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
entry:
@@ -263,7 +258,6 @@ define protected amdgpu_kernel void @kernel() {
; CHECK-NEXT: ; implicit-def: $sgpr15
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_endpgm
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir b/llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir
index 3de258b..bf2cf6a 100644
--- a/llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir
+++ b/llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir
@@ -5,6 +5,14 @@
# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -amdgpu-mfma-padding-ratio=75 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx908-PAD75 %s
# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -amdgpu-mfma-padding-ratio=100 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx908-PAD100 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx90a-DEFAULT %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -amdgpu-mfma-padding-ratio=50 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx90a-PAD50 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -amdgpu-mfma-padding-ratio=100 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx90a-PAD100 %s
+
+# RUN: llc -mtriple=amdgcn -mcpu=gfx940 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx940-DEFAULT %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx940 -amdgpu-mfma-padding-ratio=50 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx940-PAD50 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx940 -amdgpu-mfma-padding-ratio=100 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx940-PAD100 %s
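+# amdgpu-mfma-padding-ratio fills the given percentage of the latency between
+# neighboring MFMAs with s_nops; the checks below verify the resulting S_NOP
+# counts for gfx90a and gfx940.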
+
---
name: mfma_padding_2_pass
body: |
@@ -31,6 +39,35 @@ body: |
; gfx908-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
; gfx908-PAD100-NEXT: S_NOP 1
; gfx908-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_2_pass
+ ; gfx90a-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_2_pass
+ ; gfx90a-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 0
+ ; gfx90a-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_2_pass
+ ; gfx90a-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 1
+ ; gfx90a-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_2_pass
+ ; gfx940-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: S_NOP 1
+ ; gfx940-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_2_pass
+ ; gfx940-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 1
+ ; gfx940-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_2_pass
+ ; gfx940-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 1
+ ; gfx940-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
...
@@ -64,6 +101,40 @@ body: |
; gfx908-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
; gfx908-PAD100-NEXT: S_NOP 0
; gfx908-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx90a-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx90a-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx90a-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 0
+ ; gfx90a-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx940-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: S_NOP 0
+ ; gfx940-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx940-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 0
+ ; gfx940-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx940-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 0
+ ; gfx940-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$vgpr2 = V_MOV_B32_e32 1, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
@@ -100,6 +171,41 @@ body: |
; gfx908-PAD100-NEXT: DBG_VALUE
; gfx908-PAD100-NEXT: S_NOP 1
; gfx908-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx90a-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: DBG_VALUE
+ ; gfx90a-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx90a-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: DBG_VALUE
+ ; gfx90a-PAD50-NEXT: S_NOP 0
+ ; gfx90a-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx90a-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: DBG_VALUE
+ ; gfx90a-PAD100-NEXT: S_NOP 1
+ ; gfx90a-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx940-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: DBG_VALUE
+ ; gfx940-DEFAULT-NEXT: S_NOP 1
+ ; gfx940-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx940-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: DBG_VALUE
+ ; gfx940-PAD50-NEXT: S_NOP 1
+ ; gfx940-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx940-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: DBG_VALUE
+ ; gfx940-PAD100-NEXT: S_NOP 1
+ ; gfx940-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
DBG_VALUE
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
@@ -132,6 +238,34 @@ body: |
; gfx908-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_8_pass
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_8_pass
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 3
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_8_pass
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_8_pass
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_8_pass
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 3
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_8_pass
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
...
@@ -172,6 +306,46 @@ body: |
; gfx908-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
; gfx908-PAD100-NEXT: S_NOP 5
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 1
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 5
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 1
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 5
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$vgpr2 = V_MOV_B32_e32 1, implicit $exec
$vgpr3 = V_MOV_B32_e32 1, implicit $exec
@@ -207,6 +381,36 @@ body: |
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 7
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 7
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
...
@@ -258,6 +462,60 @@ body: |
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: S_NOP 3
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 3
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: S_NOP 3
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 3
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: S_NOP 3
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$vgpr2 = V_MOV_B32_e32 1, implicit $exec
$vgpr3 = V_MOV_B32_e32 1, implicit $exec
@@ -369,6 +627,126 @@ body: |
; gfx908-PAD100-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
; gfx908-PAD100-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$vgpr2 = V_MOV_B32_e32 1, implicit $exec
$vgpr3 = V_MOV_B32_e32 1, implicit $exec
@@ -414,6 +792,30 @@ body: |
; gfx908-PAD100-LABEL: name: mfma_padding_16_pass_occ_1
; gfx908-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
...
@@ -506,6 +908,108 @@ body: |
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: S_NOP 5
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx90a-DEFAULT: bb.0:
+ ; gfx90a-DEFAULT-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx90a-DEFAULT-NEXT: {{ $}}
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx90a-DEFAULT-NEXT: {{ $}}
+ ; gfx90a-DEFAULT-NEXT: bb.1:
+ ; gfx90a-DEFAULT-NEXT: successors: %bb.2(0x80000000)
+ ; gfx90a-DEFAULT-NEXT: {{ $}}
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: {{ $}}
+ ; gfx90a-DEFAULT-NEXT: bb.2:
+ ; gfx90a-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx90a-PAD50: bb.0:
+ ; gfx90a-PAD50-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx90a-PAD50-NEXT: {{ $}}
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx90a-PAD50-NEXT: {{ $}}
+ ; gfx90a-PAD50-NEXT: bb.1:
+ ; gfx90a-PAD50-NEXT: successors: %bb.2(0x80000000)
+ ; gfx90a-PAD50-NEXT: {{ $}}
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: {{ $}}
+ ; gfx90a-PAD50-NEXT: bb.2:
+ ; gfx90a-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 5
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx90a-PAD100: bb.0:
+ ; gfx90a-PAD100-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx90a-PAD100-NEXT: {{ $}}
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx90a-PAD100-NEXT: {{ $}}
+ ; gfx90a-PAD100-NEXT: bb.1:
+ ; gfx90a-PAD100-NEXT: successors: %bb.2(0x80000000)
+ ; gfx90a-PAD100-NEXT: {{ $}}
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: {{ $}}
+ ; gfx90a-PAD100-NEXT: bb.2:
+ ; gfx90a-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: S_NOP 5
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx940-DEFAULT: bb.0:
+ ; gfx940-DEFAULT-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx940-DEFAULT-NEXT: {{ $}}
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx940-DEFAULT-NEXT: {{ $}}
+ ; gfx940-DEFAULT-NEXT: bb.1:
+ ; gfx940-DEFAULT-NEXT: successors: %bb.2(0x80000000)
+ ; gfx940-DEFAULT-NEXT: {{ $}}
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: {{ $}}
+ ; gfx940-DEFAULT-NEXT: bb.2:
+ ; gfx940-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx940-PAD50: bb.0:
+ ; gfx940-PAD50-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx940-PAD50-NEXT: {{ $}}
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx940-PAD50-NEXT: {{ $}}
+ ; gfx940-PAD50-NEXT: bb.1:
+ ; gfx940-PAD50-NEXT: successors: %bb.2(0x80000000)
+ ; gfx940-PAD50-NEXT: {{ $}}
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: {{ $}}
+ ; gfx940-PAD50-NEXT: bb.2:
+ ; gfx940-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 5
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx940-PAD100: bb.0:
+ ; gfx940-PAD100-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx940-PAD100-NEXT: {{ $}}
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx940-PAD100-NEXT: {{ $}}
+ ; gfx940-PAD100-NEXT: bb.1:
+ ; gfx940-PAD100-NEXT: successors: %bb.2(0x80000000)
+ ; gfx940-PAD100-NEXT: {{ $}}
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: {{ $}}
+ ; gfx940-PAD100-NEXT: bb.2:
+ ; gfx940-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: S_NOP 5
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
bb.0:
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
diff --git a/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll b/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
index 34e67d0..9999cb9 100644
--- a/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
+++ b/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
@@ -32,7 +32,6 @@ define hidden void @_ZL3barv() #0 !dbg !1644 {
; CHECK-NEXT: s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: .Ltmp1:
diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll
new file mode 100644
index 0000000..538ce15
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll
@@ -0,0 +1,305 @@
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck %s
+
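+; Check that a module containing only callable amdgpu_gfx functions emits
+; per-function resource usage (stack sizes, SGPR/VGPR counts, LDS) under
+; .shader_functions, and leaves the per-stage .registers map empty.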
+; CHECK: .amdgpu_pal_metadata
+; CHECK-NEXT: ---
+; CHECK-NEXT: amdpal.pipelines:
+; CHECK-NEXT: - .api: Vulkan
+; CHECK-NEXT: .compute_registers:
+; CHECK-NEXT: .tg_size_en: true
+; CHECK-NEXT: .tgid_x_en: false
+; CHECK-NEXT: .tgid_y_en: false
+; CHECK-NEXT: .tgid_z_en: false
+; CHECK-NEXT: .tidig_comp_cnt: 0x1
+; CHECK-NEXT: .hardware_stages:
+; CHECK-NEXT: .cs:
+; CHECK-NEXT: .checksum_value: 0x9444d7d0
+; CHECK-NEXT: .debug_mode: 0
+; CHECK-NEXT: .excp_en: 0
+; CHECK-NEXT: .float_mode: 0xc0
+; CHECK-NEXT: .ieee_mode: true
+; CHECK-NEXT: .image_op: false
+; CHECK-NEXT: .lds_size: 0x200
+; CHECK-NEXT: .mem_ordered: true
+; CHECK-NEXT: .sgpr_limit: 0x6a
+; CHECK-NEXT: .threadgroup_dimensions:
+; CHECK-NEXT: - 0x1
+; CHECK-NEXT: - 0x400
+; CHECK-NEXT: - 0x1
+; CHECK-NEXT: .trap_present: false
+; CHECK-NEXT: .user_data_reg_map:
+; CHECK-NEXT: - 0x10000000
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: .user_sgprs: 0x3
+; CHECK-NEXT: .vgpr_limit: 0x100
+; CHECK-NEXT: .wavefront_size: 0x40
+; CHECK-NEXT: .wgp_mode: true
+; CHECK: .registers: {}
+; CHECK-NEXT: .shader_functions:
+; CHECK-NEXT: dynamic_stack:
+; CHECK-NEXT: .backend_stack_size: 0x10
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x22
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x10
+; CHECK-NEXT: .vgpr_count: 0x2
+; CHECK-NEXT: dynamic_stack_loop:
+; CHECK-NEXT: .backend_stack_size: 0x10
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x22
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x10
+; CHECK-NEXT: .vgpr_count: 0x3
+; CHECK-NEXT: multiple_stack:
+; CHECK-NEXT: .backend_stack_size: 0x24
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x21
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x24
+; CHECK-NEXT: .vgpr_count: 0x3
+; CHECK-NEXT: no_stack:
+; CHECK-NEXT: .backend_stack_size: 0
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x20
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0
+; CHECK-NEXT: .vgpr_count: 0x1
+; CHECK-NEXT: no_stack_call:
+; CHECK-NEXT: .backend_stack_size: 0x10
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x22
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x10
+; CHECK-NEXT: .vgpr_count: 0x3
+; CHECK-NEXT: no_stack_extern_call:
+; CHECK-NEXT: .backend_stack_size: 0x10
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x29
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x10
+; CHECK-NEXT: .vgpr_count: 0x58
+; CHECK-NEXT: no_stack_extern_call_many_args:
+; CHECK-NEXT: .backend_stack_size: 0x90
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x29
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x90
+; CHECK-NEXT: .vgpr_count: 0x58
+; CHECK-NEXT: no_stack_indirect_call:
+; CHECK-NEXT: .backend_stack_size: 0x10
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x29
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x10
+; CHECK-NEXT: .vgpr_count: 0x58
+; CHECK-NEXT: simple_lds:
+; CHECK-NEXT: .backend_stack_size: 0
+; CHECK-NEXT: .lds_size: 0x100
+; CHECK-NEXT: .sgpr_count: 0x20
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0
+; CHECK-NEXT: .vgpr_count: 0x1
+; CHECK-NEXT: simple_lds_recurse:
+; CHECK-NEXT: .backend_stack_size: 0x10
+; CHECK-NEXT: .lds_size: 0x100
+; CHECK-NEXT: .sgpr_count: 0x24
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x10
+; CHECK-NEXT: .vgpr_count: 0x29
+; CHECK-NEXT: simple_stack:
+; CHECK-NEXT: .backend_stack_size: 0x14
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x21
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x14
+; CHECK-NEXT: .vgpr_count: 0x2
+; CHECK-NEXT: simple_stack_call:
+; CHECK-NEXT: .backend_stack_size: 0x20
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x22
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x20
+; CHECK-NEXT: .vgpr_count: 0x4
+; CHECK-NEXT: simple_stack_extern_call:
+; CHECK-NEXT: .backend_stack_size: 0x20
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x29
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x20
+; CHECK-NEXT: .vgpr_count: 0x58
+; CHECK-NEXT: simple_stack_indirect_call:
+; CHECK-NEXT: .backend_stack_size: 0x20
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x29
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x20
+; CHECK-NEXT: .vgpr_count: 0x58
+; CHECK-NEXT: simple_stack_recurse:
+; CHECK-NEXT: .backend_stack_size: 0x20
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x24
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x20
+; CHECK-NEXT: .vgpr_count: 0x2a
+; CHECK: amdpal.version:
+; CHECK-NEXT: - 0x3
+; CHECK-NEXT: - 0
+; CHECK-NEXT: ...
+; CHECK-NEXT: .end_amdgpu_pal_metadata
+
+declare amdgpu_gfx float @extern_func(float) #0
+declare amdgpu_gfx float @extern_func_many_args(<64 x float>) #0
+
+@funcptr = external hidden unnamed_addr addrspace(4) constant ptr, align 4
+
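+; The functions below cover the cases reported under .shader_functions:
+; no stack, fixed and dynamic stacks, direct/extern/indirect calls,
+; recursion, and LDS use.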
+define amdgpu_gfx float @no_stack(float %arg0) #0 {
+ %add = fadd float %arg0, 1.0
+ ret float %add
+}
+
+define amdgpu_gfx float @simple_stack(float %arg0) #0 {
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %add = fadd float %arg0, %val
+ ret float %add
+}
+
+define amdgpu_gfx float @multiple_stack(float %arg0) #0 {
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %add = fadd float %arg0, %val
+ %stack2 = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack2
+ %val2 = load volatile float, ptr addrspace(5) %stack2
+ %add2 = fadd float %add, %val2
+ ret float %add2
+}
+
+define amdgpu_gfx float @dynamic_stack(float %arg0) #0 {
+bb0:
+ %cmp = fcmp ogt float %arg0, 0.0
+ br i1 %cmp, label %bb1, label %bb2
+
+bb1:
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %add = fadd float %arg0, %val
+ br label %bb2
+
+bb2:
+ %res = phi float [ 0.0, %bb0 ], [ %add, %bb1 ]
+ ret float %res
+}
+
+define amdgpu_gfx float @dynamic_stack_loop(float %arg0) #0 {
+bb0:
+ br label %bb1
+
+bb1:
+ %ctr = phi i32 [ 0, %bb0 ], [ %newctr, %bb1 ]
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %add = fadd float %arg0, %val
+ %cmp = icmp sgt i32 %ctr, 0
+ %newctr = sub i32 %ctr, 1
+ br i1 %cmp, label %bb1, label %bb2
+
+bb2:
+ ret float %add
+}
+
+define amdgpu_gfx float @no_stack_call(float %arg0) #0 {
+ %res = call amdgpu_gfx float @simple_stack(float %arg0)
+ ret float %res
+}
+
+define amdgpu_gfx float @simple_stack_call(float %arg0) #0 {
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %res = call amdgpu_gfx float @simple_stack(float %arg0)
+ %add = fadd float %res, %val
+ ret float %add
+}
+
+define amdgpu_gfx float @no_stack_extern_call(float %arg0) #0 {
+ %res = call amdgpu_gfx float @extern_func(float %arg0)
+ ret float %res
+}
+
+define amdgpu_gfx float @simple_stack_extern_call(float %arg0) #0 {
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %res = call amdgpu_gfx float @extern_func(float %arg0)
+ %add = fadd float %res, %val
+ ret float %add
+}
+
+define amdgpu_gfx float @no_stack_extern_call_many_args(<64 x float> %arg0) #0 {
+ %res = call amdgpu_gfx float @extern_func_many_args(<64 x float> %arg0)
+ ret float %res
+}
+
+define amdgpu_gfx float @no_stack_indirect_call(float %arg0) #0 {
+ %fptr = load ptr, ptr addrspace(4) @funcptr
+ call amdgpu_gfx void %fptr()
+ ret float %arg0
+}
+
+define amdgpu_gfx float @simple_stack_indirect_call(float %arg0) #0 {
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %fptr = load ptr, ptr addrspace(4) @funcptr
+ call amdgpu_gfx void %fptr()
+ %add = fadd float %arg0, %val
+ ret float %add
+}
+
+define amdgpu_gfx float @simple_stack_recurse(float %arg0) #0 {
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %res = call amdgpu_gfx float @simple_stack_recurse(float %arg0)
+ %add = fadd float %res, %val
+ ret float %add
+}
+
+@lds = internal addrspace(3) global [64 x float] undef
+
+define amdgpu_gfx float @simple_lds(float %arg0) #0 {
+ %val = load float, ptr addrspace(3) @lds
+ ret float %val
+}
+
+define amdgpu_gfx float @simple_lds_recurse(float %arg0) #0 {
+ %val = load float, ptr addrspace(3) @lds
+ %res = call amdgpu_gfx float @simple_lds_recurse(float %val)
+ ret float %res
+}
+
+attributes #0 = { nounwind }
+
+!amdgpu.pal.metadata.msgpack = !{!0}
+
+!0 = !{!"\82\B0amdpal.pipelines\91\8A\A4.api\A6Vulkan\B2.compute_registers\85\AB.tg_size_en\C3\AA.tgid_x_en\C2\AA.tgid_y_en\C2\AA.tgid_z_en\C2\AF.tidig_comp_cnt\01\B0.hardware_stages\81\A3.cs\8C\AF.checksum_value\CE\94D\D7\D0\AB.debug_mode\00\AB.float_mode\CC\C0\A9.image_op\C2\AC.mem_ordered\C3\AB.sgpr_limitj\B7.threadgroup_dimensions\93\01\CD\04\00\01\AD.trap_present\00\B2.user_data_reg_map\DC\00 \CE\10\00\00\00\CE\FF\FF\FF\FF\00\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\AB.user_sgprs\03\AB.vgpr_limit\CD\01\00\AF.wavefront_size@\B7.internal_pipeline_hash\92\CF\E7\10k\A6:\A6%\F7\CF\B2\1F\1A\D4{\DA\E1T\AA.registers\80\A8.shaders\81\A8.compute\82\B0.api_shader_hash\92\CF\E9Zn7}\1E\B9\E7\00\B1.hardware_mapping\91\A3.cs\B0.spill_threshold\CE\FF\FF\FF\FF\A5.type\A2Cs\B0.user_data_limit\01\AF.xgl_cache_info\82\B3.128_bit_cache_hash\92\CF\B4X\B8\11[\A4\88P\CF\A0;\B0\AF\FF\B4\BE\C0\AD.llpc_version\A461.1\AEamdpal.version\92\03\00"}
+!1 = !{i32 7}
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll b/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
index a70488a..a030f86 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
@@ -1,17 +1,20 @@
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -asm-verbose=0 < %s | FileCheck -check-prefixes=GCN,HSA %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,HSA %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,NON-HSA %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -asm-verbose=0 < %s | FileCheck -check-prefixes=GCN,HSA,ASM %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,HSA,OBJ %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,NON-HSA,OBJ %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -asm-verbose=0 < %s | llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx940 -filetype=obj | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,HSA,OBJ %s
; GCN: preload_kernarg_header
; HSA: s_trap 2
; NON-HSA: s_endpgm
-; GCN-COUNT-63: s_nop 0
+; ASM: .fill 63, 4, 0xbf800000 ; s_nop 0
+; OBJ-COUNT-63: s_nop 0
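+; (0xbf800000 is the encoding of s_nop 0, so the asm-level .fill directive and
+; the 63 disassembled s_nop instructions describe the same kernarg preload
+; header.)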
define amdgpu_kernel void @preload_kernarg_header(ptr %arg) {
store ptr %arg, ptr %arg
ret void
}
; GCN: non_kernel_function
+; GCN-NOT: s_trap 2
; GCN-NOT: s_nop 0
; GCN: flat_store
define void @non_kernel_function(ptr %arg) {
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll
index e7488e0..20edbd6 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll
@@ -157,27 +157,27 @@ define amdgpu_kernel void @test_preload_hint_kernel_1_call_func(ptr %0) #0 {
define amdgpu_kernel void @test_preload_hint_kernel_1_call_intrinsic(i16 %0) #0 {
; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; NO-PRELOAD-SAME: (i16 [[TMP0:%.*]]) #[[ATTR2]] {
+; NO-PRELOAD-SAME: (i16 [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; NO-PRELOAD-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; NO-PRELOAD-NEXT: ret void
;
; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-1-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR2]] {
+; PRELOAD-1-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; PRELOAD-1-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; PRELOAD-1-NEXT: ret void
;
; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-3-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR2]] {
+; PRELOAD-3-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; PRELOAD-3-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; PRELOAD-3-NEXT: ret void
;
; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-16-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR2]] {
+; PRELOAD-16-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; PRELOAD-16-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; PRELOAD-16-NEXT: ret void
;
; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-20-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR2]] {
+; PRELOAD-20-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; PRELOAD-20-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; PRELOAD-20-NEXT: ret void
;
@@ -235,23 +235,23 @@ define amdgpu_kernel void @test_preload_hint_kernel_2_preexisting(i32 inreg %0,
define amdgpu_kernel void @test_preload_hint_kernel_incompatible_attributes(ptr addrspace(4) byref(i32) %0, ptr nest %1) {
; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; NO-PRELOAD-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; NO-PRELOAD-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; NO-PRELOAD-NEXT: ret void
;
; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-1-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; PRELOAD-1-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; PRELOAD-1-NEXT: ret void
;
; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-3-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; PRELOAD-3-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; PRELOAD-3-NEXT: ret void
;
; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-16-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; PRELOAD-16-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; PRELOAD-16-NEXT: ret void
;
; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-20-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; PRELOAD-20-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; PRELOAD-20-NEXT: ret void
;
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
index d20c3a4..f0e709b 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
@@ -24,70 +24,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i8:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -98,70 +36,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i8:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_and_b32 s0, s4, 0xff
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -170,70 +46,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i8:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_and_b32 s0, s4, 0xff
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -242,70 +56,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i8:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_and_b32 s0, s4, 0xff
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -325,70 +77,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i8:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -399,70 +89,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i8:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_and_b32 s0, s8, 0xff
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -471,70 +99,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i8:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_and_b32 s0, s8, 0xff
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -543,70 +109,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i8:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_and_b32 s0, s8, 0xff
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -631,70 +135,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i8_zext_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -705,70 +147,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i8_zext_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_mov_b32 s0, 0xffff
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
@@ -778,70 +158,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i8_zext_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_mov_b32 s0, 0xffff
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
@@ -851,70 +169,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i8_zext_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_mov_b32 s0, 0xffff
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
@@ -935,70 +191,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i8_zext_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -1009,70 +203,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i8_zext_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_mov_b32 s0, 0xffff
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
@@ -1082,70 +214,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i8_zext_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_mov_b32 s0, 0xffff
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s8
@@ -1155,70 +225,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i8_zext_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_mov_b32 s0, 0xffff
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
@@ -1244,70 +252,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i16_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -1318,70 +264,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i16_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_and_b32 s0, s4, 0xffff
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -1390,70 +274,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i16_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_and_b32 s0, s4, 0xffff
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -1462,70 +284,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i16_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_and_b32 s0, s4, 0xffff
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -1545,70 +305,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i16_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -1619,70 +317,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i16_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_and_b32 s0, s8, 0xffff
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -1691,70 +327,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i16_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_and_b32 s0, s8, 0xffff
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -1763,70 +337,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i16_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_and_b32 s0, s8, 0xffff
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -1850,70 +362,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -1923,70 +373,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i32_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
@@ -1994,70 +382,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
@@ -2065,70 +391,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i32_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
@@ -2146,70 +410,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -2219,70 +421,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
@@ -2290,70 +430,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s8
@@ -2361,70 +439,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
@@ -2449,70 +465,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: i32_ptr1_i32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s3, s[0:1], 0x10
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x8
@@ -2524,70 +478,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: i32_ptr1_i32_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dword s0, s[0:1], 0x10
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -2598,70 +490,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: i32_ptr1_i32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_add_i32 s0, s2, s6
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -2670,70 +500,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: i32_ptr1_i32_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_add_i32 s0, s2, s6
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -2754,70 +522,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: i32_ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s2, s[4:5], 0x10
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
@@ -2829,70 +535,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: i32_ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dword s0, s[4:5], 0x10
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -2903,70 +547,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: i32_ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_add_i32 s0, s6, s10
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -2975,70 +557,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: i32_ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_add_i32 s0, s6, s10
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -3065,70 +585,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i16_i16_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -3141,70 +599,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i16_i16_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-2-NEXT: s_and_b32 s1, s4, 0xffff
@@ -3217,70 +613,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i16_i16_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 16
; GFX940-PRELOAD-4-NEXT: s_and_b32 s1, s4, 0xffff
@@ -3291,70 +625,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i16_i16_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 16
; GFX940-PRELOAD-8-NEXT: s_and_b32 s1, s4, 0xffff
@@ -3378,70 +650,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i16_i16_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -3454,70 +664,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i16_i16_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-2-NEXT: s_and_b32 s1, s8, 0xffff
@@ -3530,70 +678,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i16_i16_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 16
; GFX90a-PRELOAD-4-NEXT: s_and_b32 s1, s8, 0xffff
@@ -3604,70 +690,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i16_i16_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 16
; GFX90a-PRELOAD-8-NEXT: s_and_b32 s1, s8, 0xffff
@@ -3695,70 +719,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_v2i8_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -3768,70 +730,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_v2i8_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -3841,70 +741,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_v2i8_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -3914,70 +752,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_v2i8_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -3997,70 +773,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_v2i8_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -4070,70 +784,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_v2i8_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -4143,70 +795,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_v2i8_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -4216,70 +806,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_v2i8_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -4308,70 +836,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: byref_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -4385,70 +851,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: byref_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -4462,70 +866,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: byref_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -4539,70 +881,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: byref_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -4630,70 +910,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: byref_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -4707,70 +925,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: byref_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -4784,70 +940,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: byref_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -4861,70 +955,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: byref_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -4964,70 +996,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v8i32_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
@@ -5046,70 +1016,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v8i32_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v4, 0
@@ -5128,70 +1036,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v8i32_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
@@ -5210,70 +1056,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v8i32_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v4, 0
@@ -5311,70 +1095,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v8i32_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
@@ -5393,70 +1115,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v8i32_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v4, 0
@@ -5475,70 +1135,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v8i32_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
@@ -5557,70 +1155,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v8i32_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v4, 0
@@ -5654,70 +1190,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v3i16_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -5729,70 +1203,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v3i16_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s5
@@ -5802,70 +1214,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v3i16_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s5
@@ -5875,70 +1225,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v3i16_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s5
@@ -5959,70 +1247,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v3i16_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -6034,70 +1260,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v3i16_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s9
@@ -6107,70 +1271,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v3i16_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
@@ -6180,70 +1282,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v3i16_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s9
@@ -6269,70 +1309,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v3i32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
@@ -6344,70 +1322,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v3i32_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s6
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s7
@@ -6417,70 +1333,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v3i32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s6
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s7
@@ -6490,70 +1344,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v3i32_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s7
@@ -6575,70 +1367,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v3i32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
@@ -6650,70 +1380,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v3i32_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s10
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s11
@@ -6723,70 +1391,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v3i32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s10
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s11
@@ -6796,70 +1402,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v3i32_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s10
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s11
@@ -6885,70 +1429,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v3f32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
@@ -6960,70 +1442,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v3f32_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s6
@@ -7033,70 +1453,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v3f32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s6
@@ -7106,70 +1464,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v3f32_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
@@ -7191,70 +1487,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v3f32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
@@ -7266,70 +1500,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v3f32_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s10
@@ -7339,70 +1511,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v3f32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s10
@@ -7412,70 +1522,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v3f32_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s10
@@ -7500,70 +1548,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v5i8_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -7575,70 +1561,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v5i8_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -7655,70 +1579,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v5i8_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -7735,70 +1597,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v5i8_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -7826,70 +1626,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v5i8_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -7901,70 +1639,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v5i8_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -7981,70 +1657,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v5i8_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -8061,70 +1675,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v5i8_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -8167,70 +1719,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v5f64_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
; GFX940-PRELOAD-1-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
@@ -8252,70 +1742,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v5f64_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
; GFX940-PRELOAD-2-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
@@ -8337,70 +1765,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v5f64_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
; GFX940-PRELOAD-4-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
@@ -8422,70 +1788,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v5f64_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
; GFX940-PRELOAD-8-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
@@ -8529,70 +1833,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v5f64_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
@@ -8614,70 +1856,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v5f64_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
; GFX90a-PRELOAD-2-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
@@ -8699,70 +1879,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v5f64_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
; GFX90a-PRELOAD-4-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
@@ -8784,70 +1902,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v5f64_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
; GFX90a-PRELOAD-8-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
@@ -8882,70 +1938,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v8i8_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -8955,70 +1949,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v8i8_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s5, 8
; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9042,70 +1974,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v8i8_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s5, 8
; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9129,70 +1999,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v8i8_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s5, 8
; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9225,70 +2033,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v8i8_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -9298,70 +2044,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v8i8_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s9, 8
; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9384,70 +2068,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v8i8_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s9, 8
; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9470,70 +2092,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v8i8_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s9, 8
; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9570,70 +2130,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: i64_kernel_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -9643,70 +2141,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: i64_kernel_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -9714,70 +2150,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: i64_kernel_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -9785,70 +2159,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: i64_kernel_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -9866,70 +2178,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: i64_kernel_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -9939,70 +2189,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: i64_kernel_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-2-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10010,70 +2198,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: i64_kernel_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-4-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10081,70 +2207,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: i64_kernel_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-8-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10166,70 +2230,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: f64_kernel_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -10239,70 +2241,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: f64_kernel_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -10310,70 +2250,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: f64_kernel_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -10381,70 +2259,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: f64_kernel_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -10462,70 +2278,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: f64_kernel_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -10535,70 +2289,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: f64_kernel_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-2-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10606,70 +2298,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: f64_kernel_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-4-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10677,70 +2307,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: f64_kernel_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-8-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-scoring.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-scoring.ll
new file mode 100644
index 0000000..ab03177
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-scoring.ll
@@ -0,0 +1,69 @@
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -debug-only=amdgpu-promote-alloca -amdgpu-promote-alloca-to-vector-limit=512 -passes=amdgpu-promote-alloca %s -o - 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+; CHECK: Scoring: %simpleuser = alloca [4 x i64], align 4, addrspace(5)
+; CHECK-NEXT: [+1]: store i32 42, ptr addrspace(5) %simpleuser, align 4
+; CHECK-NEXT: => Final Score:1
+; CHECK-NEXT: Scoring: %manyusers = alloca [4 x i64], align 4, addrspace(5)
+; CHECK-NEXT: [+1]: store i32 %v0.ext, ptr addrspace(5) %manyusers.1, align 4
+; CHECK-NEXT: [+1]: %v0 = load i8, ptr addrspace(5) %manyusers.1, align 1
+; CHECK-NEXT: [+1]: store i32 %v1.ext, ptr addrspace(5) %manyusers.2, align 4
+; CHECK-NEXT: [+1]: %v1 = load i8, ptr addrspace(5) %manyusers.2, align 1
+; CHECK-NEXT: => Final Score:4
+; CHECK-NEXT: Sorted Worklist:
+; CHECK-NEXT: %manyusers = alloca [4 x i64], align 4, addrspace(5)
+; CHECK-NEXT: %simpleuser = alloca [4 x i64], align 4, addrspace(5)
+define amdgpu_kernel void @simple_users_scores() #0 {
+entry:
+ ; should get a score of 1
+ %simpleuser = alloca [4 x i64], align 4, addrspace(5)
+ ; should get a score of 4
+ %manyusers = alloca [4 x i64], align 4, addrspace(5)
+
+ store i32 42, ptr addrspace(5) %simpleuser
+
+ %manyusers.1 = getelementptr i8, ptr addrspace(5) %manyusers, i64 2
+ %v0 = load i8, ptr addrspace(5) %manyusers.1
+ %v0.ext = zext i8 %v0 to i32
+ store i32 %v0.ext, ptr addrspace(5) %manyusers.1
+
+ %manyusers.2 = getelementptr i8, ptr addrspace(5) %manyusers, i64 1
+ %v1 = load i8, ptr addrspace(5) %manyusers.2
+ %v1.ext = zext i8 %v1 to i32
+ store i32 %v1.ext, ptr addrspace(5) %manyusers.2
+
+ ret void
+}
+
+; CHECK: Scoring: %stack = alloca [4 x i64], align 4, addrspace(5)
+; CHECK-NEXT: [+5]: store i32 32, ptr addrspace(5) %stack, align 4
+; CHECK-NEXT: [+1]: store i32 42, ptr addrspace(5) %stack, align 4
+; CHECK-NEXT: [+9]: store i32 32, ptr addrspace(5) %stack.1, align 4
+; CHECK-NEXT: [+5]: %outer.cmp = load i1, ptr addrspace(5) %stack.1, align 1
+; CHECK-NEXT: [+1]: store i32 64, ptr addrspace(5) %stack.2, align 4
+; CHECK-NEXT: [+9]: %inner.cmp = load i1, ptr addrspace(5) %stack.2, align 1
+; CHECK-NEXT: => Final Score:30
+define amdgpu_kernel void @loop_users_alloca(i1 %x, i2) #0 {
+entry:
+ ; should get a score of 30
+ %stack = alloca [4 x i64], align 4, addrspace(5)
+ %stack.1 = getelementptr i8, ptr addrspace(5) %stack, i64 4
+ %stack.2 = getelementptr i8, ptr addrspace(5) %stack, i64 8
+
+ store i32 42, ptr addrspace(5) %stack
+ br label %loop.outer
+
+loop.outer:
+ store i32 32, ptr addrspace(5) %stack
+ %outer.cmp = load i1, ptr addrspace(5) %stack.1
+ br label %loop.inner
+
+loop.inner:
+ store i32 32, ptr addrspace(5) %stack.1
+ %inner.cmp = load i1, ptr addrspace(5) %stack.2
+ br i1 %inner.cmp, label %loop.inner, label %loop.outer
+
+exit:
+ store i32 64, ptr addrspace(5) %stack.2
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll b/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
index d92ba77..d070dc3 100644
--- a/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
@@ -203,13 +203,13 @@ attributes #5 = { "amdgpu-flat-work-group-size"="128,512" }
attributes #6 = { "amdgpu-flat-work-group-size"="512,512" }
attributes #7 = { "amdgpu-flat-work-group-size"="64,256" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-flat-work-group-size"="1,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-flat-work-group-size"="64,128" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-flat-work-group-size"="128,512" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR3]] = { "amdgpu-flat-work-group-size"="64,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR4]] = { "amdgpu-flat-work-group-size"="128,128" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR5]] = { "amdgpu-flat-work-group-size"="512,512" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR6]] = { "amdgpu-flat-work-group-size"="64,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR7]] = { "amdgpu-flat-work-group-size"="128,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR8]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-flat-work-group-size"="1,256" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-flat-work-group-size"="64,128" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-flat-work-group-size"="128,512" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR3]] = { "amdgpu-flat-work-group-size"="64,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR4]] = { "amdgpu-flat-work-group-size"="128,128" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR5]] = { "amdgpu-flat-work-group-size"="512,512" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR6]] = { "amdgpu-flat-work-group-size"="64,256" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR7]] = { "amdgpu-flat-work-group-size"="128,256" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR8]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll b/llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll
index 2df219b..f62f1d5 100644
--- a/llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll
+++ b/llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll
@@ -399,26 +399,26 @@ attributes #17 = { "amdgpu-waves-per-eu"="5,8" }
attributes #18 = { "amdgpu-waves-per-eu"="9,10" }
attributes #19 = { "amdgpu-waves-per-eu"="8,9" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,2" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,4" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR3]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,9" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR4]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,1" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR5]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,2" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR6]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,9" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR7]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR8]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="3,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR9]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR10]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR11]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="0,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR12]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,123" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR13]] = { "amdgpu-flat-work-group-size"="1,512" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR14]] = { "amdgpu-flat-work-group-size"="1,512" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="3,6" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR15]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="6,9" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR16]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="6,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR17]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="5,5" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR18]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="8,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR19]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR20]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,9" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR21]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="8,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,2" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,4" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR3]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR4]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,1" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR5]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,2" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR6]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR7]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR8]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="3,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR9]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR10]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR11]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="0,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR12]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,123" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR13]] = { "amdgpu-flat-work-group-size"="1,512" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR14]] = { "amdgpu-flat-work-group-size"="1,512" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="3,6" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR15]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="6,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR16]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="6,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR17]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="5,5" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR18]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="8,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR19]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR20]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR21]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="8,9" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir b/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir
index 2ccc241..fdfc9b0 100644
--- a/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir
+++ b/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir
@@ -24,6 +24,7 @@ registers:
- { id: 10, class: sreg_64_xexec, preferred-register: '$vcc' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
maxKernArgAlign: 1
diff --git a/llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll b/llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll
index eaef63b..c1d647c 100644
--- a/llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll
+++ b/llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll
@@ -19,5 +19,5 @@ define void @hoge() {
ret void
}
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll b/llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll
index 297a056..384a9c4 100644
--- a/llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll
+++ b/llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll
@@ -191,11 +191,11 @@ define amdgpu_kernel void @kernel_lds_recursion() {
!1 = !{i32 1, !"amdhsa_code_object_version", i32 400}
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-lds-size"="2" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-lds-size"="2" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR3]] = { "amdgpu-lds-size"="4" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR4]] = { "amdgpu-lds-size"="2" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR4]] = { "amdgpu-lds-size"="2" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR5:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
; CHECK: attributes #[[ATTR6:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir b/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
index c0d1999..0903770 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
@@ -181,6 +181,8 @@ legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
liveins:
- { reg: '$vgpr0', virtual-reg: '%0' }
- { reg: '$vgpr1', virtual-reg: '%1' }
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir b/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir
index efbdbca..c6ccbd9 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir
@@ -78,6 +78,7 @@
name: sgpr_spill_wrong_stack_id
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll b/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
index 764f494..f523b4a 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
@@ -16,7 +16,7 @@ define void @spill_sgpr_with_no_lower_vgpr_available() #0 {
; GCN-LABEL: spill_sgpr_with_no_lower_vgpr_available:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s24, s33
+; GCN-NEXT: s_mov_b32 s18, s33
; GCN-NEXT: s_mov_b32 s33, s32
; GCN-NEXT: s_or_saveexec_b64 s[16:17], -1
; GCN-NEXT: buffer_store_dword v255, off, s[0:3], s33 offset:448 ; 4-byte Folded Spill
@@ -150,7 +150,6 @@ define void @spill_sgpr_with_no_lower_vgpr_available() #0 {
; GCN-NEXT: s_mov_b64 s[20:21], s[0:1]
; GCN-NEXT: s_mov_b64 s[0:1], s[20:21]
; GCN-NEXT: s_mov_b64 s[2:3], s[22:23]
-; GCN-NEXT: ; implicit-def: $sgpr18_sgpr19
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GCN-NEXT: v_readlane_b32 s31, v255, 1
@@ -270,7 +269,7 @@ define void @spill_sgpr_with_no_lower_vgpr_available() #0 {
; GCN-NEXT: buffer_load_dword v255, off, s[0:3], s33 offset:448 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[4:5]
; GCN-NEXT: s_add_i32 s32, s32, 0xffff8c00
-; GCN-NEXT: s_mov_b32 s33, s24
+; GCN-NEXT: s_mov_b32 s33, s18
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca i32, align 4, addrspace(5)
@@ -311,7 +310,7 @@ define void @spill_to_lowest_available_vgpr() #0 {
; GCN-LABEL: spill_to_lowest_available_vgpr:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s24, s33
+; GCN-NEXT: s_mov_b32 s18, s33
; GCN-NEXT: s_mov_b32 s33, s32
; GCN-NEXT: s_or_saveexec_b64 s[16:17], -1
; GCN-NEXT: buffer_store_dword v254, off, s[0:3], s33 offset:444 ; 4-byte Folded Spill
@@ -444,7 +443,6 @@ define void @spill_to_lowest_available_vgpr() #0 {
; GCN-NEXT: s_mov_b64 s[20:21], s[0:1]
; GCN-NEXT: s_mov_b64 s[0:1], s[20:21]
; GCN-NEXT: s_mov_b64 s[2:3], s[22:23]
-; GCN-NEXT: ; implicit-def: $sgpr18_sgpr19
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GCN-NEXT: v_readlane_b32 s31, v254, 1
@@ -563,7 +561,7 @@ define void @spill_to_lowest_available_vgpr() #0 {
; GCN-NEXT: buffer_load_dword v254, off, s[0:3], s33 offset:444 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[4:5]
; GCN-NEXT: s_add_i32 s32, s32, 0xffff8c00
-; GCN-NEXT: s_mov_b32 s33, s24
+; GCN-NEXT: s_mov_b32 s33, s18
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca i32, align 4, addrspace(5)
@@ -1530,7 +1528,7 @@ define void @spill_sgpr_no_free_vgpr_ipra() #0 {
; GCN-LABEL: spill_sgpr_no_free_vgpr_ipra:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s24, s33
+; GCN-NEXT: s_mov_b32 s18, s33
; GCN-NEXT: s_mov_b32 s33, s32
; GCN-NEXT: s_add_i32 s32, s32, 0x7400
; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:444 ; 4-byte Folded Spill
@@ -1668,7 +1666,6 @@ define void @spill_sgpr_no_free_vgpr_ipra() #0 {
; GCN-NEXT: s_mov_b64 s[20:21], s[0:1]
; GCN-NEXT: s_mov_b64 s[0:1], s[20:21]
; GCN-NEXT: s_mov_b64 s[2:3], s[22:23]
-; GCN-NEXT: ; implicit-def: $sgpr18_sgpr19
; GCN-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GCN-NEXT: s_mov_b64 s[4:5], exec
; GCN-NEXT: s_mov_b64 exec, 1
@@ -1801,7 +1798,7 @@ define void @spill_sgpr_no_free_vgpr_ipra() #0 {
; GCN-NEXT: buffer_load_dword v41, off, s[0:3], s33 offset:440 ; 4-byte Folded Reload
; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:444 ; 4-byte Folded Reload
; GCN-NEXT: s_add_i32 s32, s32, 0xffff8c00
-; GCN-NEXT: s_mov_b32 s33, s24
+; GCN-NEXT: s_mov_b32 s33, s18
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
call void @child_function_ipra()
diff --git a/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll b/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
index f229f33..539cfc7 100644
--- a/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
@@ -73,7 +73,7 @@ define amdgpu_kernel void @test_simple_indirect_call() {
;.
; AKF_GCN: attributes #[[ATTR0]] = { "amdgpu-calls" "amdgpu-stack-objects" }
;.
-; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_GCN: attributes #[[ATTR1]] = { "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir b/llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir
index 3558298..f8ec6bb 100644
--- a/llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir
+++ b/llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir
@@ -21,6 +21,7 @@
name: kernel
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
isEntryFunction: true
diff --git a/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll b/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll
index b8bc01e..c6a5990 100644
--- a/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll
+++ b/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll
@@ -916,13 +916,13 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-O0-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_mov_b32 s32, 0x1200
-; WAVE32-O0-NEXT: s_getpc_b64 s[24:25]
-; WAVE32-O0-NEXT: s_mov_b32 s24, s0
-; WAVE32-O0-NEXT: s_load_dwordx4 s[24:27], s[24:25], 0x0
+; WAVE32-O0-NEXT: s_getpc_b64 s[20:21]
+; WAVE32-O0-NEXT: s_mov_b32 s20, s0
+; WAVE32-O0-NEXT: s_load_dwordx4 s[20:23], s[20:21], 0x0
; WAVE32-O0-NEXT: s_waitcnt lgkmcnt(0)
-; WAVE32-O0-NEXT: s_bitset0_b32 s27, 21
-; WAVE32-O0-NEXT: s_add_u32 s24, s24, s9
-; WAVE32-O0-NEXT: s_addc_u32 s25, s25, 0
+; WAVE32-O0-NEXT: s_bitset0_b32 s23, 21
+; WAVE32-O0-NEXT: s_add_u32 s20, s20, s9
+; WAVE32-O0-NEXT: s_addc_u32 s21, s21, 0
; WAVE32-O0-NEXT: ; implicit-def: $vgpr3 : SGPR spill to VGPR lane
; WAVE32-O0-NEXT: s_mov_b32 s14, s8
; WAVE32-O0-NEXT: s_mov_b32 s13, s7
@@ -934,17 +934,17 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-O0-NEXT: v_writelane_b32 v3, s0, 0
; WAVE32-O0-NEXT: s_lshr_b32 s0, s0, 5
; WAVE32-O0-NEXT: v_writelane_b32 v3, s0, 1
-; WAVE32-O0-NEXT: s_or_saveexec_b32 s20, -1
-; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[24:27], 0 offset:128 ; 4-byte Folded Spill
-; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s20
+; WAVE32-O0-NEXT: s_or_saveexec_b32 s19, -1
+; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[20:23], 0 offset:128 ; 4-byte Folded Spill
+; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s19
; WAVE32-O0-NEXT: v_mov_b32_e32 v3, 42
-; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[24:27], 0
+; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[20:23], 0
; WAVE32-O0-NEXT: s_waitcnt_vscnt null, 0x0
-; WAVE32-O0-NEXT: s_mov_b64 s[0:1], s[24:25]
-; WAVE32-O0-NEXT: s_mov_b64 s[2:3], s[26:27]
+; WAVE32-O0-NEXT: s_mov_b64 s[0:1], s[20:21]
+; WAVE32-O0-NEXT: s_mov_b64 s[2:3], s[22:23]
; WAVE32-O0-NEXT: s_mov_b32 s6, s32
; WAVE32-O0-NEXT: v_mov_b32_e32 v3, 17
-; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[24:27], s6 offset:4
+; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[20:23], s6 offset:4
; WAVE32-O0-NEXT: s_mov_b32 s6, stack_passed_argument@abs32@hi
; WAVE32-O0-NEXT: s_mov_b32 s16, stack_passed_argument@abs32@lo
; WAVE32-O0-NEXT: ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
@@ -1018,11 +1018,10 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v30, s18
-; WAVE32-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE32-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
-; WAVE32-O0-NEXT: s_or_saveexec_b32 s20, -1
-; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 offset:128 ; 4-byte Folded Reload
-; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s20
+; WAVE32-O0-NEXT: s_or_saveexec_b32 s19, -1
+; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[20:23], 0 offset:128 ; 4-byte Folded Reload
+; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s19
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: v_readlane_b32 s1, v0, 1
; WAVE32-O0-NEXT: v_readlane_b32 s0, v0, 0
@@ -1137,7 +1136,6 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE64-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v30, s18
-; WAVE64-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE64-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE64-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 offset:128 ; 4-byte Folded Reload
@@ -1155,13 +1153,13 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-WWM-PREALLOC-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-WWM-PREALLOC: ; %bb.0:
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s32, 0x1200
-; WAVE32-WWM-PREALLOC-NEXT: s_getpc_b64 s[24:25]
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s24, s0
-; WAVE32-WWM-PREALLOC-NEXT: s_load_dwordx4 s[24:27], s[24:25], 0x0
+; WAVE32-WWM-PREALLOC-NEXT: s_getpc_b64 s[20:21]
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s20, s0
+; WAVE32-WWM-PREALLOC-NEXT: s_load_dwordx4 s[20:23], s[20:21], 0x0
; WAVE32-WWM-PREALLOC-NEXT: s_waitcnt lgkmcnt(0)
-; WAVE32-WWM-PREALLOC-NEXT: s_bitset0_b32 s27, 21
-; WAVE32-WWM-PREALLOC-NEXT: s_add_u32 s24, s24, s9
-; WAVE32-WWM-PREALLOC-NEXT: s_addc_u32 s25, s25, 0
+; WAVE32-WWM-PREALLOC-NEXT: s_bitset0_b32 s23, 21
+; WAVE32-WWM-PREALLOC-NEXT: s_add_u32 s20, s20, s9
+; WAVE32-WWM-PREALLOC-NEXT: s_addc_u32 s21, s21, 0
; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $vgpr32 : SGPR spill to VGPR lane
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s14, s8
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s13, s7
@@ -1174,13 +1172,13 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-WWM-PREALLOC-NEXT: s_lshr_b32 s0, s0, 5
; WAVE32-WWM-PREALLOC-NEXT: v_writelane_b32 v32, s0, 1
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v3, 42
-; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v3, off, s[24:27], 0
+; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v3, off, s[20:23], 0
; WAVE32-WWM-PREALLOC-NEXT: s_waitcnt_vscnt null, 0x0
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b64 s[0:1], s[24:25]
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b64 s[2:3], s[26:27]
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b64 s[0:1], s[20:21]
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b64 s[2:3], s[22:23]
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s6, s32
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v3, 17
-; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v3, off, s[24:27], s6 offset:4
+; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v3, off, s[20:23], s6 offset:4
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s6, stack_passed_argument@abs32@hi
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s16, stack_passed_argument@abs32@lo
; WAVE32-WWM-PREALLOC-NEXT: ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
@@ -1254,7 +1252,6 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v29, s18
; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $sgpr18
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v30, s18
-; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE32-WWM-PREALLOC-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE32-WWM-PREALLOC-NEXT: v_readlane_b32 s1, v32, 1
; WAVE32-WWM-PREALLOC-NEXT: v_readlane_b32 s0, v32, 0
@@ -1347,7 +1344,7 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-O0-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; WAVE32-O0-NEXT: s_mov_b32 s26, s33
+; WAVE32-O0-NEXT: s_mov_b32 s25, s33
; WAVE32-O0-NEXT: s_mov_b32 s33, s32
; WAVE32-O0-NEXT: s_xor_saveexec_b32 s16, -1
; WAVE32-O0-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
@@ -1361,9 +1358,9 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-O0-NEXT: v_writelane_b32 v0, s16, 0
; WAVE32-O0-NEXT: s_lshr_b32 s16, s16, 5
; WAVE32-O0-NEXT: v_writelane_b32 v0, s16, 1
-; WAVE32-O0-NEXT: s_or_saveexec_b32 s25, -1
+; WAVE32-O0-NEXT: s_or_saveexec_b32 s24, -1
; WAVE32-O0-NEXT: buffer_store_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Spill
-; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s25
+; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s24
; WAVE32-O0-NEXT: v_mov_b32_e32 v0, 42
; WAVE32-O0-NEXT: buffer_store_dword v0, off, s[0:3], s33
; WAVE32-O0-NEXT: s_waitcnt_vscnt null, 0x0
@@ -1440,11 +1437,10 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v30, s18
-; WAVE32-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE32-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
-; WAVE32-O0-NEXT: s_or_saveexec_b32 s25, -1
+; WAVE32-O0-NEXT: s_or_saveexec_b32 s24, -1
; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
-; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s25
+; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s24
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: v_readlane_b32 s5, v0, 1
; WAVE32-O0-NEXT: v_readlane_b32 s4, v0, 0
@@ -1460,14 +1456,14 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Reload
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s4
; WAVE32-O0-NEXT: s_add_i32 s32, s32, 0xffffee00
-; WAVE32-O0-NEXT: s_mov_b32 s33, s26
+; WAVE32-O0-NEXT: s_mov_b32 s33, s25
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; WAVE64-O0-NEXT: s_mov_b32 s28, s33
+; WAVE64-O0-NEXT: s_mov_b32 s19, s33
; WAVE64-O0-NEXT: s_mov_b32 s33, s32
; WAVE64-O0-NEXT: s_xor_saveexec_b64 s[16:17], -1
; WAVE64-O0-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
@@ -1560,7 +1556,6 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE64-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v30, s18
-; WAVE64-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE64-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE64-O0-NEXT: s_or_saveexec_b64 s[26:27], -1
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
@@ -1580,14 +1575,14 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Reload
; WAVE64-O0-NEXT: s_mov_b64 exec, s[4:5]
; WAVE64-O0-NEXT: s_add_i32 s32, s32, 0xffffdc00
-; WAVE64-O0-NEXT: s_mov_b32 s33, s28
+; WAVE64-O0-NEXT: s_mov_b32 s33, s19
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-WWM-PREALLOC-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-WWM-PREALLOC: ; %bb.0:
; WAVE32-WWM-PREALLOC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s25, s33
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s24, s33
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s33, s32
; WAVE32-WWM-PREALLOC-NEXT: s_xor_saveexec_b32 s16, -1
; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v33, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
@@ -1677,7 +1672,6 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v29, s18
; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $sgpr18
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v30, s18
-; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE32-WWM-PREALLOC-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE32-WWM-PREALLOC-NEXT: v_readlane_b32 s5, v32, 1
; WAVE32-WWM-PREALLOC-NEXT: v_readlane_b32 s4, v32, 0
@@ -1693,7 +1687,7 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-WWM-PREALLOC-NEXT: buffer_load_dword v32, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 exec_lo, s4
; WAVE32-WWM-PREALLOC-NEXT: s_add_i32 s32, s32, 0xffffee00
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s33, s25
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s33, s24
; WAVE32-WWM-PREALLOC-NEXT: s_waitcnt vmcnt(0)
; WAVE32-WWM-PREALLOC-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca [32 x i32], addrspace(5)
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
index 8d5dc79..049db01 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
@@ -31,6 +31,6 @@ define amdgpu_kernel void @kernel1() #1 {
attributes #0 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
index 7a6f82d..c9387f1 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
@@ -98,7 +98,7 @@ define amdgpu_kernel void @kernel2() #0 {
attributes #0 = { "uniform-work-group-size"="true" }
;.
; CHECK: attributes #[[ATTR0]] = { "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR2]] = { "uniform-work-group-size"="true" }
-; CHECK: attributes #[[ATTR3]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll
index c04154c..7183da2 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll
@@ -41,6 +41,6 @@ define amdgpu_kernel void @kernel3() #2 {
attributes #2 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
index 2d5ff04..6ed04cf 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
@@ -41,7 +41,7 @@ define amdgpu_kernel void @kernel2() #2 {
attributes #1 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll
index e8bf6fc..d5ba2fd 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll
@@ -52,8 +52,8 @@ attributes #0 = { nounwind }
attributes #1 = { "uniform-work-group-size"="false" }
attributes #2 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR3]] = { "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
index 473eea4..7f0dfea 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
@@ -101,7 +101,7 @@ define amdgpu_kernel void @kernel(ptr addrspace(1) %m) #1 {
attributes #0 = { nounwind readnone }
attributes #1 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { nounwind memory(none) "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { nounwind memory(none) "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR0]] = { nounwind memory(none) "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { nounwind memory(none) "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
index 221f1a1..8616c73 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
@@ -61,6 +61,6 @@ define amdgpu_kernel void @kernel3() #0 {
attributes #0 = { "uniform-work-group-size"="false" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll
index 717d3d9..0407994 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll
@@ -540,6 +540,7 @@ define internal void @use512vgprs() {
}
define void @foo() #0 {
+ call void asm sideeffect "; use $0", "a"(i32 0)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll b/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
index d2364a6..bfc249e 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
@@ -233,10 +233,10 @@ define amdgpu_ps float @loop(i32 %z, float %v, i32 inreg %bound, ptr %extern_fun
; SI-NEXT: bb.1.Flow:
; SI-NEXT: successors: %bb.2(0x40000000), %bb.10(0x40000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %49:vgpr_32, %bb.0, %4, %bb.9
- ; SI-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY4]], %bb.0, undef %51:vgpr_32, %bb.9
- ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %53:vgpr_32, %bb.9
- ; SI-NEXT: [[PHI3:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %55:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %47:vgpr_32, %bb.0, %4, %bb.9
+ ; SI-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY4]], %bb.0, undef %49:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %51:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI3:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %53:vgpr_32, %bb.9
; SI-NEXT: [[SI_ELSE:%[0-9]+]]:sreg_32 = SI_ELSE killed [[SI_IF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
; SI-NEXT: S_BRANCH %bb.2
; SI-NEXT: {{ $}}
@@ -249,8 +249,8 @@ define amdgpu_ps float @loop(i32 %z, float %v, i32 inreg %bound, ptr %extern_fun
; SI-NEXT: bb.3:
; SI-NEXT: successors: %bb.4(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %57:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
- ; SI-NEXT: [[PHI5:%[0-9]+]]:vgpr_32 = PHI undef %59:vgpr_32, %bb.4, [[PHI1]], %bb.2
+ ; SI-NEXT: [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %55:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
+ ; SI-NEXT: [[PHI5:%[0-9]+]]:vgpr_32 = PHI undef %57:vgpr_32, %bb.4, [[PHI1]], %bb.2
; SI-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub0, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub1, implicit $exec
; SI-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_1]], %subreg.sub1
@@ -286,8 +286,8 @@ define amdgpu_ps float @loop(i32 %z, float %v, i32 inreg %bound, ptr %extern_fun
; SI-NEXT: bb.7:
; SI-NEXT: successors: %bb.8(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI6:%[0-9]+]]:vreg_64 = PHI undef %61:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
- ; SI-NEXT: [[PHI7:%[0-9]+]]:vgpr_32 = PHI undef %63:vgpr_32, %bb.8, [[COPY4]], %bb.6
+ ; SI-NEXT: [[PHI6:%[0-9]+]]:vreg_64 = PHI undef %59:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
+ ; SI-NEXT: [[PHI7:%[0-9]+]]:vgpr_32 = PHI undef %61:vgpr_32, %bb.8, [[COPY4]], %bb.6
; SI-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI6]].sub0, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI6]].sub1, implicit $exec
; SI-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_2]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_3]], %subreg.sub1
@@ -356,9 +356,9 @@ define amdgpu_ps float @loop_with_use(i32 %z, float %v, i32 inreg %bound, ptr %e
; SI-NEXT: bb.1.Flow:
; SI-NEXT: successors: %bb.2(0x40000000), %bb.10(0x40000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %50:vgpr_32, %bb.0, %4, %bb.9
- ; SI-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %52:vgpr_32, %bb.9
- ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %54:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %48:vgpr_32, %bb.0, %4, %bb.9
+ ; SI-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %50:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %52:vgpr_32, %bb.9
; SI-NEXT: [[SI_ELSE:%[0-9]+]]:sreg_32 = SI_ELSE killed [[SI_IF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
; SI-NEXT: S_BRANCH %bb.2
; SI-NEXT: {{ $}}
@@ -371,7 +371,7 @@ define amdgpu_ps float @loop_with_use(i32 %z, float %v, i32 inreg %bound, ptr %e
; SI-NEXT: bb.3:
; SI-NEXT: successors: %bb.4(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI3:%[0-9]+]]:vreg_64 = PHI undef %56:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
+ ; SI-NEXT: [[PHI3:%[0-9]+]]:vreg_64 = PHI undef %54:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
; SI-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI3]].sub0, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI3]].sub1, implicit $exec
; SI-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_1]], %subreg.sub1
@@ -407,7 +407,7 @@ define amdgpu_ps float @loop_with_use(i32 %z, float %v, i32 inreg %bound, ptr %e
; SI-NEXT: bb.7:
; SI-NEXT: successors: %bb.8(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %58:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
+ ; SI-NEXT: [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %56:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
; SI-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub0, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub1, implicit $exec
; SI-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_2]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_3]], %subreg.sub1
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll b/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll
index 37f207f..4939d526 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll
@@ -47,7 +47,6 @@ define protected amdgpu_kernel void @kern(ptr %addr) !llvm.amdgcn.lds.kernel.id
; CHECK-NEXT: s_mov_b32 s15, 42
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir b/llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir
index 3d9db68..6659e95 100644
--- a/llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir
@@ -20,6 +20,7 @@ name: undef_identity_copy
tracksRegLiveness: true
frameInfo:
maxAlignment: 4
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
isEntryFunction: true
diff --git a/llvm/test/CodeGen/AMDGPU/wave32.ll b/llvm/test/CodeGen/AMDGPU/wave32.ll
index 82816b4..901e88a 100644
--- a/llvm/test/CodeGen/AMDGPU/wave32.ll
+++ b/llvm/test/CodeGen/AMDGPU/wave32.ll
@@ -2479,8 +2479,7 @@ define amdgpu_kernel void @icmp64(i32 %n, i32 %s) {
; GFX1032-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX1032-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX1032-NEXT: v_mul_lo_u32 v2, s1, v1
-; GFX1032-NEXT: s_ff1_i32_b32 s1, 0x80000000
-; GFX1032-NEXT: s_add_i32 s1, s1, 32
+; GFX1032-NEXT: s_brev_b32 s1, 1
; GFX1032-NEXT: v_mul_hi_u32 v2, v1, v2
; GFX1032-NEXT: v_add_nc_u32_e32 v1, v1, v2
; GFX1032-NEXT: v_mul_hi_u32 v1, v0, v1
@@ -2494,8 +2493,7 @@ define amdgpu_kernel void @icmp64(i32 %n, i32 %s) {
; GFX1032-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_lshr_b32 s0, vcc_lo, 1
-; GFX1032-NEXT: s_ff1_i32_b32 s0, s0
-; GFX1032-NEXT: s_min_u32 s0, s0, s1
+; GFX1032-NEXT: s_ff1_i32_b64 s0, s[0:1]
; GFX1032-NEXT: s_cmp_gt_u32 s0, 9
; GFX1032-NEXT: s_cselect_b32 s0, -1, 0
; GFX1032-NEXT: s_and_b32 s0, vcc_lo, s0
@@ -2529,10 +2527,7 @@ define amdgpu_kernel void @icmp64(i32 %n, i32 %s) {
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1064-NEXT: s_lshr_b64 s[0:1], vcc, 1
; GFX1064-NEXT: s_bitset1_b32 s1, 31
-; GFX1064-NEXT: s_ff1_i32_b32 s0, s0
-; GFX1064-NEXT: s_ff1_i32_b32 s1, s1
-; GFX1064-NEXT: s_add_i32 s1, s1, 32
-; GFX1064-NEXT: s_min_u32 s0, s0, s1
+; GFX1064-NEXT: s_ff1_i32_b64 s0, s[0:1]
; GFX1064-NEXT: s_cmp_gt_u32 s0, 9
; GFX1064-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX1064-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
@@ -2576,9 +2571,8 @@ define amdgpu_kernel void @fcmp64(float %n, float %s) {
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032-NEXT: v_div_scale_f32 v1, s1, s0, s0, v0
; GFX1032-NEXT: v_div_scale_f32 v4, vcc_lo, v0, s0, v0
-; GFX1032-NEXT: s_ff1_i32_b32 s1, 0x80000000
+; GFX1032-NEXT: s_brev_b32 s1, 1
; GFX1032-NEXT: v_rcp_f32_e32 v2, v1
-; GFX1032-NEXT: s_add_i32 s1, s1, 32
; GFX1032-NEXT: v_fma_f32 v3, -v1, v2, 1.0
; GFX1032-NEXT: v_fmac_f32_e32 v2, v3, v2
; GFX1032-NEXT: v_mul_f32_e32 v3, v4, v2
@@ -2592,8 +2586,7 @@ define amdgpu_kernel void @fcmp64(float %n, float %s) {
; GFX1032-NEXT: v_cmp_eq_f32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_lshr_b32 s0, vcc_lo, 1
; GFX1032-NEXT: v_cmp_nlg_f32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_ff1_i32_b32 s0, s0
-; GFX1032-NEXT: s_min_u32 s0, s0, s1
+; GFX1032-NEXT: s_ff1_i32_b64 s0, s[0:1]
; GFX1032-NEXT: s_cmp_gt_u32 s0, 9
; GFX1032-NEXT: s_cselect_b32 s0, -1, 0
; GFX1032-NEXT: s_and_b32 s0, vcc_lo, s0
@@ -2609,15 +2602,15 @@ define amdgpu_kernel void @fcmp64(float %n, float %s) {
; GFX1064-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064-NEXT: v_div_scale_f32 v1, s[0:1], s2, s2, v0
-; GFX1064-NEXT: v_div_scale_f32 v4, vcc, v0, s2, v0
; GFX1064-NEXT: v_rcp_f32_e32 v2, v1
; GFX1064-NEXT: v_fma_f32 v3, -v1, v2, 1.0
; GFX1064-NEXT: v_fmac_f32_e32 v2, v3, v2
-; GFX1064-NEXT: v_mul_f32_e32 v3, v4, v2
-; GFX1064-NEXT: v_fma_f32 v5, -v1, v3, v4
-; GFX1064-NEXT: v_fmac_f32_e32 v3, v5, v2
-; GFX1064-NEXT: v_fma_f32 v1, -v1, v3, v4
-; GFX1064-NEXT: v_div_fmas_f32 v1, v1, v2, v3
+; GFX1064-NEXT: v_div_scale_f32 v3, vcc, v0, s2, v0
+; GFX1064-NEXT: v_mul_f32_e32 v4, v3, v2
+; GFX1064-NEXT: v_fma_f32 v5, -v1, v4, v3
+; GFX1064-NEXT: v_fmac_f32_e32 v4, v5, v2
+; GFX1064-NEXT: v_fma_f32 v1, -v1, v4, v3
+; GFX1064-NEXT: v_div_fmas_f32 v1, v1, v2, v4
; GFX1064-NEXT: v_div_fixup_f32 v1, v1, s2, v0
; GFX1064-NEXT: v_trunc_f32_e32 v1, v1
; GFX1064-NEXT: v_fma_f32 v0, -v1, s2, v0
@@ -2625,10 +2618,7 @@ define amdgpu_kernel void @fcmp64(float %n, float %s) {
; GFX1064-NEXT: s_lshr_b64 s[0:1], vcc, 1
; GFX1064-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0
; GFX1064-NEXT: s_bitset1_b32 s1, 31
-; GFX1064-NEXT: s_ff1_i32_b32 s0, s0
-; GFX1064-NEXT: s_ff1_i32_b32 s1, s1
-; GFX1064-NEXT: s_add_i32 s1, s1, 32
-; GFX1064-NEXT: s_min_u32 s0, s0, s1
+; GFX1064-NEXT: s_ff1_i32_b64 s0, s[0:1]
; GFX1064-NEXT: s_cmp_gt_u32 s0, 9
; GFX1064-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX1064-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll b/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll
index 3a33194..7eabe98 100644
--- a/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll
@@ -101,7 +101,6 @@ define void @test() #0 {
; GCN-O0-NEXT: s_mov_b64 s[20:21], s[0:1]
; GCN-O0-NEXT: s_mov_b64 s[0:1], s[20:21]
; GCN-O0-NEXT: s_mov_b64 s[2:3], s[22:23]
-; GCN-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GCN-O0-NEXT: s_waitcnt lgkmcnt(0)
; GCN-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GCN-O0-NEXT: s_or_saveexec_b64 s[28:29], -1
diff --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
index 11f6a29..e79cb66 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
@@ -406,7 +406,6 @@ define amdgpu_gfx void @strict_wwm_call(ptr addrspace(8) inreg %tmp14, i32 inreg
; GFX9-O0-NEXT: s_mov_b64 s[0:1], s[44:45]
; GFX9-O0-NEXT: s_mov_b64 s[2:3], s[46:47]
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr44_sgpr45
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[42:43]
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
; GFX9-O0-NEXT: v_add_u32_e64 v1, v1, v2
@@ -633,7 +632,6 @@ define amdgpu_gfx void @strict_wwm_call_i64(ptr addrspace(8) inreg %tmp14, i64 i
; GFX9-O0-NEXT: s_mov_b64 s[2:3], s[38:39]
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
-; GFX9-O0-NEXT: ; implicit-def: $sgpr36_sgpr37
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[34:35]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[46:47], -1
diff --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
index e5cebc1..def51f2 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
@@ -413,7 +413,6 @@ define amdgpu_kernel void @call(ptr addrspace(8) inreg %tmp14, i32 inreg %arg) {
; GFX9-O0-NEXT: ; implicit-def: $sgpr15
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v1, off, s[24:27], 0 ; 4-byte Folded Reload
@@ -657,7 +656,6 @@ define amdgpu_kernel void @call_i64(ptr addrspace(8) inreg %tmp14, i64 inreg %ar
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
@@ -1285,7 +1283,6 @@ define amdgpu_kernel void @strict_wwm_call(ptr addrspace(8) inreg %tmp14, i32 in
; GFX9-O0-NEXT: ; implicit-def: $sgpr15
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v1, off, s[24:27], 0 ; 4-byte Folded Reload
@@ -1529,7 +1526,6 @@ define amdgpu_kernel void @strict_wwm_call_i64(ptr addrspace(8) inreg %tmp14, i6
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
diff --git a/llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll b/llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll
index 365727c..0795525 100644
--- a/llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll
+++ b/llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll
@@ -8,10 +8,8 @@
%struct.Foo = type { ptr }
-; ARM-LABEL: foo:
-; THUMB-LABEL: foo:
-; T2-LABEL: foo:
define ptr @foo(ptr %this, i32 %acc) nounwind readonly align 2 {
+; ARM-LABEL: foo:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: add r2, r0, #4
; ARM-NEXT: mov r12, #1
@@ -44,6 +42,7 @@ define ptr @foo(ptr %this, i32 %acc) nounwind readonly align 2 {
; ARM-NEXT: add r0, r0, r1, lsl #2
; ARM-NEXT: mov pc, lr
;
+; THUMB-LABEL: foo:
; THUMB: @ %bb.0: @ %entry
; THUMB-NEXT: .save {r4, r5, r7, lr}
; THUMB-NEXT: push {r4, r5, r7, lr}
@@ -91,6 +90,7 @@ define ptr @foo(ptr %this, i32 %acc) nounwind readonly align 2 {
; THUMB-NEXT: pop {r0}
; THUMB-NEXT: bx r0
;
+; T2-LABEL: foo:
; T2: @ %bb.0: @ %entry
; T2-NEXT: adds r2, r0, #4
; T2-NEXT: mov.w r12, #1
@@ -125,6 +125,7 @@ define ptr @foo(ptr %this, i32 %acc) nounwind readonly align 2 {
; T2-NEXT: add.w r0, r0, r1, lsl #2
; T2-NEXT: bx lr
;
+; V8-LABEL: foo:
; V8: @ %bb.0: @ %entry
; V8-NEXT: adds r2, r0, #4
; V8-NEXT: mov.w r12, #1
@@ -210,11 +211,8 @@ sw.epilog: ; preds = %tailrecurse.switch
%struct.S = type { ptr, [1 x i8] }
-; ARM-LABEL: bar:
-; THUMB-LABEL: bar:
-; T2-LABEL: bar:
-; V8-LABEL: bar:
define internal zeroext i8 @bar(ptr %x, ptr nocapture %y) nounwind readonly {
+; ARM-LABEL: bar:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrb r2, [r0, #4]
; ARM-NEXT: ands r2, r2, #112
@@ -230,6 +228,7 @@ define internal zeroext i8 @bar(ptr %x, ptr nocapture %y) nounwind readonly {
; ARM-NEXT: mov r0, #1
; ARM-NEXT: mov pc, lr
;
+; THUMB-LABEL: bar:
; THUMB: @ %bb.0: @ %entry
; THUMB-NEXT: ldrb r2, [r0, #4]
; THUMB-NEXT: movs r3, #112
@@ -253,6 +252,7 @@ define internal zeroext i8 @bar(ptr %x, ptr nocapture %y) nounwind readonly {
; THUMB-NEXT: ands r0, r1
; THUMB-NEXT: bx lr
;
+; T2-LABEL: bar:
; T2: @ %bb.0: @ %entry
; T2-NEXT: ldrb r2, [r0, #4]
; T2-NEXT: ands r2, r2, #112
@@ -270,6 +270,7 @@ define internal zeroext i8 @bar(ptr %x, ptr nocapture %y) nounwind readonly {
; T2-NEXT: movs r0, #1
; T2-NEXT: bx lr
;
+; V8-LABEL: bar:
; V8: @ %bb.0: @ %entry
; V8-NEXT: ldrb r2, [r0, #4]
; V8-NEXT: ands r2, r2, #112
diff --git a/llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir b/llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir
index 5c59566..b4bbb9b 100644
--- a/llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir
+++ b/llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir
@@ -86,6 +86,8 @@
---
name: main
exposesReturnsTwice: true
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, name: P0, size: 80, alignment: 8, local-offset: -80 }
- { id: 1, name: jb1, size: 160, alignment: 8, local-offset: -240 }
diff --git a/llvm/test/CodeGen/ARM/select.ll b/llvm/test/CodeGen/ARM/select.ll
index 4bb7965..24ca9ae 100644
--- a/llvm/test/CodeGen/ARM/select.ll
+++ b/llvm/test/CodeGen/ARM/select.ll
@@ -1,14 +1,25 @@
-; RUN: llc -mtriple=arm-apple-darwin %s -o - | FileCheck %s
-
-; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - \
-; RUN: | FileCheck %s --check-prefix=CHECK-VFP
-
-; RUN: llc -mtriple=thumbv7-apple-darwin -mattr=+neon,+thumb2 %s -o - \
-; RUN: | FileCheck %s --check-prefix=CHECK-NEON
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=armv7-eabi -mattr=-fpregs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-ARM
+; RUN: llc -mtriple=armv7-eabi -mattr=+vfp2 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-VFP
+; RUN: llc -mtriple=thumbv7-apple-darwin -mattr=+neon,+thumb2 %s -o - | FileCheck %s --check-prefix=CHECK-NEON
define i32 @f1(i32 %a.s) {
-;CHECK-LABEL: f1:
-;CHECK: moveq
+; CHECK-LABEL: f1:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r1, #3
+; CHECK-NEXT: cmp r0, #4
+; CHECK-NEXT: movweq r1, #2
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f1:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r1, #3
+; CHECK-NEON-NEXT: cmp r0, #4
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: moveq r1, #2
+; CHECK-NEON-NEXT: mov r0, r1
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp eq i32 %a.s, 4
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -16,8 +27,22 @@ entry:
}
define i32 @f2(i32 %a.s) {
-;CHECK-LABEL: f2:
-;CHECK: movgt
+; CHECK-LABEL: f2:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r1, #3
+; CHECK-NEXT: cmp r0, #4
+; CHECK-NEXT: movwgt r1, #2
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f2:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r1, #3
+; CHECK-NEON-NEXT: cmp r0, #4
+; CHECK-NEON-NEXT: it gt
+; CHECK-NEON-NEXT: movgt r1, #2
+; CHECK-NEON-NEXT: mov r0, r1
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp sgt i32 %a.s, 4
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -25,8 +50,22 @@ entry:
}
define i32 @f3(i32 %a.s, i32 %b.s) {
-;CHECK-LABEL: f3:
-;CHECK: movlt
+; CHECK-LABEL: f3:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: cmp r0, r1
+; CHECK-NEXT: movwlt r2, #2
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f3:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r2, #3
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it lt
+; CHECK-NEON-NEXT: movlt r2, #2
+; CHECK-NEON-NEXT: mov r0, r2
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp slt i32 %a.s, %b.s
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -34,8 +73,22 @@ entry:
}
define i32 @f4(i32 %a.s, i32 %b.s) {
-;CHECK-LABEL: f4:
-;CHECK: movle
+; CHECK-LABEL: f4:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: cmp r0, r1
+; CHECK-NEXT: movwle r2, #2
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f4:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r2, #3
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it le
+; CHECK-NEON-NEXT: movle r2, #2
+; CHECK-NEON-NEXT: mov r0, r2
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp sle i32 %a.s, %b.s
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -43,8 +96,22 @@ entry:
}
define i32 @f5(i32 %a.u, i32 %b.u) {
-;CHECK-LABEL: f5:
-;CHECK: movls
+; CHECK-LABEL: f5:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: cmp r0, r1
+; CHECK-NEXT: movwls r2, #2
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f5:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r2, #3
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it ls
+; CHECK-NEON-NEXT: movls r2, #2
+; CHECK-NEON-NEXT: mov r0, r2
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp ule i32 %a.u, %b.u
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -52,8 +119,22 @@ entry:
}
define i32 @f6(i32 %a.u, i32 %b.u) {
-;CHECK-LABEL: f6:
-;CHECK: movhi
+; CHECK-LABEL: f6:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: cmp r0, r1
+; CHECK-NEXT: movwhi r2, #2
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f6:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r2, #3
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it hi
+; CHECK-NEON-NEXT: movhi r2, #2
+; CHECK-NEON-NEXT: mov r0, r2
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp ugt i32 %a.u, %b.u
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -61,11 +142,61 @@ entry:
}
define double @f7(double %a, double %b) {
-;CHECK-LABEL: f7:
-;CHECK: movmi
-;CHECK: movpl
-;CHECK-VFP-LABEL: f7:
-;CHECK-VFP: vmovmi
+; CHECK-ARM-LABEL: f7:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: .save {r4, r5, r11, lr}
+; CHECK-ARM-NEXT: push {r4, r5, r11, lr}
+; CHECK-ARM-NEXT: mov r4, r3
+; CHECK-ARM-NEXT: movw r3, #48758
+; CHECK-ARM-NEXT: mov r5, r2
+; CHECK-ARM-NEXT: movw r2, #14680
+; CHECK-ARM-NEXT: movt r2, #51380
+; CHECK-ARM-NEXT: movt r3, #16371
+; CHECK-ARM-NEXT: bl __aeabi_dcmplt
+; CHECK-ARM-NEXT: cmp r0, #0
+; CHECK-ARM-NEXT: movwne r4, #0
+; CHECK-ARM-NEXT: movwne r5, #0
+; CHECK-ARM-NEXT: movtne r4, #49136
+; CHECK-ARM-NEXT: mov r0, r5
+; CHECK-ARM-NEXT: mov r1, r4
+; CHECK-ARM-NEXT: pop {r4, r5, r11, pc}
+;
+; CHECK-VFP-LABEL: f7:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: vldr d17, .LCPI6_0
+; CHECK-VFP-NEXT: vmov d19, r0, r1
+; CHECK-VFP-NEXT: vmov.f64 d16, #-1.000000e+00
+; CHECK-VFP-NEXT: vcmp.f64 d19, d17
+; CHECK-VFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-VFP-NEXT: vmov d18, r2, r3
+; CHECK-VFP-NEXT: vmovmi.f64 d18, d16
+; CHECK-VFP-NEXT: vmov r0, r1, d18
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 3
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI6_0:
+; CHECK-VFP-NEXT: .long 3367254360 @ double 1.234
+; CHECK-VFP-NEXT: .long 1072938614
+;
+; CHECK-NEON-LABEL: f7:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: vldr d17, LCPI6_0
+; CHECK-NEON-NEXT: vmov d19, r0, r1
+; CHECK-NEON-NEXT: vmov d18, r2, r3
+; CHECK-NEON-NEXT: vcmp.f64 d19, d17
+; CHECK-NEON-NEXT: vmov.f64 d16, #-1.000000e+00
+; CHECK-NEON-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEON-NEXT: it mi
+; CHECK-NEON-NEXT: vmovmi.f64 d18, d16
+; CHECK-NEON-NEXT: vmov r0, r1, d18
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 3
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI6_0:
+; CHECK-NEON-NEXT: .long 3367254360 @ double 1.234
+; CHECK-NEON-NEXT: .long 1072938614
+; CHECK-NEON-NEXT: .end_data_region
%tmp = fcmp olt double %a, 1.234e+00
%tmp1 = select i1 %tmp, double -1.000e+00, double %b
ret double %tmp1
@@ -77,18 +208,49 @@ define double @f7(double %a, double %b) {
; a lack of a custom lowering routine for an ISD::SELECT. This would result in
; two "it" blocks in the code: one for the "icmp" and another to move the index
; into the constant pool based on the value of the "icmp". If we have one "it"
-; block generated, odds are good that we have close to the ideal code for this:
+; block generated, odds are good that we have close to the ideal code for this.
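+; The CHECK-NEON lines below confirm it: a single "it eq" guards the
+; "addeq r1, #4" that picks between the two constant pool entries at LCPI7_0.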
+define arm_apcscc float @f8(i32 %a) nounwind {
+; CHECK-ARM-LABEL: f8:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: movw r1, #29905
+; CHECK-ARM-NEXT: movw r2, #1123
+; CHECK-ARM-NEXT: movt r1, #16408
+; CHECK-ARM-NEXT: cmp r0, r2
+; CHECK-ARM-NEXT: movweq r1, #62390
+; CHECK-ARM-NEXT: movteq r1, #16285
+; CHECK-ARM-NEXT: mov r0, r1
+; CHECK-ARM-NEXT: bx lr
+;
+; CHECK-VFP-LABEL: f8:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: movw r2, #1123
+; CHECK-VFP-NEXT: adr r1, .LCPI7_0
+; CHECK-VFP-NEXT: cmp r0, r2
+; CHECK-VFP-NEXT: addeq r1, r1, #4
+; CHECK-VFP-NEXT: ldr r0, [r1]
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 2
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI7_0:
+; CHECK-VFP-NEXT: .long 0x401874d1 @ float 2.38212991
+; CHECK-VFP-NEXT: .long 0x3f9df3b6 @ float 1.23399997
;
; CHECK-NEON-LABEL: f8:
-; CHECK-NEON: adr [[R2:r[0-9]+]], LCPI7_0
-; CHECK-NEON: movw [[R3:r[0-9]+]], #1123
-; CHECK-NEON-NEXT: cmp r0, [[R3]]
-; CHECK-NEON-NEXT: it eq
-; CHECK-NEON-NEXT: addeq{{.*}} [[R2]], #4
-; CHECK-NEON-NEXT: ldr
-; CHECK-NEON: bx
-
-define arm_apcscc float @f8(i32 %a) nounwind {
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: adr r1, LCPI7_0
+; CHECK-NEON-NEXT: movw r2, #1123
+; CHECK-NEON-NEXT: cmp r0, r2
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: addeq r1, #4
+; CHECK-NEON-NEXT: ldr r0, [r1]
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 2
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI7_0:
+; CHECK-NEON-NEXT: .long 0x401874d1 @ float 2.38212991
+; CHECK-NEON-NEXT: .long 0x3f9df3b6 @ float 1.23399997
+; CHECK-NEON-NEXT: .end_data_region
%tmp = icmp eq i32 %a, 1123
%tmp1 = select i1 %tmp, float 0x3FF3BE76C0000000, float 0x40030E9A20000000
ret float %tmp1
@@ -98,10 +260,40 @@ define arm_apcscc float @f8(i32 %a) nounwind {
; Glue values can only have a single use, but the following test exposed a
; case where a SELECT was lowered with 2 uses of a comparison, causing the
; scheduler to assert.
-; CHECK-VFP-LABEL: f9:
-
declare ptr @objc_msgSend(ptr, ptr, ...)
define void @f9() optsize {
+; CHECK-LABEL: f9:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: .pad #8
+; CHECK-NEXT: sub sp, sp, #8
+; CHECK-NEXT: movw r2, #0
+; CHECK-NEXT: movw r3, #0
+; CHECK-NEXT: mov r1, #1065353216
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: movt r2, #16672
+; CHECK-NEXT: movt r3, #32704
+; CHECK-NEXT: strd r0, r1, [sp]
+; CHECK-NEXT: bl objc_msgSend
+; CHECK-NEXT: add sp, sp, #8
+; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NEON-LABEL: f9:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: str lr, [sp, #-4]!
+; CHECK-NEON-NEXT: sub sp, #8
+; CHECK-NEON-NEXT: movs r2, #0
+; CHECK-NEON-NEXT: movs r3, #0
+; CHECK-NEON-NEXT: mov.w r0, #1065353216
+; CHECK-NEON-NEXT: movs r1, #0
+; CHECK-NEON-NEXT: movt r2, #16672
+; CHECK-NEON-NEXT: movt r3, #32704
+; CHECK-NEON-NEXT: strd r1, r0, [sp]
+; CHECK-NEON-NEXT: bl _objc_msgSend
+; CHECK-NEON-NEXT: add sp, #8
+; CHECK-NEON-NEXT: ldr lr, [sp], #4
+; CHECK-NEON-NEXT: bx lr
entry:
%cmp = icmp eq ptr undef, inttoptr (i32 4 to ptr)
%conv191 = select i1 %cmp, float -3.000000e+00, float 0.000000e+00
@@ -117,36 +309,151 @@ entry:
ret void
}
-; CHECK-LABEL: f10:
define float @f10(i32 %a, i32 %b) nounwind uwtable readnone ssp {
-; CHECK-NOT: floatsisf
+; CHECK-ARM-LABEL: f10:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: mov r2, #0
+; CHECK-ARM-NEXT: cmp r0, r1
+; CHECK-ARM-NEXT: moveq r2, #1065353216
+; CHECK-ARM-NEXT: mov r0, r2
+; CHECK-ARM-NEXT: bx lr
+;
+; CHECK-VFP-LABEL: f10:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: vmov.f32 s2, #1.000000e+00
+; CHECK-VFP-NEXT: vldr s0, .LCPI9_0
+; CHECK-VFP-NEXT: cmp r0, r1
+; CHECK-VFP-NEXT: vmoveq.f32 s0, s2
+; CHECK-VFP-NEXT: vmov r0, s0
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 2
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI9_0:
+; CHECK-VFP-NEXT: .long 0x00000000 @ float 0
+;
+; CHECK-NEON-LABEL: f10:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: vldr s0, LCPI9_0
+; CHECK-NEON-NEXT: vmov.f32 s2, #1.000000e+00
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: vmoveq.f32 s0, s2
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 2
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI9_0:
+; CHECK-NEON-NEXT: .long 0x00000000 @ float 0
+; CHECK-NEON-NEXT: .end_data_region
%1 = icmp eq i32 %a, %b
%2 = zext i1 %1 to i32
%3 = sitofp i32 %2 to float
ret float %3
}
-; CHECK-LABEL: f11:
define float @f11(i32 %a, i32 %b) nounwind uwtable readnone ssp {
-; CHECK-NOT: floatsisf
+; CHECK-ARM-LABEL: f11:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: mov r2, #0
+; CHECK-ARM-NEXT: cmp r0, r1
+; CHECK-ARM-NEXT: movweq r2, #0
+; CHECK-ARM-NEXT: movteq r2, #49024
+; CHECK-ARM-NEXT: mov r0, r2
+; CHECK-ARM-NEXT: bx lr
+;
+; CHECK-VFP-LABEL: f11:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: vmov.f32 s2, #-1.000000e+00
+; CHECK-VFP-NEXT: vldr s0, .LCPI10_0
+; CHECK-VFP-NEXT: cmp r0, r1
+; CHECK-VFP-NEXT: vmoveq.f32 s0, s2
+; CHECK-VFP-NEXT: vmov r0, s0
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 2
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI10_0:
+; CHECK-VFP-NEXT: .long 0x00000000 @ float 0
+;
+; CHECK-NEON-LABEL: f11:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: vldr s0, LCPI10_0
+; CHECK-NEON-NEXT: vmov.f32 s2, #-1.000000e+00
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: vmoveq.f32 s0, s2
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 2
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI10_0:
+; CHECK-NEON-NEXT: .long 0x00000000 @ float 0
+; CHECK-NEON-NEXT: .end_data_region
%1 = icmp eq i32 %a, %b
%2 = sitofp i1 %1 to float
ret float %2
}
-; CHECK-LABEL: f12:
define float @f12(i32 %a, i32 %b) nounwind uwtable readnone ssp {
-; CHECK-NOT: floatunsisf
+; CHECK-ARM-LABEL: f12:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: mov r2, #0
+; CHECK-ARM-NEXT: cmp r0, r1
+; CHECK-ARM-NEXT: moveq r2, #1065353216
+; CHECK-ARM-NEXT: mov r0, r2
+; CHECK-ARM-NEXT: bx lr
+;
+; CHECK-VFP-LABEL: f12:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: vmov.f32 s2, #1.000000e+00
+; CHECK-VFP-NEXT: vldr s0, .LCPI11_0
+; CHECK-VFP-NEXT: cmp r0, r1
+; CHECK-VFP-NEXT: vmoveq.f32 s0, s2
+; CHECK-VFP-NEXT: vmov r0, s0
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 2
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI11_0:
+; CHECK-VFP-NEXT: .long 0x00000000 @ float 0
+;
+; CHECK-NEON-LABEL: f12:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: vldr s0, LCPI11_0
+; CHECK-NEON-NEXT: vmov.f32 s2, #1.000000e+00
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: vmoveq.f32 s0, s2
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 2
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI11_0:
+; CHECK-NEON-NEXT: .long 0x00000000 @ float 0
+; CHECK-NEON-NEXT: .end_data_region
%1 = icmp eq i32 %a, %b
%2 = uitofp i1 %1 to float
ret float %2
}
-; CHECK-LABEL: test_overflow_recombine:
define i1 @test_overflow_recombine(i32 %in1, i32 %in2) {
-; CHECK: smull [[LO:r[0-9]+]], [[HI:r[0-9]+]]
-; CHECK: subs [[ZERO:r[0-9]+]], [[HI]], [[LO]], asr #31
-; CHECK: movne [[ZERO]], #1
+; CHECK-LABEL: test_overflow_recombine:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mul r2, r0, r1
+; CHECK-NEXT: smmul r0, r0, r1
+; CHECK-NEXT: subs r0, r0, r2, asr #31
+; CHECK-NEXT: movwne r0, #1
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: test_overflow_recombine:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: mul r2, r0, r1
+; CHECK-NEON-NEXT: smmul r0, r0, r1
+; CHECK-NEON-NEXT: subs.w r0, r0, r2, asr #31
+; CHECK-NEON-NEXT: it ne
+; CHECK-NEON-NEXT: movne r0, #1
+; CHECK-NEON-NEXT: bx lr
%prod = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %in1, i32 %in2)
%overflow = extractvalue { i32, i1 } %prod, 1
ret i1 %overflow
diff --git a/llvm/test/CodeGen/AVR/bug-81911.ll b/llvm/test/CodeGen/AVR/bug-81911.ll
new file mode 100644
index 0000000..2a22666
--- /dev/null
+++ b/llvm/test/CodeGen/AVR/bug-81911.ll
@@ -0,0 +1,163 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=avr -mcpu=atmega328 -O1 -verify-machineinstrs | FileCheck %s
+
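+; Presumably a regression test for llvm/llvm-project issue #81911 (per the
+; file name): the i64 udiv loop plus memory-clobbering inline asm forces
+; spills to Y-indexed frame slots, and the checks pin the exact spill and
+; reload sequence.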
+define internal i8 @main() {
+; CHECK-LABEL: main:
+; CHECK: ; %bb.0: ; %bb0
+; CHECK-NEXT: push r2
+; CHECK-NEXT: push r3
+; CHECK-NEXT: push r4
+; CHECK-NEXT: push r5
+; CHECK-NEXT: push r6
+; CHECK-NEXT: push r7
+; CHECK-NEXT: push r8
+; CHECK-NEXT: push r9
+; CHECK-NEXT: push r10
+; CHECK-NEXT: push r11
+; CHECK-NEXT: push r12
+; CHECK-NEXT: push r13
+; CHECK-NEXT: push r14
+; CHECK-NEXT: push r15
+; CHECK-NEXT: push r16
+; CHECK-NEXT: push r17
+; CHECK-NEXT: push r28
+; CHECK-NEXT: push r29
+; CHECK-NEXT: in r28, 61
+; CHECK-NEXT: in r29, 62
+; CHECK-NEXT: sbiw r28, 13
+; CHECK-NEXT: in r0, 63
+; CHECK-NEXT: cli
+; CHECK-NEXT: out 62, r29
+; CHECK-NEXT: out 63, r0
+; CHECK-NEXT: out 61, r28
+; CHECK-NEXT: ldi r16, 0
+; CHECK-NEXT: ldi r17, 0
+; CHECK-NEXT: ldi r18, -1
+; CHECK-NEXT: ;APP
+; CHECK-NEXT: ldi r24, 123
+; CHECK-NEXT: ;NO_APP
+; CHECK-NEXT: std Y+1, r24 ; 1-byte Folded Spill
+; CHECK-NEXT: movw r24, r28
+; CHECK-NEXT: adiw r24, 6
+; CHECK-NEXT: std Y+3, r25 ; 2-byte Folded Spill
+; CHECK-NEXT: std Y+2, r24 ; 2-byte Folded Spill
+; CHECK-NEXT: movw r8, r16
+; CHECK-NEXT: movw r6, r16
+; CHECK-NEXT: movw r4, r16
+; CHECK-NEXT: movw r2, r16
+; CHECK-NEXT: rjmp .LBB0_2
+; CHECK-NEXT: .LBB0_1: ; %bb1
+; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: andi r30, 1
+; CHECK-NEXT: ldd r31, Y+4 ; 1-byte Folded Reload
+; CHECK-NEXT: dec r31
+; CHECK-NEXT: cpi r30, 0
+; CHECK-NEXT: movw r8, r18
+; CHECK-NEXT: movw r6, r20
+; CHECK-NEXT: movw r4, r22
+; CHECK-NEXT: movw r2, r24
+; CHECK-NEXT: mov r18, r31
+; CHECK-NEXT: brne .LBB0_2
+; CHECK-NEXT: rjmp .LBB0_4
+; CHECK-NEXT: .LBB0_2: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: std Y+4, r18 ; 1-byte Folded Spill
+; CHECK-NEXT: movw r18, r8
+; CHECK-NEXT: movw r20, r6
+; CHECK-NEXT: movw r22, r4
+; CHECK-NEXT: movw r24, r2
+; CHECK-NEXT: ldi r26, 10
+; CHECK-NEXT: ldi r27, 0
+; CHECK-NEXT: movw r10, r26
+; CHECK-NEXT: movw r12, r16
+; CHECK-NEXT: movw r14, r16
+; CHECK-NEXT: call __udivdi3
+; CHECK-NEXT: std Y+13, r25
+; CHECK-NEXT: std Y+12, r24
+; CHECK-NEXT: std Y+11, r23
+; CHECK-NEXT: std Y+10, r22
+; CHECK-NEXT: std Y+9, r21
+; CHECK-NEXT: std Y+8, r20
+; CHECK-NEXT: std Y+7, r19
+; CHECK-NEXT: std Y+6, r18
+; CHECK-NEXT: ldd r30, Y+2 ; 2-byte Folded Reload
+; CHECK-NEXT: ldd r31, Y+3 ; 2-byte Folded Reload
+; CHECK-NEXT: ;APP
+; CHECK-NEXT: ;NO_APP
+; CHECK-NEXT: ldi r30, 1
+; CHECK-NEXT: cp r8, r1
+; CHECK-NEXT: cpc r9, r1
+; CHECK-NEXT: cpc r6, r16
+; CHECK-NEXT: cpc r7, r17
+; CHECK-NEXT: cpc r4, r16
+; CHECK-NEXT: cpc r5, r17
+; CHECK-NEXT: cpc r2, r16
+; CHECK-NEXT: cpc r3, r17
+; CHECK-NEXT: breq .LBB0_3
+; CHECK-NEXT: rjmp .LBB0_1
+; CHECK-NEXT: .LBB0_3: ; %bb1
+; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: mov r30, r1
+; CHECK-NEXT: rjmp .LBB0_1
+; CHECK-NEXT: .LBB0_4: ; %bb3
+; CHECK-NEXT: ldd r24, Y+1 ; 1-byte Folded Reload
+; CHECK-NEXT: std Y+5, r24
+; CHECK-NEXT: movw r24, r28
+; CHECK-NEXT: adiw r24, 5
+; CHECK-NEXT: ;APP
+; CHECK-NEXT: ;NO_APP
+; CHECK-NEXT: ldd r24, Y+5
+; CHECK-NEXT: adiw r28, 13
+; CHECK-NEXT: in r0, 63
+; CHECK-NEXT: cli
+; CHECK-NEXT: out 62, r29
+; CHECK-NEXT: out 63, r0
+; CHECK-NEXT: out 61, r28
+; CHECK-NEXT: pop r29
+; CHECK-NEXT: pop r28
+; CHECK-NEXT: pop r17
+; CHECK-NEXT: pop r16
+; CHECK-NEXT: pop r15
+; CHECK-NEXT: pop r14
+; CHECK-NEXT: pop r13
+; CHECK-NEXT: pop r12
+; CHECK-NEXT: pop r11
+; CHECK-NEXT: pop r10
+; CHECK-NEXT: pop r9
+; CHECK-NEXT: pop r8
+; CHECK-NEXT: pop r7
+; CHECK-NEXT: pop r6
+; CHECK-NEXT: pop r5
+; CHECK-NEXT: pop r4
+; CHECK-NEXT: pop r3
+; CHECK-NEXT: pop r2
+; CHECK-NEXT: ret
+bb0:
+ %0 = alloca i64
+ %1 = alloca i8
+ %2 = tail call i8 asm sideeffect "ldi ${0}, 123", "=&r,~{sreg},~{memory}"()
+
+ br label %bb1
+
+bb1:
+ %3 = phi i64 [ %5, %bb1 ], [ 0, %bb0 ]
+ %4 = phi i8 [ %6, %bb1 ], [ 0, %bb0 ]
+ %5 = udiv i64 %3, 10
+ %6 = add i8 %4, 1
+
+ store i64 %5, ptr %0
+ call void asm sideeffect "", "r,~{memory}"(ptr %0)
+
+ %7 = icmp eq i64 %3, 0
+ %8 = icmp eq i8 %6, 0
+
+ br i1 %7, label %bb3, label %bb1
+
+bb3:
+ store i8 %2, ptr %1
+ call void asm sideeffect "", "r,~{memory}"(ptr %1)
+
+ %9 = load i8, ptr %1
+
+ ret i8 %9
+}
diff --git a/llvm/test/CodeGen/BPF/addr-space-globals.ll b/llvm/test/CodeGen/BPF/addr-space-globals.ll
index 878ba0d..73e80b7 100644
--- a/llvm/test/CodeGen/BPF/addr-space-globals.ll
+++ b/llvm/test/CodeGen/BPF/addr-space-globals.ll
@@ -18,7 +18,7 @@
; Verify that a,b,c reside in the same section
-; CHECK: .section .arena.272,"aw",@progbits
+; CHECK: .section .addr_space.272,"aw",@progbits
; CHECK-NOT: .section
; CHECK: .globl a
; CHECK: .ascii "\001\002"
diff --git a/llvm/test/CodeGen/BPF/addr-space-globals2.ll b/llvm/test/CodeGen/BPF/addr-space-globals2.ll
index d1e2318..5944cb2 100644
--- a/llvm/test/CodeGen/BPF/addr-space-globals2.ll
+++ b/llvm/test/CodeGen/BPF/addr-space-globals2.ll
@@ -14,12 +14,12 @@
; Verify that a,b reside in separate sections
-; CHECK: .section .arena.1,"aw",@progbits
+; CHECK: .section .addr_space.1,"aw",@progbits
; CHECK-NOT: .section
; CHECK: .globl a
; CHECK: .ascii "\001\002"
-; CHECK: .section .arena.2,"aw",@progbits
+; CHECK: .section .addr_space.2,"aw",@progbits
; CHECK-NOT: .section
; CHECK: .globl b
; CHECK: .ascii "\003\004"
diff --git a/llvm/test/CodeGen/BPF/cttz-ctlz.ll b/llvm/test/CodeGen/BPF/cttz-ctlz.ll
new file mode 100644
index 0000000..f42b2e2
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/cttz-ctlz.ll
@@ -0,0 +1,304 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+; Test that CTTZ and CTLZ are expanded, since BPF has no native count-zero instructions.
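+; cttz is expanded by isolating the lowest set bit with (x & -x), then
+; mapping it to an index via a de Bruijn-style multiply (*= 125613361, or the
+; 64-bit constant) and a table lookup; ctlz smears the leading bit right with
+; shift/or steps and finishes with a popcount sequence.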
+
+declare i32 @llvm.cttz.i32(i32, i1)
+
+define i32 @cttz_i32_zdef(i32 %a) {
+; CHECK-LABEL: cttz_i32_zdef:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r1 *= 125613361
+; CHECK-NEXT: r2 = 4160749568 ll
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r1 >>= 27
+; CHECK-NEXT: r2 = {{\.?LCPI[0-9]+_[0-9]+}} ll
+; CHECK-NEXT: r2 += r1
+; CHECK-NEXT: r0 = *(u8 *)(r2 + 0)
+; CHECK-NEXT: exit
+ %ret = call i32 @llvm.cttz.i32(i32 %a, i1 1)
+ ret i32 %ret
+}
+
+
+define i32 @cttz_i32(i32 %a) {
+; CHECK-LABEL: cttz_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r0 = 32
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 <<= 32
+; CHECK-NEXT: r2 >>= 32
+; CHECK-NEXT: if r2 == 0 goto LBB1_2
+; CHECK-NEXT: # %bb.1: # %cond.false
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r1 *= 125613361
+; CHECK-NEXT: r2 = 4160749568 ll
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r1 >>= 27
+; CHECK-NEXT: r2 = {{\.?LCPI[0-9]+_[0-9]+}} ll
+; CHECK-NEXT: r2 += r1
+; CHECK-NEXT: r0 = *(u8 *)(r2 + 0)
+; CHECK-NEXT: LBB1_2: # %cond.end
+; CHECK-NEXT: exit
+ %ret = call i32 @llvm.cttz.i32(i32 %a, i1 0)
+ ret i32 %ret
+}
+
+declare i64 @llvm.cttz.i64(i64, i1)
+
+define i64 @cttz_i64_zdef(i64 %a) {
+; CHECK-LABEL: cttz_i64_zdef:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r2 = 151050438420815295 ll
+; CHECK-NEXT: r1 *= r2
+; CHECK-NEXT: r1 >>= 58
+; CHECK-NEXT: r2 = {{\.?LCPI[0-9]+_[0-9]+}} ll
+; CHECK-NEXT: r2 += r1
+; CHECK-NEXT: r0 = *(u8 *)(r2 + 0)
+; CHECK-NEXT: exit
+ %ret = call i64 @llvm.cttz.i64(i64 %a, i1 1)
+ ret i64 %ret
+}
+
+
+define i64 @cttz_i64(i64 %a) {
+; CHECK-LABEL: cttz_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r0 = 64
+; CHECK-NEXT: if r1 == 0 goto LBB3_2
+; CHECK-NEXT: # %bb.1: # %cond.false
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r2 = 151050438420815295 ll
+; CHECK-NEXT: r1 *= r2
+; CHECK-NEXT: r1 >>= 58
+; CHECK-NEXT: r2 = {{\.?LCPI[0-9]+_[0-9]+}} ll
+; CHECK-NEXT: r2 += r1
+; CHECK-NEXT: r0 = *(u8 *)(r2 + 0)
+; CHECK-NEXT: LBB3_2: # %cond.end
+; CHECK-NEXT: exit
+ %ret = call i64 @llvm.cttz.i64(i64 %a, i1 0)
+ ret i64 %ret
+}
+
+
+declare i32 @llvm.ctlz.i32(i32, i1)
+
+define i32 @ctlz_i32_zdef(i32 %a) {
+; CHECK-LABEL: ctlz_i32_zdef:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r2 = 4294967294 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 1
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967292 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 2
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967280 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 4
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967040 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 8
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294901760 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 16
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r1 ^= -1
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 1
+; CHECK-NEXT: r2 &= 1431655765
+; CHECK-NEXT: r1 -= r2
+; CHECK-NEXT: r0 = r1
+; CHECK-NEXT: r0 &= 858993459
+; CHECK-NEXT: r1 >>= 2
+; CHECK-NEXT: r1 &= 858993459
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = r0
+; CHECK-NEXT: r1 >>= 4
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r0 &= 252645135
+; CHECK-NEXT: r0 *= 16843009
+; CHECK-NEXT: r1 = 4278190080 ll
+; CHECK-NEXT: r0 &= r1
+; CHECK-NEXT: r0 >>= 24
+; CHECK-NEXT: exit
+ %ret = call i32 @llvm.ctlz.i32(i32 %a, i1 1)
+ ret i32 %ret
+}
+
+
+define i32 @ctlz_i32(i32 %a) {
+; CHECK-LABEL: ctlz_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r0 = 32
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 <<= 32
+; CHECK-NEXT: r2 >>= 32
+; CHECK-NEXT: if r2 == 0 goto LBB5_2
+; CHECK-NEXT: # %bb.1: # %cond.false
+; CHECK-NEXT: r2 = 4294967294 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 1
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967292 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 2
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967280 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 4
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967040 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 8
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294901760 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 16
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r1 ^= -1
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 1
+; CHECK-NEXT: r2 &= 1431655765
+; CHECK-NEXT: r1 -= r2
+; CHECK-NEXT: r0 = r1
+; CHECK-NEXT: r0 &= 858993459
+; CHECK-NEXT: r1 >>= 2
+; CHECK-NEXT: r1 &= 858993459
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = r0
+; CHECK-NEXT: r1 >>= 4
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r0 &= 252645135
+; CHECK-NEXT: r0 *= 16843009
+; CHECK-NEXT: r1 = 4278190080 ll
+; CHECK-NEXT: r0 &= r1
+; CHECK-NEXT: r0 >>= 24
+; CHECK-NEXT: LBB5_2: # %cond.end
+; CHECK-NEXT: exit
+ %ret = call i32 @llvm.ctlz.i32(i32 %a, i1 0)
+ ret i32 %ret
+}
+
+declare i64 @llvm.ctlz.i64(i64, i1)
+
+define i64 @ctlz_i64_zdef(i64 %a) {
+; CHECK-LABEL: ctlz_i64_zdef:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 1
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 2
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 4
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 8
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 16
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 32
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r1 ^= -1
+; CHECK-NEXT: r2 = 6148914691236517205 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 >>= 1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r1 -= r3
+; CHECK-NEXT: r2 = 3689348814741910323 ll
+; CHECK-NEXT: r0 = r1
+; CHECK-NEXT: r0 &= r2
+; CHECK-NEXT: r1 >>= 2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = r0
+; CHECK-NEXT: r1 >>= 4
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = 1085102592571150095 ll
+; CHECK-NEXT: r0 &= r1
+; CHECK-NEXT: r1 = 72340172838076673 ll
+; CHECK-NEXT: r0 *= r1
+; CHECK-NEXT: r0 >>= 56
+; CHECK-NEXT: exit
+ %ret = call i64 @llvm.ctlz.i64(i64 %a, i1 1)
+ ret i64 %ret
+}
+
+
+define i64 @ctlz_i64(i64 %a) {
+; CHECK-LABEL: ctlz_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r0 = 64
+; CHECK-NEXT: if r1 == 0 goto LBB7_2
+; CHECK-NEXT: # %bb.1: # %cond.false
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 1
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 2
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 4
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 8
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 16
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 32
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r1 ^= -1
+; CHECK-NEXT: r2 = 6148914691236517205 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 >>= 1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r1 -= r3
+; CHECK-NEXT: r2 = 3689348814741910323 ll
+; CHECK-NEXT: r0 = r1
+; CHECK-NEXT: r0 &= r2
+; CHECK-NEXT: r1 >>= 2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = r0
+; CHECK-NEXT: r1 >>= 4
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = 1085102592571150095 ll
+; CHECK-NEXT: r0 &= r1
+; CHECK-NEXT: r1 = 72340172838076673 ll
+; CHECK-NEXT: r0 *= r1
+; CHECK-NEXT: r0 >>= 56
+; CHECK-NEXT: LBB7_2: # %cond.end
+; CHECK-NEXT: exit
+ %ret = call i64 @llvm.ctlz.i64(i64 %a, i1 0)
+ ret i64 %ret
+}
+
diff --git a/llvm/test/CodeGen/DirectX/ShaderFlags/double-extensions.ll b/llvm/test/CodeGen/DirectX/ShaderFlags/double-extensions.ll
index 865fefe..d027216 100644
--- a/llvm/test/CodeGen/DirectX/ShaderFlags/double-extensions.ll
+++ b/llvm/test/CodeGen/DirectX/ShaderFlags/double-extensions.ll
@@ -3,10 +3,11 @@
target triple = "dxil-pc-shadermodel6.7-library"
-; CHECK: ; Shader Flags Value: 0x00000021
+; CHECK: ; Shader Flags Value: 0x00000044
; CHECK: ; Note: shader requires additional functionality:
; CHECK-NEXT: ; Double-precision floating point
; CHECK-NEXT: ; Double-precision extensions for 11.1
+; CHECK-NEXT: ; Note: extra DXIL module flags:
; CHECK-NEXT: {{^;$}}
define double @div(double %a, double %b) {
%res = fdiv double %a, %b
diff --git a/llvm/test/CodeGen/DirectX/ShaderFlags/doubles.ll b/llvm/test/CodeGen/DirectX/ShaderFlags/doubles.ll
index f90db61..c1a4c21 100644
--- a/llvm/test/CodeGen/DirectX/ShaderFlags/doubles.ll
+++ b/llvm/test/CodeGen/DirectX/ShaderFlags/doubles.ll
@@ -3,10 +3,12 @@
target triple = "dxil-pc-shadermodel6.7-library"
-; CHECK: ; Shader Flags Value: 0x00000001
+; CHECK: ; Shader Flags Value: 0x00000004
; CHECK: ; Note: shader requires additional functionality:
; CHECK-NEXT: ; Double-precision floating point
+; CHECK-NEXT: ; Note: extra DXIL module flags:
; CHECK-NEXT: {{^;$}}
+
define double @add(double %a, double %b) {
%sum = fadd double %a, %b
ret double %sum
diff --git a/llvm/test/CodeGen/DirectX/abs-vec.ll b/llvm/test/CodeGen/DirectX/abs-vec.ll
new file mode 100644
index 0000000..1c40555
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/abs-vec.ll
@@ -0,0 +1,34 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s
+
+; Make sure the llvm.abs intrinsic is expanded for integer vectors.
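+; The expansion uses the identity abs(x) == smax(x, 0 - x), so each function
+; checks for a negation followed by an smax call.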
+
+; CHECK-LABEL: abs_i16Vec2
+define noundef <2 x i16> @abs_i16Vec2(<2 x i16> noundef %a) #0 {
+entry:
+; CHECK: sub <2 x i16> zeroinitializer, %a
+; CHECK: call <2 x i16> @llvm.smax.v2i16(<2 x i16> %a, <2 x i16> %{{.*}})
+ %elt.abs = call <2 x i16> @llvm.abs.v2i16(<2 x i16> %a, i1 false)
+ ret <2 x i16> %elt.abs
+}
+
+; CHECK-LABEL: abs_i32Vec3
+define noundef <3 x i32> @abs_i32Vec3(<3 x i32> noundef %a) #0 {
+entry:
+; CHECK: sub <3 x i32> zeroinitializer, %a
+; CHECK: call <3 x i32> @llvm.smax.v3i32(<3 x i32> %a, <3 x i32> %{{.*}})
+ %elt.abs = call <3 x i32> @llvm.abs.v3i32(<3 x i32> %a, i1 false)
+ ret <3 x i32> %elt.abs
+}
+
+; CHECK-LABEL: abs_i64Vec4
+define noundef <4 x i64> @abs_i64Vec4(<4 x i64> noundef %a) #0 {
+entry:
+; CHECK: sub <4 x i64> zeroinitializer, %a
+; CHECK: call <4 x i64> @llvm.smax.v4i64(<4 x i64> %a, <4 x i64> %{{.*}})
+ %elt.abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false)
+ ret <4 x i64> %elt.abs
+}
+
+declare <2 x i16> @llvm.abs.v2i16(<2 x i16>, i1 immarg)
+declare <3 x i32> @llvm.abs.v3i32(<3 x i32>, i1 immarg)
+declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1 immarg)
diff --git a/llvm/test/CodeGen/DirectX/abs.ll b/llvm/test/CodeGen/DirectX/abs.ll
new file mode 100644
index 0000000..822580e
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/abs.ll
@@ -0,0 +1,38 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefixes=CHECK,EXPCHECK
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s --check-prefixes=CHECK,DOPCHECK
+
+; Make sure dxil operation function calls for abs are generated for int16_t/int/int64_t.
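+; Under -dxil-intrinsic-expansion the generic llvm.smax call is still visible
+; (EXPCHECK); under -dxil-op-lower it is mapped onto dx.op.binary opcode 37
+; (DOPCHECK).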
+
+; CHECK-LABEL: abs_i16
+define noundef i16 @abs_i16(i16 noundef %a) {
+entry:
+; CHECK: sub i16 0, %a
+; EXPCHECK: call i16 @llvm.smax.i16(i16 %a, i16 %{{.*}})
+; DOPCHECK: call i16 @dx.op.binary.i16(i32 37, i16 %a, i16 %{{.*}})
+ %elt.abs = call i16 @llvm.abs.i16(i16 %a, i1 false)
+ ret i16 %elt.abs
+}
+
+; CHECK-LABEL: abs_i32
+define noundef i32 @abs_i32(i32 noundef %a) {
+entry:
+; CHECK: sub i32 0, %a
+; EXPCHECK: call i32 @llvm.smax.i32(i32 %a, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.binary.i32(i32 37, i32 %a, i32 %{{.*}})
+ %elt.abs = call i32 @llvm.abs.i32(i32 %a, i1 false)
+ ret i32 %elt.abs
+}
+
+; CHECK-LABEL: abs_i64
+define noundef i64 @abs_i64(i64 noundef %a) {
+entry:
+; CHECK: sub i64 0, %a
+; EXPCHECK: call i64 @llvm.smax.i64(i64 %a, i64 %{{.*}})
+; DOPCHECK: call i64 @dx.op.binary.i64(i32 37, i64 %a, i64 %{{.*}})
+ %elt.abs = call i64 @llvm.abs.i64(i64 %a, i1 false)
+ ret i64 %elt.abs
+}
+
+declare i16 @llvm.abs.i16(i16, i1 immarg)
+declare i32 @llvm.abs.i32(i32, i1 immarg)
+declare i64 @llvm.abs.i64(i64, i1 immarg)
diff --git a/llvm/test/CodeGen/DirectX/any.ll b/llvm/test/CodeGen/DirectX/any.ll
new file mode 100644
index 0000000..e8d8707
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/any.ll
@@ -0,0 +1,113 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure the dx.any intrinsic is lowered for bool, integer, and floating-point scalars and for bool vectors.
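+; Scalar inputs lower to a compare against zero (icmp ne for integers,
+; fcmp une for floats); the bool vector case additionally ORs the lanes
+; together, as any_bool4 shows.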
+
+; CHECK-LABEL: any_bool
+; CHECK: icmp ne i1 %{{.*}}, false
+define noundef i1 @any_bool(i1 noundef %p0) {
+entry:
+ %p0.addr = alloca i8, align 1
+ %frombool = zext i1 %p0 to i8
+ store i8 %frombool, ptr %p0.addr, align 1
+ %0 = load i8, ptr %p0.addr, align 1
+ %tobool = trunc i8 %0 to i1
+ %dx.any = call i1 @llvm.dx.any.i1(i1 %tobool)
+ ret i1 %dx.any
+}
+
+; CHECK-LABEL: any_int64_t
+; CHECK: icmp ne i64 %{{.*}}, 0
+define noundef i1 @any_int64_t(i64 noundef %p0) {
+entry:
+ %p0.addr = alloca i64, align 8
+ store i64 %p0, ptr %p0.addr, align 8
+ %0 = load i64, ptr %p0.addr, align 8
+ %dx.any = call i1 @llvm.dx.any.i64(i64 %0)
+ ret i1 %dx.any
+}
+
+; CHECK-LABEL: any_int
+; CHECK: icmp ne i32 %{{.*}}, 0
+define noundef i1 @any_int(i32 noundef %p0) {
+entry:
+ %p0.addr = alloca i32, align 4
+ store i32 %p0, ptr %p0.addr, align 4
+ %0 = load i32, ptr %p0.addr, align 4
+ %dx.any = call i1 @llvm.dx.any.i32(i32 %0)
+ ret i1 %dx.any
+}
+
+; CHECK-LABEL: any_int16_t
+; CHECK: icmp ne i16 %{{.*}}, 0
+define noundef i1 @any_int16_t(i16 noundef %p0) {
+entry:
+ %p0.addr = alloca i16, align 2
+ store i16 %p0, ptr %p0.addr, align 2
+ %0 = load i16, ptr %p0.addr, align 2
+ %dx.any = call i1 @llvm.dx.any.i16(i16 %0)
+ ret i1 %dx.any
+}
+
+; CHECK-LABEL: any_double
+; CHECK: fcmp une double %{{.*}}, 0.000000e+00
+define noundef i1 @any_double(double noundef %p0) {
+entry:
+ %p0.addr = alloca double, align 8
+ store double %p0, ptr %p0.addr, align 8
+ %0 = load double, ptr %p0.addr, align 8
+ %dx.any = call i1 @llvm.dx.any.f64(double %0)
+ ret i1 %dx.any
+}
+
+; CHECK-LABEL: any_float
+; CHECK: fcmp une float %{{.*}}, 0.000000e+00
+define noundef i1 @any_float(float noundef %p0) {
+entry:
+ %p0.addr = alloca float, align 4
+ store float %p0, ptr %p0.addr, align 4
+ %0 = load float, ptr %p0.addr, align 4
+ %dx.any = call i1 @llvm.dx.any.f32(float %0)
+ ret i1 %dx.any
+}
+
+; CHECK-LABEL: any_half
+; CHECK: fcmp une half %{{.*}}, 0xH0000
+define noundef i1 @any_half(half noundef %p0) {
+entry:
+ %p0.addr = alloca half, align 2
+ store half %p0, ptr %p0.addr, align 2
+ %0 = load half, ptr %p0.addr, align 2
+ %dx.any = call i1 @llvm.dx.any.f16(half %0)
+ ret i1 %dx.any
+}
+
+; CHECK-LABEL: any_bool4
+; CHECK: icmp ne <4 x i1> %extractvec, zeroinitializer
+; CHECK: extractelement <4 x i1> %{{.*}}, i64 0
+; CHECK: extractelement <4 x i1> %{{.*}}, i64 1
+; CHECK: or i1 %{{.*}}, %{{.*}}
+; CHECK: extractelement <4 x i1> %{{.*}}, i64 2
+; CHECK: or i1 %{{.*}}, %{{.*}}
+; CHECK: extractelement <4 x i1> %{{.*}}, i64 3
+; CHECK: or i1 %{{.*}}, %{{.*}}
+define noundef i1 @any_bool4(<4 x i1> noundef %p0) {
+entry:
+ %p0.addr = alloca i8, align 1
+ %insertvec = shufflevector <4 x i1> %p0, <4 x i1> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+ %0 = bitcast <8 x i1> %insertvec to i8
+ store i8 %0, ptr %p0.addr, align 1
+ %load_bits = load i8, ptr %p0.addr, align 1
+ %1 = bitcast i8 %load_bits to <8 x i1>
+ %extractvec = shufflevector <8 x i1> %1, <8 x i1> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %dx.any = call i1 @llvm.dx.any.v4i1(<4 x i1> %extractvec)
+ ret i1 %dx.any
+}
+
+declare i1 @llvm.dx.any.v4i1(<4 x i1>)
+declare i1 @llvm.dx.any.i1(i1)
+declare i1 @llvm.dx.any.i16(i16)
+declare i1 @llvm.dx.any.i32(i32)
+declare i1 @llvm.dx.any.i64(i64)
+declare i1 @llvm.dx.any.f16(half)
+declare i1 @llvm.dx.any.f32(float)
+declare i1 @llvm.dx.any.f64(double)
diff --git a/llvm/test/CodeGen/DirectX/ceil.ll b/llvm/test/CodeGen/DirectX/ceil.ll
new file mode 100644
index 0000000..1585471
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ceil.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for ceil are generated for float and half.
+
+define noundef float @ceil_float(float noundef %a) {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 28, float %{{.*}})
+ %elt.ceil = call float @llvm.ceil.f32(float %a)
+ ret float %elt.ceil
+}
+
+define noundef half @ceil_half(half noundef %a) {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 28, half %{{.*}})
+ %elt.ceil = call half @llvm.ceil.f16(half %a)
+ ret half %elt.ceil
+}
+
+declare half @llvm.ceil.f16(half)
+declare float @llvm.ceil.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/ceil_error.ll b/llvm/test/CodeGen/DirectX/ceil_error.ll
new file mode 100644
index 0000000..1b554d8
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ceil_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation ceil does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @ceil_double(double noundef %a) {
+entry:
+ %elt.ceil = call double @llvm.ceil.f64(double %a)
+ ret double %elt.ceil
+}
diff --git a/llvm/test/CodeGen/DirectX/clamp-vec.ll b/llvm/test/CodeGen/DirectX/clamp-vec.ll
new file mode 100644
index 0000000..d4f33a1
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/clamp-vec.ll
@@ -0,0 +1,74 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s
+
+; Make sure the dx.clamp/dx.uclamp intrinsics are expanded for float/int/uint vectors.
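+; clamp(x, lo, hi) expands to min(max(x, lo), hi): maxnum/minnum for floats,
+; smax/smin for signed integers, and umax/umin for the uclamp variants.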
+
+; CHECK-LABEL: clamp_half3
+define noundef <3 x half> @clamp_half3(<3 x half> noundef %a, <3 x half> noundef %b, <3 x half> noundef %c) {
+entry:
+ ; CHECK: call <3 x half> @llvm.maxnum.v3f16(<3 x half> %a, <3 x half> %b)
+ ; CHECK: call <3 x half> @llvm.minnum.v3f16(<3 x half> %{{.*}}, <3 x half> %c)
+ %dx.clamp = call <3 x half> @llvm.dx.clamp.v3f16(<3 x half> %a, <3 x half> %b, <3 x half> %c)
+ ret <3 x half> %dx.clamp
+}
+
+; CHECK-LABEL: clamp_float4
+define noundef <4 x float> @clamp_float4(<4 x float> noundef %a, <4 x float> noundef %b, <4 x float> noundef %c) {
+entry:
+ ; CHECK: call <4 x float> @llvm.maxnum.v4f32(<4 x float> %a, <4 x float> %b)
+ ; CHECK: call <4 x float> @llvm.minnum.v4f32(<4 x float> %{{.*}}, <4 x float> %c)
+ %dx.clamp = call <4 x float> @llvm.dx.clamp.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
+ ret <4 x float> %dx.clamp
+}
+
+; CHECK-LABEL: clamp_double2
+define noundef <2 x double> @clamp_double2(<2 x double> noundef %a, <2 x double> noundef %b, <2 x double> noundef %c) {
+entry:
+ ; CHECK: call <2 x double> @llvm.maxnum.v2f64(<2 x double> %a, <2 x double> %b)
+ ; CHECK: call <2 x double> @llvm.minnum.v2f64(<2 x double> %{{.*}}, <2 x double> %c)
+ %dx.clamp = call <2 x double> @llvm.dx.clamp.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
+ ret <2 x double> %dx.clamp
+}
+
+; CHECK-LABEL: clamp_int4
+define noundef <4 x i32> @clamp_int4(<4 x i32> noundef %a, <4 x i32> noundef %b, <4 x i32> noundef %c) {
+entry:
+ ; CHECK: call <4 x i32> @llvm.smax.v4i32(<4 x i32> %a, <4 x i32> %b)
+ ; CHECK: call <4 x i32> @llvm.smin.v4i32(<4 x i32> %{{.*}}, <4 x i32> %c)
+ %dx.clamp = call <4 x i32> @llvm.dx.clamp.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c)
+ ret <4 x i32> %dx.clamp
+}
+
+; CHECK-LABEL: clamp_uint16_t3
+define noundef <3 x i16> @clamp_uint16_t3(<3 x i16> noundef %a, <3 x i16> noundef %b, <3 x i16> noundef %c) {
+entry:
+ ; CHECK: call <3 x i16> @llvm.umax.v3i16(<3 x i16> %a, <3 x i16> %b)
+ ; CHECK: call <3 x i16> @llvm.umin.v3i16(<3 x i16> %{{.*}}, <3 x i16> %c)
+ %dx.clamp = call <3 x i16> @llvm.dx.uclamp.v3i16(<3 x i16> %a, <3 x i16> %b, <3 x i16> %c)
+ ret <3 x i16> %dx.clamp
+}
+
+; CHECK-LABEL: clamp_uint4
+define noundef <4 x i32> @clamp_uint4(<4 x i32> noundef %a, <4 x i32> noundef %b, <4 x i32> noundef %c) {
+entry:
+ ; CHECK: call <4 x i32> @llvm.umax.v4i32(<4 x i32> %a, <4 x i32> %b)
+ ; CHECK: call <4 x i32> @llvm.umin.v4i32(<4 x i32> %{{.*}}, <4 x i32> %c)
+ %dx.clamp = call <4 x i32> @llvm.dx.uclamp.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c)
+ ret <4 x i32> %dx.clamp
+}
+
+; CHECK-LABEL: clamp_uint64_t4
+define noundef <2 x i64> @clamp_uint64_t4(<2 x i64> noundef %a, <2 x i64> noundef %b, <2 x i64> noundef %c) {
+entry:
+ ; CHECK: call <2 x i64> @llvm.umax.v2i64(<2 x i64> %a, <2 x i64> %b)
+ ; CHECK: call <2 x i64> @llvm.umin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %c)
+ %dx.clamp = call <2 x i64> @llvm.dx.uclamp.v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c)
+ ret <2 x i64> %dx.clamp
+}
+
+declare <3 x half> @llvm.dx.clamp.v3f16(<3 x half>, <3 x half>, <3 x half>)
+declare <4 x float> @llvm.dx.clamp.v4f32(<4 x float>, <4 x float>, <4 x float>)
+declare <2 x double> @llvm.dx.clamp.v2f64(<2 x double>, <2 x double>, <2 x double>)
+declare <4 x i32> @llvm.dx.clamp.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <3 x i16> @llvm.dx.uclamp.v3i16(<3 x i16>, <3 x i16>, <3 x i16>)
+declare <4 x i32> @llvm.dx.uclamp.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.dx.uclamp.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
diff --git a/llvm/test/CodeGen/DirectX/clamp.ll b/llvm/test/CodeGen/DirectX/clamp.ll
new file mode 100644
index 0000000..f122313
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/clamp.ll
@@ -0,0 +1,94 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for clamp/uclamp are generated for half/float/double/i16/i32/i64.
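+; Each clamp lowers to a max followed by a min; the dx.op.binary opcode
+; pairs below (35/36 float, 37/38 signed int, 39/40 unsigned int) encode
+; the signedness of the comparison.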
+
+; CHECK-LABEL:test_clamp_i16
+define noundef i16 @test_clamp_i16(i16 noundef %a, i16 noundef %b, i16 noundef %c) {
+entry:
+; CHECK: call i16 @dx.op.binary.i16(i32 37, i16 %{{.*}}, i16 %{{.*}})
+; CHECK: call i16 @dx.op.binary.i16(i32 38, i16 %{{.*}}, i16 %{{.*}})
+ %0 = call i16 @llvm.dx.clamp.i16(i16 %a, i16 %b, i16 %c)
+ ret i16 %0
+}
+
+; CHECK-LABEL:test_clamp_i32
+define noundef i32 @test_clamp_i32(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+entry:
+; CHECK: call i32 @dx.op.binary.i32(i32 37, i32 %{{.*}}, i32 %{{.*}})
+; CHECK: call i32 @dx.op.binary.i32(i32 38, i32 %{{.*}}, i32 %{{.*}})
+ %0 = call i32 @llvm.dx.clamp.i32(i32 %a, i32 %b, i32 %c)
+ ret i32 %0
+}
+
+; CHECK-LABEL:test_clamp_i64
+define noundef i64 @test_clamp_i64(i64 noundef %a, i64 noundef %b, i64 noundef %c) {
+entry:
+; CHECK: call i64 @dx.op.binary.i64(i32 37, i64 %a, i64 %b)
+; CHECK: call i64 @dx.op.binary.i64(i32 38, i64 %{{.*}}, i64 %c)
+ %0 = call i64 @llvm.dx.clamp.i64(i64 %a, i64 %b, i64 %c)
+ ret i64 %0
+}
+
+; CHECK-LABEL:test_clamp_half
+define noundef half @test_clamp_half(half noundef %a, half noundef %b, half noundef %c) {
+entry:
+; CHECK: call half @dx.op.binary.f16(i32 35, half %{{.*}}, half %{{.*}})
+; CHECK: call half @dx.op.binary.f16(i32 36, half %{{.*}}, half %{{.*}})
+ %0 = call half @llvm.dx.clamp.f16(half %a, half %b, half %c)
+ ret half %0
+}
+
+; CHECK-LABEL:test_clamp_float
+define noundef float @test_clamp_float(float noundef %a, float noundef %b, float noundef %c) {
+entry:
+; CHECK: call float @dx.op.binary.f32(i32 35, float %{{.*}}, float %{{.*}})
+; CHECK: call float @dx.op.binary.f32(i32 36, float %{{.*}}, float %{{.*}})
+ %0 = call float @llvm.dx.clamp.f32(float %a, float %b, float %c)
+ ret float %0
+}
+
+; CHECK-LABEL:test_clamp_double
+define noundef double @test_clamp_double(double noundef %a, double noundef %b, double noundef %c) {
+entry:
+; CHECK: call double @dx.op.binary.f64(i32 35, double %{{.*}}, double %{{.*}})
+; CHECK: call double @dx.op.binary.f64(i32 36, double %{{.*}}, double %{{.*}})
+ %0 = call double @llvm.dx.clamp.f64(double %a, double %b, double %c)
+ ret double %0
+}
+
+; CHECK-LABEL:test_uclamp_i16
+define noundef i16 @test_uclamp_i16(i16 noundef %a, i16 noundef %b, i16 noundef %c) {
+entry:
+; CHECK: call i16 @dx.op.binary.i16(i32 39, i16 %{{.*}}, i16 %{{.*}})
+; CHECK: call i16 @dx.op.binary.i16(i32 40, i16 %{{.*}}, i16 %{{.*}})
+ %0 = call i16 @llvm.dx.uclamp.i16(i16 %a, i16 %b, i16 %c)
+ ret i16 %0
+}
+
+; CHECK-LABEL:test_uclamp_i32
+define noundef i32 @test_uclamp_i32(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+entry:
+; CHECK: call i32 @dx.op.binary.i32(i32 39, i32 %{{.*}}, i32 %{{.*}})
+; CHECK: call i32 @dx.op.binary.i32(i32 40, i32 %{{.*}}, i32 %{{.*}})
+ %0 = call i32 @llvm.dx.uclamp.i32(i32 %a, i32 %b, i32 %c)
+ ret i32 %0
+}
+
+; CHECK-LABEL:test_uclamp_i64
+define noundef i64 @test_uclamp_i64(i64 noundef %a, i64 noundef %b, i64 noundef %c) {
+entry:
+; CHECK: call i64 @dx.op.binary.i64(i32 39, i64 %{{.*}}, i64 %{{.*}})
+; CHECK: call i64 @dx.op.binary.i64(i32 40, i64 %{{.*}}, i64 %{{.*}})
+ %0 = call i64 @llvm.dx.uclamp.i64(i64 %a, i64 %b, i64 %c)
+ ret i64 %0
+}
+
+declare half @llvm.dx.clamp.f16(half, half, half)
+declare float @llvm.dx.clamp.f32(float, float, float)
+declare double @llvm.dx.clamp.f64(double, double, double)
+declare i16 @llvm.dx.clamp.i16(i16, i16, i16)
+declare i32 @llvm.dx.clamp.i32(i32, i32, i32)
+declare i64 @llvm.dx.clamp.i64(i64, i64, i64)
+declare i16 @llvm.dx.uclamp.i16(i16, i16, i16)
+declare i32 @llvm.dx.uclamp.i32(i32, i32, i32)
+declare i64 @llvm.dx.uclamp.i64(i64, i64, i64)
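The scalar CHECK lines above pin down the lowering shape: a clamp becomes a max of the value and the lower bound, fed into a min with the upper bound. A minimal sketch of the expected output for the signed i32 case, assuming the opcode numbering shown above (37 = IMax, 38 = IMin); the function name is hypothetical:

define i32 @clamp_i32_lowered(i32 %a, i32 %b, i32 %c) {
entry:
  ; max(a, b), then min(max(a, b), c)
  %max = call i32 @dx.op.binary.i32(i32 37, i32 %a, i32 %b)
  %min = call i32 @dx.op.binary.i32(i32 38, i32 %max, i32 %c)
  ret i32 %min
}
declare i32 @dx.op.binary.i32(i32, i32, i32)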
diff --git a/llvm/test/CodeGen/DirectX/cos.ll b/llvm/test/CodeGen/DirectX/cos.ll
new file mode 100644
index 0000000..00f2e2c
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/cos.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for cos are generated for float and half.
+
+define noundef float @cos_float(float noundef %a) #0 {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 12, float %{{.*}})
+ %elt.cos = call float @llvm.cos.f32(float %a)
+ ret float %elt.cos
+}
+
+define noundef half @cos_half(half noundef %a) #0 {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 12, half %{{.*}})
+ %elt.cos = call half @llvm.cos.f16(half %a)
+ ret half %elt.cos
+}
+
+declare half @llvm.cos.f16(half)
+declare float @llvm.cos.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/cos_error.ll b/llvm/test/CodeGen/DirectX/cos_error.ll
new file mode 100644
index 0000000..a074f5b
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/cos_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation cos does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @cos_double(double noundef %a) {
+entry:
+ %elt.cos = call double @llvm.cos.f64(double %a)
+ ret double %elt.cos
+}
diff --git a/llvm/test/CodeGen/DirectX/dot2_error.ll b/llvm/test/CodeGen/DirectX/dot2_error.ll
new file mode 100644
index 0000000..a27bfae
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/dot2_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation dot2 does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload
+
+define noundef double @dot_double2(<2 x double> noundef %a, <2 x double> noundef %b) {
+entry:
+ %dx.dot = call double @llvm.dx.dot2.v2f64(<2 x double> %a, <2 x double> %b)
+ ret double %dx.dot
+}
diff --git a/llvm/test/CodeGen/DirectX/dot3_error.ll b/llvm/test/CodeGen/DirectX/dot3_error.ll
new file mode 100644
index 0000000..eb69fb1
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/dot3_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation dot3 does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload
+
+define noundef double @dot_double3(<3 x double> noundef %a, <3 x double> noundef %b) {
+entry:
+ %dx.dot = call double @llvm.dx.dot3.v3f64(<3 x double> %a, <3 x double> %b)
+ ret double %dx.dot
+}
diff --git a/llvm/test/CodeGen/DirectX/dot4_error.ll b/llvm/test/CodeGen/DirectX/dot4_error.ll
new file mode 100644
index 0000000..5cd6326
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/dot4_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation dot4 does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload
+
+define noundef double @dot_double4(<4 x double> noundef %a, <4 x double> noundef %b) {
+entry:
+ %dx.dot = call double @llvm.dx.dot4.v4f64(<4 x double> %a, <4 x double> %b)
+ ret double %dx.dot
+}
diff --git a/llvm/test/CodeGen/DirectX/exp-vec.ll b/llvm/test/CodeGen/DirectX/exp-vec.ll
new file mode 100644
index 0000000..c937155
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/exp-vec.ll
@@ -0,0 +1,17 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s
+
+; Make sure the exp intrinsic is expanded via exp2 for float vectors.
+
+; CHECK-LABEL: exp_float4
+; CHECK: fmul <4 x float> <float 0x3FF7154760000000, float 0x3FF7154760000000, float 0x3FF7154760000000, float 0x3FF7154760000000>, %{{.*}}
+; CHECK: call <4 x float> @llvm.exp2.v4f32(<4 x float> %{{.*}})
+define noundef <4 x float> @exp_float4(<4 x float> noundef %p0) {
+entry:
+ %p0.addr = alloca <4 x float>, align 16
+ store <4 x float> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x float>, ptr %p0.addr, align 16
+ %elt.exp = call <4 x float> @llvm.exp.v4f32(<4 x float> %0)
+ ret <4 x float> %elt.exp
+}
+
+declare <4 x float> @llvm.exp.v4f32(<4 x float>)
diff --git a/llvm/test/CodeGen/DirectX/exp.ll b/llvm/test/CodeGen/DirectX/exp.ll
new file mode 100644
index 0000000..fdafc14
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/exp.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for exp are generated for float and half.
+
+; CHECK-LABEL: exp_float
+; CHECK: fmul float 0x3FF7154760000000, %{{.*}}
+; CHECK: call float @dx.op.unary.f32(i32 21, float %{{.*}})
+define noundef float @exp_float(float noundef %a) {
+entry:
+ %a.addr = alloca float, align 4
+ store float %a, ptr %a.addr, align 4
+ %0 = load float, ptr %a.addr, align 4
+ %elt.exp = call float @llvm.exp.f32(float %0)
+ ret float %elt.exp
+}
+
+; CHECK-LABEL: exp_half
+; CHECK: fmul half 0xH3DC5, %{{.*}}
+; CHECK: call half @dx.op.unary.f16(i32 21, half %{{.*}})
+define noundef half @exp_half(half noundef %a) {
+entry:
+ %a.addr = alloca half, align 2
+ store half %a, ptr %a.addr, align 2
+ %0 = load half, ptr %a.addr, align 2
+ %elt.exp = call half @llvm.exp.f16(half %0)
+ ret half %elt.exp
+}
+
+declare half @llvm.exp.f16(half)
+declare float @llvm.exp.f32(float)
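The fmul constants checked above come from the identity exp(x) = exp2(x * log2(e)), with log2(e) ~= 1.442695 (0x3FF7154760000000 as a float, 0xH3DC5 as a half). A minimal sketch of the expanded form for float, under that assumption; the function name is hypothetical:

define float @exp_expanded(float %x) {
entry:
  ; x * log2(e)
  %scaled = fmul float 0x3FF7154760000000, %x
  %r = call float @llvm.exp2.f32(float %scaled)
  ret float %r
}
declare float @llvm.exp2.f32(float)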
diff --git a/llvm/test/CodeGen/DirectX/fabs.ll b/llvm/test/CodeGen/DirectX/fabs.ll
new file mode 100644
index 0000000..3b3f8aa
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/fabs.ll
@@ -0,0 +1,32 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for abs are generated for float, half, and double.
+
+
+; CHECK-LABEL: fabs_half
+define noundef half @fabs_half(half noundef %a) {
+entry:
+ ; CHECK: call half @dx.op.unary.f16(i32 6, half %{{.*}})
+ %elt.abs = call half @llvm.fabs.f16(half %a)
+ ret half %elt.abs
+}
+
+; CHECK-LABEL: fabs_float
+define noundef float @fabs_float(float noundef %a) {
+entry:
+; CHECK: call float @dx.op.unary.f32(i32 6, float %{{.*}})
+ %elt.abs = call float @llvm.fabs.f32(float %a)
+ ret float %elt.abs
+}
+
+; CHECK-LABEL: fabs_double
+define noundef double @fabs_double(double noundef %a) {
+entry:
+; CHECK: call double @dx.op.unary.f64(i32 6, double %{{.*}})
+ %elt.abs = call double @llvm.fabs.f64(double %a)
+ ret double %elt.abs
+}
+
+declare half @llvm.fabs.f16(half)
+declare float @llvm.fabs.f32(float)
+declare double @llvm.fabs.f64(double)
diff --git a/llvm/test/CodeGen/DirectX/fdot.ll b/llvm/test/CodeGen/DirectX/fdot.ll
new file mode 100644
index 0000000..3e13b2a
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/fdot.ll
@@ -0,0 +1,94 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for dot are generated for half/float vectors.
+
+; CHECK-LABEL: dot_half2
+define noundef half @dot_half2(<2 x half> noundef %a, <2 x half> noundef %b) {
+entry:
+; CHECK: extractelement <2 x half> %a, i32 0
+; CHECK: extractelement <2 x half> %a, i32 1
+; CHECK: extractelement <2 x half> %b, i32 0
+; CHECK: extractelement <2 x half> %b, i32 1
+; CHECK: call half @dx.op.dot2.f16(i32 54, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}})
+ %dx.dot = call half @llvm.dx.dot2.v2f16(<2 x half> %a, <2 x half> %b)
+ ret half %dx.dot
+}
+
+; CHECK-LABEL: dot_half3
+define noundef half @dot_half3(<3 x half> noundef %a, <3 x half> noundef %b) {
+entry:
+; CHECK: extractelement <3 x half> %a, i32 0
+; CHECK: extractelement <3 x half> %a, i32 1
+; CHECK: extractelement <3 x half> %a, i32 2
+; CHECK: extractelement <3 x half> %b, i32 0
+; CHECK: extractelement <3 x half> %b, i32 1
+; CHECK: extractelement <3 x half> %b, i32 2
+; CHECK: call half @dx.op.dot3.f16(i32 55, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}})
+ %dx.dot = call half @llvm.dx.dot3.v3f16(<3 x half> %a, <3 x half> %b)
+ ret half %dx.dot
+}
+
+; CHECK-LABEL: dot_half4
+define noundef half @dot_half4(<4 x half> noundef %a, <4 x half> noundef %b) {
+entry:
+; CHECK: extractelement <4 x half> %a, i32 0
+; CHECK: extractelement <4 x half> %a, i32 1
+; CHECK: extractelement <4 x half> %a, i32 2
+; CHECK: extractelement <4 x half> %a, i32 3
+; CHECK: extractelement <4 x half> %b, i32 0
+; CHECK: extractelement <4 x half> %b, i32 1
+; CHECK: extractelement <4 x half> %b, i32 2
+; CHECK: extractelement <4 x half> %b, i32 3
+; CHECK: call half @dx.op.dot4.f16(i32 56, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}})
+ %dx.dot = call half @llvm.dx.dot4.v4f16(<4 x half> %a, <4 x half> %b)
+ ret half %dx.dot
+}
+
+; CHECK-LABEL: dot_float2
+define noundef float @dot_float2(<2 x float> noundef %a, <2 x float> noundef %b) {
+entry:
+; CHECK: extractelement <2 x float> %a, i32 0
+; CHECK: extractelement <2 x float> %a, i32 1
+; CHECK: extractelement <2 x float> %b, i32 0
+; CHECK: extractelement <2 x float> %b, i32 1
+; CHECK: call float @dx.op.dot2.f32(i32 54, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}})
+ %dx.dot = call float @llvm.dx.dot2.v2f32(<2 x float> %a, <2 x float> %b)
+ ret float %dx.dot
+}
+
+; CHECK-LABEL: dot_float3
+define noundef float @dot_float3(<3 x float> noundef %a, <3 x float> noundef %b) {
+entry:
+; CHECK: extractelement <3 x float> %a, i32 0
+; CHECK: extractelement <3 x float> %a, i32 1
+; CHECK: extractelement <3 x float> %a, i32 2
+; CHECK: extractelement <3 x float> %b, i32 0
+; CHECK: extractelement <3 x float> %b, i32 1
+; CHECK: extractelement <3 x float> %b, i32 2
+; CHECK: call float @dx.op.dot3.f32(i32 55, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}})
+ %dx.dot = call float @llvm.dx.dot3.v3f32(<3 x float> %a, <3 x float> %b)
+ ret float %dx.dot
+}
+
+; CHECK-LABEL: dot_float4
+define noundef float @dot_float4(<4 x float> noundef %a, <4 x float> noundef %b) {
+entry:
+; CHECK: extractelement <4 x float> %a, i32 0
+; CHECK: extractelement <4 x float> %a, i32 1
+; CHECK: extractelement <4 x float> %a, i32 2
+; CHECK: extractelement <4 x float> %a, i32 3
+; CHECK: extractelement <4 x float> %b, i32 0
+; CHECK: extractelement <4 x float> %b, i32 1
+; CHECK: extractelement <4 x float> %b, i32 2
+; CHECK: extractelement <4 x float> %b, i32 3
+; CHECK: call float @dx.op.dot4.f32(i32 56, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}})
+ %dx.dot = call float @llvm.dx.dot4.v4f32(<4 x float> %a, <4 x float> %b)
+ ret float %dx.dot
+}
+
+declare half @llvm.dx.dot2.v2f16(<2 x half>, <2 x half>)
+declare half @llvm.dx.dot3.v3f16(<3 x half>, <3 x half>)
+declare half @llvm.dx.dot4.v4f16(<4 x half>, <4 x half>)
+declare float @llvm.dx.dot2.v2f32(<2 x float>, <2 x float>)
+declare float @llvm.dx.dot3.v3f32(<3 x float>, <3 x float>)
+declare float @llvm.dx.dot4.v4f32(<4 x float>, <4 x float>)
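Per the CHECK lines above, vector operands are scalarized before the dot op is emitted: each lane of both vectors is extracted and passed as a flat argument list. A minimal sketch of the expected dot2 output (opcode 54), with a hypothetical function name:

define half @dot2_lowered(<2 x half> %a, <2 x half> %b) {
entry:
  %a0 = extractelement <2 x half> %a, i32 0
  %a1 = extractelement <2 x half> %a, i32 1
  %b0 = extractelement <2 x half> %b, i32 0
  %b1 = extractelement <2 x half> %b, i32 1
  %r = call half @dx.op.dot2.f16(i32 54, half %a0, half %a1, half %b0, half %b1)
  ret half %r
}
declare half @dx.op.dot2.f16(i32, half, half, half, half)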
diff --git a/llvm/test/CodeGen/DirectX/floor.ll b/llvm/test/CodeGen/DirectX/floor.ll
new file mode 100644
index 0000000..b033e2e
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/floor.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for floor are generated for float and half.
+
+define noundef float @floor_float(float noundef %a) #0 {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 27, float %{{.*}})
+ %elt.floor = call float @llvm.floor.f32(float %a)
+ ret float %elt.floor
+}
+
+define noundef half @floor_half(half noundef %a) #0 {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 27, half %{{.*}})
+ %elt.floor = call half @llvm.floor.f16(half %a)
+ ret half %elt.floor
+}
+
+declare half @llvm.floor.f16(half)
+declare float @llvm.floor.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/floor_error.ll b/llvm/test/CodeGen/DirectX/floor_error.ll
new file mode 100644
index 0000000..3b51a4b
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/floor_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation floor does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @floor_double(double noundef %a) {
+entry:
+ %elt.floor = call double @llvm.floor.f64(double %a)
+ ret double %elt.floor
+}
diff --git a/llvm/test/CodeGen/DirectX/fmax.ll b/llvm/test/CodeGen/DirectX/fmax.ll
new file mode 100644
index 0000000..aff722c
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/fmax.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for fmax are generated for half/float/double.
+
+; CHECK-LABEL:test_fmax_half
+define noundef half @test_fmax_half(half noundef %a, half noundef %b) {
+entry:
+; CHECK: call half @dx.op.binary.f16(i32 35, half %{{.*}}, half %{{.*}})
+ %0 = call half @llvm.maxnum.f16(half %a, half %b)
+ ret half %0
+}
+
+; CHECK-LABEL:test_fmax_float
+define noundef float @test_fmax_float(float noundef %a, float noundef %b) {
+entry:
+; CHECK: call float @dx.op.binary.f32(i32 35, float %{{.*}}, float %{{.*}})
+ %0 = call float @llvm.maxnum.f32(float %a, float %b)
+ ret float %0
+}
+
+; CHECK-LABEL:test_fmax_double
+define noundef double @test_fmax_double(double noundef %a, double noundef %b) {
+entry:
+; CHECK: call double @dx.op.binary.f64(i32 35, double %{{.*}}, double %{{.*}})
+ %0 = call double @llvm.maxnum.f64(double %a, double %b)
+ ret double %0
+}
+
+declare half @llvm.maxnum.f16(half, half)
+declare float @llvm.maxnum.f32(float, float)
+declare double @llvm.maxnum.f64(double, double)
diff --git a/llvm/test/CodeGen/DirectX/fmin.ll b/llvm/test/CodeGen/DirectX/fmin.ll
new file mode 100644
index 0000000..2f7c209
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/fmin.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for fmin are generated for half/float/double.
+
+; CHECK-LABEL:test_fmin_half
+define noundef half @test_fmin_half(half noundef %a, half noundef %b) {
+entry:
+; CHECK: call half @dx.op.binary.f16(i32 36, half %{{.*}}, half %{{.*}})
+ %0 = call half @llvm.minnum.f16(half %a, half %b)
+ ret half %0
+}
+
+; CHECK-LABEL:test_fmin_float
+define noundef float @test_fmin_float(float noundef %a, float noundef %b) {
+entry:
+; CHECK: call float @dx.op.binary.f32(i32 36, float %{{.*}}, float %{{.*}})
+ %0 = call float @llvm.minnum.f32(float %a, float %b)
+ ret float %0
+}
+
+; CHECK-LABEL:test_fmin_double
+define noundef double @test_fmin_double(double noundef %a, double noundef %b) {
+entry:
+; CHECK: call double @dx.op.binary.f64(i32 36, double %{{.*}}, double %{{.*}})
+ %0 = call double @llvm.minnum.f64(double %a, double %b)
+ ret double %0
+}
+
+declare half @llvm.minnum.f16(half, half)
+declare float @llvm.minnum.f32(float, float)
+declare double @llvm.minnum.f64(double, double)
diff --git a/llvm/test/CodeGen/DirectX/idot.ll b/llvm/test/CodeGen/DirectX/idot.ll
new file mode 100644
index 0000000..9f89a8d
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/idot.ll
@@ -0,0 +1,100 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefixes=CHECK,EXPCHECK
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s --check-prefixes=CHECK,DOPCHECK
+
+; Make sure dxil operation function calls for dot are generated for int/uint vectors.
+
+; CHECK-LABEL: dot_int16_t2
+define noundef i16 @dot_int16_t2(<2 x i16> noundef %a, <2 x i16> noundef %b) {
+entry:
+; CHECK: extractelement <2 x i16> %a, i64 0
+; CHECK: extractelement <2 x i16> %b, i64 0
+; CHECK: mul i16 %{{.*}}, %{{.*}}
+; CHECK: extractelement <2 x i16> %a, i64 1
+; CHECK: extractelement <2 x i16> %b, i64 1
+; EXPCHECK: call i16 @llvm.dx.imad.i16(i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}})
+; DOPCHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}})
+  %dx.dot = call i16 @llvm.dx.sdot.v2i16(<2 x i16> %a, <2 x i16> %b)
+ ret i16 %dx.dot
+}
+
+; CHECK-LABEL: sdot_int4
+define noundef i32 @sdot_int4(<4 x i32> noundef %a, <4 x i32> noundef %b) {
+entry:
+; CHECK: extractelement <4 x i32> %a, i64 0
+; CHECK: extractelement <4 x i32> %b, i64 0
+; CHECK: mul i32 %{{.*}}, %{{.*}}
+; CHECK: extractelement <4 x i32> %a, i64 1
+; CHECK: extractelement <4 x i32> %b, i64 1
+; EXPCHECK: call i32 @llvm.dx.imad.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; CHECK: extractelement <4 x i32> %a, i64 2
+; CHECK: extractelement <4 x i32> %b, i64 2
+; EXPCHECK: call i32 @llvm.dx.imad.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; CHECK: extractelement <4 x i32> %a, i64 3
+; CHECK: extractelement <4 x i32> %b, i64 3
+; EXPCHECK: call i32 @llvm.dx.imad.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+ %dx.dot = call i32 @llvm.dx.sdot.v4i32(<4 x i32> %a, <4 x i32> %b)
+ ret i32 %dx.dot
+}
+
+; CHECK-LABEL: dot_uint16_t3
+define noundef i16 @dot_uint16_t3(<3 x i16> noundef %a, <3 x i16> noundef %b) {
+entry:
+; CHECK: extractelement <3 x i16> %a, i64 0
+; CHECK: extractelement <3 x i16> %b, i64 0
+; CHECK: mul i16 %{{.*}}, %{{.*}}
+; CHECK: extractelement <3 x i16> %a, i64 1
+; CHECK: extractelement <3 x i16> %b, i64 1
+; EXPCHECK: call i16 @llvm.dx.umad.i16(i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}})
+; DOPCHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}})
+; CHECK: extractelement <3 x i16> %a, i64 2
+; CHECK: extractelement <3 x i16> %b, i64 2
+; EXPCHECK: call i16 @llvm.dx.umad.i16(i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}})
+; DOPCHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}})
+ %dx.dot = call i16 @llvm.dx.udot.v3i16(<3 x i16> %a, <3 x i16> %b)
+ ret i16 %dx.dot
+}
+
+; CHECK-LABEL: dot_uint4
+define noundef i32 @dot_uint4(<4 x i32> noundef %a, <4 x i32> noundef %b) {
+entry:
+; CHECK: extractelement <4 x i32> %a, i64 0
+; CHECK: extractelement <4 x i32> %b, i64 0
+; CHECK: mul i32 %{{.*}}, %{{.*}}
+; CHECK: extractelement <4 x i32> %a, i64 1
+; CHECK: extractelement <4 x i32> %b, i64 1
+; EXPCHECK: call i32 @llvm.dx.umad.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; CHECK: extractelement <4 x i32> %a, i64 2
+; CHECK: extractelement <4 x i32> %b, i64 2
+; EXPCHECK: call i32 @llvm.dx.umad.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; CHECK: extractelement <4 x i32> %a, i64 3
+; CHECK: extractelement <4 x i32> %b, i64 3
+; EXPCHECK: call i32 @llvm.dx.umad.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+ %dx.dot = call i32 @llvm.dx.udot.v4i32(<4 x i32> %a, <4 x i32> %b)
+ ret i32 %dx.dot
+}
+
+; CHECK-LABEL: dot_uint64_t4
+define noundef i64 @dot_uint64_t4(<2 x i64> noundef %a, <2 x i64> noundef %b) {
+entry:
+; CHECK: extractelement <2 x i64> %a, i64 0
+; CHECK: extractelement <2 x i64> %b, i64 0
+; CHECK: mul i64 %{{.*}}, %{{.*}}
+; CHECK: extractelement <2 x i64> %a, i64 1
+; CHECK: extractelement <2 x i64> %b, i64 1
+; EXPCHECK: call i64 @llvm.dx.umad.i64(i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}})
+; DOPCHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}})
+ %dx.dot = call i64 @llvm.dx.udot.v2i64(<2 x i64> %a, <2 x i64> %b)
+ ret i64 %dx.dot
+}
+
+declare i16 @llvm.dx.sdot.v2i16(<2 x i16>, <2 x i16>)
+declare i32 @llvm.dx.sdot.v4i32(<4 x i32>, <4 x i32>)
+declare i16 @llvm.dx.udot.v3i16(<3 x i16>, <3 x i16>)
+declare i32 @llvm.dx.udot.v4i32(<4 x i32>, <4 x i32>)
+declare i64 @llvm.dx.udot.v2i64(<2 x i64>, <2 x i64>)
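The integer dot expansion checked above follows a multiply-accumulate chain: lane 0 is a plain mul, and every later lane folds in as acc = mad(a_i, b_i, acc). A minimal sketch for a hypothetical unsigned 2 x i32 case (a width the test itself does not cover):

define i32 @udot2_expanded(<2 x i32> %a, <2 x i32> %b) {
entry:
  %a0 = extractelement <2 x i32> %a, i64 0
  %b0 = extractelement <2 x i32> %b, i64 0
  %acc0 = mul i32 %a0, %b0
  %a1 = extractelement <2 x i32> %a, i64 1
  %b1 = extractelement <2 x i32> %b, i64 1
  ; acc = a1 * b1 + acc0
  %acc1 = call i32 @llvm.dx.umad.i32(i32 %a1, i32 %b1, i32 %acc0)
  ret i32 %acc1
}
declare i32 @llvm.dx.umad.i32(i32, i32, i32)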
diff --git a/llvm/test/CodeGen/DirectX/isinf.ll b/llvm/test/CodeGen/DirectX/isinf.ll
new file mode 100644
index 0000000..e2975da
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/isinf.ll
@@ -0,0 +1,25 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for isinf are generated for float and half.
+; CHECK: call i1 @dx.op.isSpecialFloat.f32(i32 9, float %{{.*}})
+; CHECK: call i1 @dx.op.isSpecialFloat.f16(i32 9, half %{{.*}})
+
+; Function Attrs: noinline nounwind optnone
+define noundef i1 @isinf_float(float noundef %a) #0 {
+entry:
+ %a.addr = alloca float, align 4
+ store float %a, ptr %a.addr, align 4
+ %0 = load float, ptr %a.addr, align 4
+ %dx.isinf = call i1 @llvm.dx.isinf.f32(float %0)
+ ret i1 %dx.isinf
+}
+
+; Function Attrs: noinline nounwind optnone
+define noundef i1 @isinf_half(half noundef %p0) #0 {
+entry:
+ %p0.addr = alloca half, align 2
+ store half %p0, ptr %p0.addr, align 2
+ %0 = load half, ptr %p0.addr, align 2
+ %dx.isinf = call i1 @llvm.dx.isinf.f16(half %0)
+ ret i1 %dx.isinf
+}
diff --git a/llvm/test/CodeGen/DirectX/isinf_error.ll b/llvm/test/CodeGen/DirectX/isinf_error.ll
new file mode 100644
index 0000000..95b2d0c
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/isinf_error.ll
@@ -0,0 +1,13 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation isinf does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef i1 @isinf_double(double noundef %a) #0 {
+entry:
+ %a.addr = alloca double, align 8
+ store double %a, ptr %a.addr, align 8
+ %0 = load double, ptr %a.addr, align 8
+ %dx.isinf = call i1 @llvm.dx.isinf.f64(double %0)
+ ret i1 %dx.isinf
+}
diff --git a/llvm/test/CodeGen/DirectX/lerp.ll b/llvm/test/CodeGen/DirectX/lerp.ll
new file mode 100644
index 0000000..ebd7e13
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/lerp.ll
@@ -0,0 +1,56 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure the lerp intrinsic is expanded to fsub/fmul/fadd for float and half.
+
+; CHECK-LABEL: lerp_half
+; CHECK: fsub half %{{.*}}, %{{.*}}
+; CHECK: fmul half %{{.*}}, %{{.*}}
+; CHECK: fadd half %{{.*}}, %{{.*}}
+define noundef half @lerp_half(half noundef %p0) {
+entry:
+ %p0.addr = alloca half, align 2
+ store half %p0, ptr %p0.addr, align 2
+ %0 = load half, ptr %p0.addr, align 2
+ %1 = load half, ptr %p0.addr, align 2
+ %2 = load half, ptr %p0.addr, align 2
+ %dx.lerp = call half @llvm.dx.lerp.f16(half %0, half %1, half %2)
+ ret half %dx.lerp
+}
+
+; CHECK-LABEL: lerp_float
+; CHECK: fsub float %{{.*}}, %{{.*}}
+; CHECK: fmul float %{{.*}}, %{{.*}}
+; CHECK: fadd float %{{.*}}, %{{.*}}
+define noundef float @lerp_float(float noundef %p0, float noundef %p1) {
+entry:
+ %p1.addr = alloca float, align 4
+ %p0.addr = alloca float, align 4
+ store float %p1, ptr %p1.addr, align 4
+ store float %p0, ptr %p0.addr, align 4
+ %0 = load float, ptr %p0.addr, align 4
+ %1 = load float, ptr %p0.addr, align 4
+ %2 = load float, ptr %p0.addr, align 4
+ %dx.lerp = call float @llvm.dx.lerp.f32(float %0, float %1, float %2)
+ ret float %dx.lerp
+}
+
+; CHECK-LABEL: lerp_float4
+; CHECK: fsub <4 x float> %{{.*}}, %{{.*}}
+; CHECK: fmul <4 x float> %{{.*}}, %{{.*}}
+; CHECK: fadd <4 x float> %{{.*}}, %{{.*}}
+define noundef <4 x float> @lerp_float4(<4 x float> noundef %p0, <4 x float> noundef %p1) {
+entry:
+ %p1.addr = alloca <4 x float>, align 16
+ %p0.addr = alloca <4 x float>, align 16
+ store <4 x float> %p1, ptr %p1.addr, align 16
+ store <4 x float> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x float>, ptr %p0.addr, align 16
+ %1 = load <4 x float>, ptr %p0.addr, align 16
+ %2 = load <4 x float>, ptr %p0.addr, align 16
+ %dx.lerp = call <4 x float> @llvm.dx.lerp.v4f32(<4 x float> %0, <4 x float> %1, <4 x float> %2)
+ ret <4 x float> %dx.lerp
+}
+
+declare half @llvm.dx.lerp.f16(half, half, half)
+declare float @llvm.dx.lerp.f32(float, float, float)
+declare <4 x float> @llvm.dx.lerp.v4f32(<4 x float>, <4 x float>, <4 x float>)
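The fsub/fmul/fadd sequence checked above is the usual lerp identity, lerp(a, b, t) = a + t * (b - a). A minimal sketch of that shape for float; the test only pins the instruction sequence, so the operand names and ordering here are illustrative:

define float @lerp_expanded(float %a, float %b, float %t) {
entry:
  %sub = fsub float %b, %a      ; b - a
  %mul = fmul float %sub, %t    ; (b - a) * t
  %add = fadd float %a, %mul    ; a + (b - a) * t
  ret float %add
}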
diff --git a/llvm/test/CodeGen/DirectX/lib_entry.ll b/llvm/test/CodeGen/DirectX/lib_entry.ll
index 9208d6d..5254a08 100644
--- a/llvm/test/CodeGen/DirectX/lib_entry.ll
+++ b/llvm/test/CodeGen/DirectX/lib_entry.ll
@@ -7,7 +7,7 @@ target triple = "dxil-unknown-shadermodel6.7-library"
; Make sure generate empty entry for lib profile.
;CHECK:![[empty_entry]] = !{null, !"", null, null, ![[shader_flags:[0-9]+]]}
; Make sure double is marked for shader flags.
-;CHECK:![[shader_flags]] = !{i32 0, i64 1}
+;CHECK:![[shader_flags]] = !{i32 0, i64 4}
;CHECK:![[entry]] = !{ptr @entry, !"entry", null, null, ![[extra:[0-9]+]]}
;CHECK:![[extra]] = !{i32 8, i32 5, i32 4, ![[numthreads:[0-9]+]]}
;CHECK:![[numthreads]] = !{i32 1, i32 2, i32 1}
diff --git a/llvm/test/CodeGen/DirectX/log-vec.ll b/llvm/test/CodeGen/DirectX/log-vec.ll
new file mode 100644
index 0000000..4768fdd
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log-vec.ll
@@ -0,0 +1,30 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s
+
+; Make sure the log and log10 intrinsics are expanded via log2 for float vectors.
+
+; CHECK-LABEL: log_float4
+; CHECK: call <4 x float> @llvm.log2.v4f32(<4 x float> %{{.*}})
+; CHECK: fmul <4 x float> <float 0x3FE62E4300000000, float 0x3FE62E4300000000, float 0x3FE62E4300000000, float 0x3FE62E4300000000>, %{{.*}}
+define noundef <4 x float> @log_float4(<4 x float> noundef %p0) {
+entry:
+ %p0.addr = alloca <4 x float>, align 16
+ store <4 x float> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x float>, ptr %p0.addr, align 16
+ %elt.log = call <4 x float> @llvm.log.v4f32(<4 x float> %0)
+ ret <4 x float> %elt.log
+}
+
+; CHECK-LABEL: log10_float4
+; CHECK: call <4 x float> @llvm.log2.v4f32(<4 x float> %{{.*}})
+; CHECK: fmul <4 x float> <float 0x3FD3441340000000, float 0x3FD3441340000000, float 0x3FD3441340000000, float 0x3FD3441340000000>, %{{.*}}
+define noundef <4 x float> @log10_float4(<4 x float> noundef %p0) {
+entry:
+ %p0.addr = alloca <4 x float>, align 16
+ store <4 x float> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x float>, ptr %p0.addr, align 16
+ %elt.log10 = call <4 x float> @llvm.log10.v4f32(<4 x float> %0)
+ ret <4 x float> %elt.log10
+}
+
+declare <4 x float> @llvm.log.v4f32(<4 x float>)
+declare <4 x float> @llvm.log10.v4f32(<4 x float>)
diff --git a/llvm/test/CodeGen/DirectX/log.ll b/llvm/test/CodeGen/DirectX/log.ll
new file mode 100644
index 0000000..172c3bf
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log.ll
@@ -0,0 +1,25 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefixes=CHECK,EXPCHECK
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s --check-prefixes=CHECK,DOPCHECK
+
+; Make sure dxil operation function calls for log are generated.
+
+define noundef float @log_float(float noundef %a) #0 {
+entry:
+; DOPCHECK: call float @dx.op.unary.f32(i32 23, float %{{.*}})
+; EXPCHECK: call float @llvm.log2.f32(float %a)
+; CHECK: fmul float 0x3FE62E4300000000, %{{.*}}
+ %elt.log = call float @llvm.log.f32(float %a)
+ ret float %elt.log
+}
+
+define noundef half @log_half(half noundef %a) #0 {
+entry:
+; DOPCHECK: call half @dx.op.unary.f16(i32 23, half %{{.*}})
+; EXPCHECK: call half @llvm.log2.f16(half %a)
+; CHECK: fmul half 0xH398C, %{{.*}}
+ %elt.log = call half @llvm.log.f16(half %a)
+ ret half %elt.log
+}
+
+declare half @llvm.log.f16(half)
+declare float @llvm.log.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/log10.ll b/llvm/test/CodeGen/DirectX/log10.ll
new file mode 100644
index 0000000..d4f827a
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log10.ll
@@ -0,0 +1,25 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefixes=CHECK,EXPCHECK
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s --check-prefixes=CHECK,DOPCHECK
+
+; Make sure dxil operation function calls for log10 are generated.
+
+define noundef float @log10_float(float noundef %a) #0 {
+entry:
+; DOPCHECK: call float @dx.op.unary.f32(i32 23, float %{{.*}})
+; EXPCHECK: call float @llvm.log2.f32(float %a)
+; CHECK: fmul float 0x3FD3441340000000, %{{.*}}
+ %elt.log10 = call float @llvm.log10.f32(float %a)
+ ret float %elt.log10
+}
+
+define noundef half @log10_half(half noundef %a) #0 {
+entry:
+; DOPCHECK: call half @dx.op.unary.f16(i32 23, half %{{.*}})
+; EXPCHECK: call half @llvm.log2.f16(half %a)
+; CHECK: fmul half 0xH34D1, %{{.*}}
+ %elt.log10 = call half @llvm.log10.f16(half %a)
+ ret half %elt.log10
+}
+
+declare half @llvm.log10.f16(half)
+declare float @llvm.log10.f32(float)
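Both log and log10 lean on the base-change identity log_b(x) = log2(x) * log_b(2): the fmul constants checked above are ln(2) ~= 0.693147 (0x3FE62E4300000000) and log10(2) ~= 0.301030 (0x3FD3441340000000). A minimal sketch of the log10 expansion for float, with a hypothetical function name:

define float @log10_expanded(float %x) {
entry:
  %l2 = call float @llvm.log2.f32(float %x)
  ; log2(x) * log10(2)
  %r = fmul float 0x3FD3441340000000, %l2
  ret float %r
}
declare float @llvm.log2.f32(float)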
diff --git a/llvm/test/CodeGen/DirectX/log2.ll b/llvm/test/CodeGen/DirectX/log2.ll
new file mode 100644
index 0000000..2164d4d
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log2.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for log2 are generated for float and half.
+
+define noundef float @log2_float(float noundef %a) #0 {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 23, float %{{.*}})
+ %elt.log2 = call float @llvm.log2.f32(float %a)
+ ret float %elt.log2
+}
+
+define noundef half @log2_half(half noundef %a) #0 {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 23, half %{{.*}})
+ %elt.log2 = call half @llvm.log2.f16(half %a)
+ ret half %elt.log2
+}
+
+declare half @llvm.log2.f16(half)
+declare float @llvm.log2.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/log2_error.ll b/llvm/test/CodeGen/DirectX/log2_error.ll
new file mode 100644
index 0000000..a26f6e8
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log2_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation log2 does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @log2_double(double noundef %a) {
+entry:
+ %elt.log2 = call double @llvm.log2.f64(double %a)
+ ret double %elt.log2
+}
diff --git a/llvm/test/CodeGen/DirectX/pow-vec.ll b/llvm/test/CodeGen/DirectX/pow-vec.ll
new file mode 100644
index 0000000..781fa5b
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/pow-vec.ll
@@ -0,0 +1,15 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s
+
+; Make sure the pow intrinsic is expanded via log2 and exp2 for float vectors.
+
+; CHECK-LABEL: pow_float4
+; CHECK: call <4 x float> @llvm.log2.v4f32(<4 x float> %a)
+; CHECK: fmul <4 x float> %{{.*}}, %b
+; CHECK: call <4 x float> @llvm.exp2.v4f32(<4 x float> %{{.*}})
+define noundef <4 x float> @pow_float4(<4 x float> noundef %a, <4 x float> noundef %b) {
+entry:
+ %elt.pow = call <4 x float> @llvm.pow.v4f32(<4 x float> %a, <4 x float> %b)
+ ret <4 x float> %elt.pow
+}
+
+declare <4 x float> @llvm.pow.v4f32(<4 x float>,<4 x float>)
diff --git a/llvm/test/CodeGen/DirectX/pow.ll b/llvm/test/CodeGen/DirectX/pow.ll
new file mode 100644
index 0000000..25ce0fe
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/pow.ll
@@ -0,0 +1,29 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefixes=CHECK,EXPCHECK
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s --check-prefixes=CHECK,DOPCHECK
+
+; Make sure dxil operation function calls for pow are generated.
+
+define noundef float @pow_float(float noundef %a, float noundef %b) {
+entry:
+; DOPCHECK: call float @dx.op.unary.f32(i32 23, float %a)
+; EXPCHECK: call float @llvm.log2.f32(float %a)
+; CHECK: fmul float %{{.*}}, %b
+; DOPCHECK: call float @dx.op.unary.f32(i32 21, float %{{.*}})
+; EXPCHECK: call float @llvm.exp2.f32(float %{{.*}})
+ %elt.pow = call float @llvm.pow.f32(float %a, float %b)
+ ret float %elt.pow
+}
+
+define noundef half @pow_half(half noundef %a, half noundef %b) {
+entry:
+; DOPCHECK: call half @dx.op.unary.f16(i32 23, half %a)
+; EXPCHECK: call half @llvm.log2.f16(half %a)
+; CHECK: fmul half %{{.*}}, %b
+; DOPCHECK: call half @dx.op.unary.f16(i32 21, half %{{.*}})
+; EXPCHECK: call half @llvm.exp2.f16(half %{{.*}})
+ %elt.pow = call half @llvm.pow.f16(half %a, half %b)
+ ret half %elt.pow
+}
+
+declare half @llvm.pow.f16(half,half)
+declare float @llvm.pow.f32(float,float)
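The expansion checked above uses pow(a, b) = exp2(log2(a) * b), which is only meaningful for positive a; both RUN lines pin the same log2/fmul/exp2 chain. A minimal sketch for float, with a hypothetical function name:

define float @pow_expanded(float %a, float %b) {
entry:
  %l2 = call float @llvm.log2.f32(float %a)
  %mul = fmul float %l2, %b
  %r = call float @llvm.exp2.f32(float %mul)
  ret float %r
}
declare float @llvm.log2.f32(float)
declare float @llvm.exp2.f32(float)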
diff --git a/llvm/test/CodeGen/DirectX/rcp.ll b/llvm/test/CodeGen/DirectX/rcp.ll
new file mode 100644
index 0000000..65abe83
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/rcp.ll
@@ -0,0 +1,52 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure the rcp intrinsic is expanded to a reciprocal fdiv for half, float, and double.
+
+; CHECK-LABEL: rcp_float4
+; CHECK: fdiv <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %{{.*}}
+define noundef <4 x float> @rcp_float4(<4 x float> noundef %p0) {
+entry:
+ %p0.addr = alloca <4 x float>, align 16
+ store <4 x float> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x float>, ptr %p0.addr, align 16
+ %dx.rcp = call <4 x float> @llvm.dx.rcp.v4f32(<4 x float> %0)
+ ret <4 x float> %dx.rcp
+}
+
+; CHECK-LABEL: rcp_double4
+; CHECK: fdiv <4 x double> <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>, %{{.*}}
+define noundef <4 x double> @rcp_double4(<4 x double> noundef %p0) {
+entry:
+ %p0.addr = alloca <4 x double>, align 16
+ store <4 x double> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x double>, ptr %p0.addr, align 16
+ %dx.rcp = call <4 x double> @llvm.dx.rcp.v4f64(<4 x double> %0)
+ ret <4 x double> %dx.rcp
+}
+
+; CHECK-LABEL: rcp_half4
+; CHECK: fdiv <4 x half> <half 0xH3C00, half 0xH3C00, half 0xH3C00, half 0xH3C00>, %{{.*}}
+define noundef <4 x half> @rcp_half4(<4 x half> noundef %p0) {
+entry:
+ %p0.addr = alloca <4 x half>, align 16
+ store <4 x half> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x half>, ptr %p0.addr, align 16
+ %dx.rcp = call <4 x half> @llvm.dx.rcp.v4f16(<4 x half> %0)
+ ret <4 x half> %dx.rcp
+}
+
+; CHECK-LABEL: rcp_half
+; CHECK: fdiv half 0xH3C00, %{{.*}}
+define noundef half @rcp_half(half noundef %p0) {
+entry:
+ %p0.addr = alloca half, align 2
+ store half %p0, ptr %p0.addr, align 2
+ %0 = load half, ptr %p0.addr, align 2
+ %dx.rcp = call half @llvm.dx.rcp.f16(half %0)
+ ret half %dx.rcp
+}
+
+declare half @llvm.dx.rcp.f16(half)
+declare <4 x half> @llvm.dx.rcp.v4f16(<4 x half>)
+declare <4 x float> @llvm.dx.rcp.v4f32(<4 x float>)
+declare <4 x double> @llvm.dx.rcp.v4f64(<4 x double>)
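As the fdiv CHECK lines above show, rcp has no dedicated DXIL op here; it is expanded to a division of 1.0 by the operand (splatted for vectors). A minimal sketch for a hypothetical scalar float case, which the test itself does not cover:

define float @rcp_expanded(float %x) {
entry:
  %r = fdiv float 1.000000e+00, %x  ; rcp(x) == 1.0 / x
  ret float %r
}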
diff --git a/llvm/test/CodeGen/DirectX/reversebits.ll b/llvm/test/CodeGen/DirectX/reversebits.ll
new file mode 100644
index 0000000..b6a7a1b
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/reversebits.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for reversebits are generated for i16/i32/i64.
+
+define noundef i16 @test_bitreverse_short(i16 noundef %a) {
+entry:
+; CHECK:call i16 @dx.op.unary.i16(i32 30, i16 %{{.*}})
+ %elt.bitreverse = call i16 @llvm.bitreverse.i16(i16 %a)
+ ret i16 %elt.bitreverse
+}
+
+define noundef i32 @test_bitreverse_int(i32 noundef %a) {
+entry:
+; CHECK:call i32 @dx.op.unary.i32(i32 30, i32 %{{.*}})
+ %elt.bitreverse = call i32 @llvm.bitreverse.i32(i32 %a)
+ ret i32 %elt.bitreverse
+}
+
+define noundef i64 @test_bitreverse_long(i64 noundef %a) {
+entry:
+; CHECK:call i64 @dx.op.unary.i64(i32 30, i64 %{{.*}})
+ %elt.bitreverse = call i64 @llvm.bitreverse.i64(i64 %a)
+ ret i64 %elt.bitreverse
+}
+
+declare i16 @llvm.bitreverse.i16(i16)
+declare i32 @llvm.bitreverse.i32(i32)
+declare i64 @llvm.bitreverse.i64(i64)
diff --git a/llvm/test/CodeGen/DirectX/round.ll b/llvm/test/CodeGen/DirectX/round.ll
index 5d53a79..e0a3772 100644
--- a/llvm/test/CodeGen/DirectX/round.ll
+++ b/llvm/test/CodeGen/DirectX/round.ll
@@ -1,31 +1,22 @@
; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
; Make sure dxil operation function calls for round are generated for float and half.
-; CHECK:call float @dx.op.unary.f32(i32 26, float %{{.*}})
-; CHECK:call half @dx.op.unary.f16(i32 26, half %{{.*}})
-target datalayout = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:32-f64:64-n8:16:32:64"
-target triple = "dxil-pc-shadermodel6.7-library"
-
-; Function Attrs: noinline nounwind optnone
-define noundef float @round_float(float noundef %a) #0 {
+; CHECK-LABEL: round_half
+define noundef half @round_half(half noundef %a) {
entry:
- %a.addr = alloca float, align 4
- store float %a, ptr %a.addr, align 4
- %0 = load float, ptr %a.addr, align 4
- %elt.round = call float @llvm.round.f32(float %0)
- ret float %elt.round
+; CHECK: call half @dx.op.unary.f16(i32 26, half %{{.*}})
+ %elt.roundeven = call half @llvm.roundeven.f16(half %a)
+ ret half %elt.roundeven
}
-; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
-declare float @llvm.round.f32(float) #1
-
-; Function Attrs: noinline nounwind optnone
-define noundef half @round_half(half noundef %a) #0 {
+; CHECK-LABEL: round_float
+define noundef float @round_float(float noundef %a) {
entry:
- %a.addr = alloca half, align 2
- store half %a, ptr %a.addr, align 2
- %0 = load half, ptr %a.addr, align 2
- %elt.round = call half @llvm.round.f16(half %0)
- ret half %elt.round
+; CHECK: call float @dx.op.unary.f32(i32 26, float %{{.*}})
+ %elt.roundeven = call float @llvm.roundeven.f32(float %a)
+ ret float %elt.roundeven
}
+
+declare half @llvm.roundeven.f16(half)
+declare float @llvm.roundeven.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/round_error.ll b/llvm/test/CodeGen/DirectX/round_error.ll
index 3bd87b2..2d27fbb 100644
--- a/llvm/test/CodeGen/DirectX/round_error.ll
+++ b/llvm/test/CodeGen/DirectX/round_error.ll
@@ -8,6 +8,6 @@ entry:
%a.addr = alloca double, align 8
store double %a, ptr %a.addr, align 8
%0 = load double, ptr %a.addr, align 8
- %elt.round = call double @llvm.round.f64(double %0)
- ret double %elt.round
+ %elt.roundeven = call double @llvm.roundeven.f64(double %0)
+ ret double %elt.roundeven
}
diff --git a/llvm/test/CodeGen/DirectX/rsqrt.ll b/llvm/test/CodeGen/DirectX/rsqrt.ll
new file mode 100644
index 0000000..52af0e6
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/rsqrt.ll
@@ -0,0 +1,28 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for rsqrt are generated for float and half.
+
+; CHECK-LABEL: rsqrt_float
+; CHECK: call float @dx.op.unary.f32(i32 25, float %{{.*}})
+define noundef float @rsqrt_float(float noundef %a) {
+entry:
+ %a.addr = alloca float, align 4
+ store float %a, ptr %a.addr, align 4
+ %0 = load float, ptr %a.addr, align 4
+ %dx.rsqrt = call float @llvm.dx.rsqrt.f32(float %0)
+ ret float %dx.rsqrt
+}
+
+; CHECK-LABEL: rsqrt_half
+; CHECK: call half @dx.op.unary.f16(i32 25, half %{{.*}})
+define noundef half @rsqrt_half(half noundef %a) {
+entry:
+ %a.addr = alloca half, align 2
+ store half %a, ptr %a.addr, align 2
+ %0 = load half, ptr %a.addr, align 2
+ %dx.rsqrt = call half @llvm.dx.rsqrt.f16(half %0)
+ ret half %dx.rsqrt
+}
+
+declare half @llvm.dx.rsqrt.f16(half)
+declare float @llvm.dx.rsqrt.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/rsqrt_error.ll b/llvm/test/CodeGen/DirectX/rsqrt_error.ll
new file mode 100644
index 0000000..9cd5002
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/rsqrt_error.ll
@@ -0,0 +1,14 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation rsqrt does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+; Function Attrs: noinline nounwind optnone
+define noundef double @rsqrt_double(double noundef %a) #0 {
+entry:
+ %a.addr = alloca double, align 8
+ store double %a, ptr %a.addr, align 8
+ %0 = load double, ptr %a.addr, align 8
+ %dx.rsqrt = call double @llvm.dx.rsqrt.f64(double %0)
+ ret double %dx.rsqrt
+}
diff --git a/llvm/test/CodeGen/DirectX/smax.ll b/llvm/test/CodeGen/DirectX/smax.ll
new file mode 100644
index 0000000..8b24067
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/smax.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for smax are generated for i16/i32/i64.
+
+; CHECK-LABEL:test_smax_i16
+define noundef i16 @test_smax_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: call i16 @dx.op.binary.i16(i32 37, i16 %{{.*}}, i16 %{{.*}})
+ %0 = call i16 @llvm.smax.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
+
+; CHECK-LABEL:test_smax_i32
+define noundef i32 @test_smax_i32(i32 noundef %a, i32 noundef %b) {
+entry:
+; CHECK: call i32 @dx.op.binary.i32(i32 37, i32 %{{.*}}, i32 %{{.*}})
+ %0 = call i32 @llvm.smax.i32(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+; CHECK-LABEL:test_smax_i64
+define noundef i64 @test_smax_i64(i64 noundef %a, i64 noundef %b) {
+entry:
+; CHECK: call i64 @dx.op.binary.i64(i32 37, i64 %{{.*}}, i64 %{{.*}})
+ %0 = call i64 @llvm.smax.i64(i64 %a, i64 %b)
+ ret i64 %0
+}
+
+declare i16 @llvm.smax.i16(i16, i16)
+declare i32 @llvm.smax.i32(i32, i32)
+declare i64 @llvm.smax.i64(i64, i64)
diff --git a/llvm/test/CodeGen/DirectX/smin.ll b/llvm/test/CodeGen/DirectX/smin.ll
new file mode 100644
index 0000000..b2b40a1
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/smin.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for smin are generated for i16/i32/i64.
+
+; CHECK-LABEL:test_smin_i16
+define noundef i16 @test_smin_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: call i16 @dx.op.binary.i16(i32 38, i16 %{{.*}}, i16 %{{.*}})
+ %0 = call i16 @llvm.smin.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
+
+; CHECK-LABEL:test_smin_i32
+define noundef i32 @test_smin_i32(i32 noundef %a, i32 noundef %b) {
+entry:
+; CHECK: call i32 @dx.op.binary.i32(i32 38, i32 %{{.*}}, i32 %{{.*}})
+ %0 = call i32 @llvm.smin.i32(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+; CHECK-LABEL:test_smin_i64
+define noundef i64 @test_smin_i64(i64 noundef %a, i64 noundef %b) {
+entry:
+; CHECK: call i64 @dx.op.binary.i64(i32 38, i64 %{{.*}}, i64 %{{.*}})
+ %0 = call i64 @llvm.smin.i64(i64 %a, i64 %b)
+ ret i64 %0
+}
+
+declare i16 @llvm.smin.i16(i16, i16)
+declare i32 @llvm.smin.i32(i32, i32)
+declare i64 @llvm.smin.i64(i64, i64)
diff --git a/llvm/test/CodeGen/DirectX/sqrt.ll b/llvm/test/CodeGen/DirectX/sqrt.ll
new file mode 100644
index 0000000..76a572e
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/sqrt.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for sqrt are generated for float and half.
+
+define noundef float @sqrt_float(float noundef %a) #0 {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 24, float %{{.*}})
+ %elt.sqrt = call float @llvm.sqrt.f32(float %a)
+ ret float %elt.sqrt
+}
+
+define noundef half @sqrt_half(half noundef %a) #0 {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 24, half %{{.*}})
+ %elt.sqrt = call half @llvm.sqrt.f16(half %a)
+ ret half %elt.sqrt
+}
+
+declare half @llvm.sqrt.f16(half)
+declare float @llvm.sqrt.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/sqrt_error.ll b/llvm/test/CodeGen/DirectX/sqrt_error.ll
new file mode 100644
index 0000000..fffa2e1
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/sqrt_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation sqrt does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @sqrt_double(double noundef %a) {
+entry:
+ %elt.sqrt = call double @llvm.sqrt.f64(double %a)
+ ret double %elt.sqrt
+}
diff --git a/llvm/test/CodeGen/DirectX/trunc.ll b/llvm/test/CodeGen/DirectX/trunc.ll
new file mode 100644
index 0000000..2072f28
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/trunc.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for trunc are generated for float and half.
+
+define noundef float @trunc_float(float noundef %a) {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 29, float %{{.*}})
+ %elt.trunc = call float @llvm.trunc.f32(float %a)
+ ret float %elt.trunc
+}
+
+define noundef half @trunc_half(half noundef %a) {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 29, half %{{.*}})
+ %elt.trunc = call half @llvm.trunc.f16(half %a)
+ ret half %elt.trunc
+}
+
+declare half @llvm.trunc.f16(half)
+declare float @llvm.trunc.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/trunc_error.ll b/llvm/test/CodeGen/DirectX/trunc_error.ll
new file mode 100644
index 0000000..751b0b9
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/trunc_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation trunc does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @trunc_double(double noundef %a) {
+entry:
+ %elt.trunc = call double @llvm.trunc.f64(double %a)
+ ret double %elt.trunc
+}
diff --git a/llvm/test/CodeGen/DirectX/umax.ll b/llvm/test/CodeGen/DirectX/umax.ll
index c7b6a87..be0f557 100644
--- a/llvm/test/CodeGen/DirectX/umax.ll
+++ b/llvm/test/CodeGen/DirectX/umax.ll
@@ -1,30 +1,31 @@
; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
-; Make sure dxil operation function calls for umax are generated for i32/i64.
+; Make sure dxil operation function calls for umax are generated for i16/i32/i64.
-target datalayout = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:32-f64:64-n8:16:32:64"
-target triple = "dxil-pc-shadermodel6.7-library"
+; CHECK-LABEL:test_umax_i16
+define noundef i16 @test_umax_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: call i16 @dx.op.binary.i16(i32 39, i16 %{{.*}}, i16 %{{.*}})
+ %0 = call i16 @llvm.umax.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
; CHECK-LABEL:test_umax_i32
-; Function Attrs: noinline nounwind optnone
-define noundef i32 @test_umax_i32(i32 noundef %a, i32 noundef %b) #0 {
+define noundef i32 @test_umax_i32(i32 noundef %a, i32 noundef %b) {
entry:
-; CHECK:call i32 @dx.op.binary.i32(i32 39, i32 %{{.*}}, i32 %{{.*}})
+; CHECK: call i32 @dx.op.binary.i32(i32 39, i32 %{{.*}}, i32 %{{.*}})
%0 = call i32 @llvm.umax.i32(i32 %a, i32 %b)
ret i32 %0
}
; CHECK-LABEL:test_umax_i64
-define noundef i64 @test_umax_i64(i64 noundef %a, i64 noundef %b) #0 {
+define noundef i64 @test_umax_i64(i64 noundef %a, i64 noundef %b) {
entry:
-; CHECK:call i64 @dx.op.binary.i64(i32 39, i64 %{{.*}}, i64 %{{.*}})
+; CHECK: call i64 @dx.op.binary.i64(i32 39, i64 %{{.*}}, i64 %{{.*}})
%0 = call i64 @llvm.umax.i64(i64 %a, i64 %b)
ret i64 %0
}
-; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
-declare i32 @llvm.umax.i32(i32, i32) #1
-declare i64 @llvm.umax.i64(i64, i64) #1
-
-attributes #0 = { noinline nounwind }
-attributes #1 = { nocallback nofree nosync nounwind readnone speculatable willreturn }
+declare i16 @llvm.umax.i16(i16, i16)
+declare i32 @llvm.umax.i32(i32, i32)
+declare i64 @llvm.umax.i64(i64, i64)
diff --git a/llvm/test/CodeGen/DirectX/umin.ll b/llvm/test/CodeGen/DirectX/umin.ll
new file mode 100644
index 0000000..5051c71
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/umin.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for umin are generated for i16/i32/i64.
+
+; CHECK-LABEL:test_umin_i16
+define noundef i16 @test_umin_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: call i16 @dx.op.binary.i16(i32 40, i16 %{{.*}}, i16 %{{.*}})
+ %0 = call i16 @llvm.umin.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
+
+; CHECK-LABEL:test_umin_i32
+define noundef i32 @test_umin_i32(i32 noundef %a, i32 noundef %b) {
+entry:
+; CHECK: call i32 @dx.op.binary.i32(i32 40, i32 %{{.*}}, i32 %{{.*}})
+ %0 = call i32 @llvm.umin.i32(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+; CHECK-LABEL:test_umin_i64
+define noundef i64 @test_umin_i64(i64 noundef %a, i64 noundef %b) {
+entry:
+; CHECK: call i64 @dx.op.binary.i64(i32 40, i64 %{{.*}}, i64 %{{.*}})
+ %0 = call i64 @llvm.umin.i64(i64 %a, i64 %b)
+ ret i64 %0
+}
+
+declare i16 @llvm.umin.i16(i16, i16)
+declare i32 @llvm.umin.i32(i32, i32)
+declare i64 @llvm.umin.i64(i64, i64)
diff --git a/llvm/test/CodeGen/Generic/ForceStackAlign.ll b/llvm/test/CodeGen/Generic/ForceStackAlign.ll
index 2c35ad3..7993b3e 100644
--- a/llvm/test/CodeGen/Generic/ForceStackAlign.ll
+++ b/llvm/test/CodeGen/Generic/ForceStackAlign.ll
@@ -8,7 +8,7 @@
; Stack realignment not supported.
; XFAIL: target=sparc{{.*}}
-; NVPTX cannot select dynamic_stackalloc
+; NVPTX can only select dynamic_stackalloc on sm_52+ and with ptx73+
; XFAIL: target=nvptx{{.*}}
define i32 @f(ptr %p) nounwind {
diff --git a/llvm/test/CodeGen/Generic/allow-check.ll b/llvm/test/CodeGen/Generic/allow-check.ll
new file mode 100644
index 0000000..43dab68
--- /dev/null
+++ b/llvm/test/CodeGen/Generic/allow-check.ll
@@ -0,0 +1,31 @@
+; Avoid `!DL->isLittleEndian() && !CLI->enableBigEndian()` mismatch on PPC64BE.
+; REQUIRES: host-byteorder-little-endian
+
+; -global-isel=1 is unsupported.
+; XFAIL: target=nvptx{{.*}}
+; XFAIL: target=sparc{{.*}}
+; XFAIL: target=hexagon-{{.*}}
+
+; RUN: llc < %s -O3 -global-isel=0 -fast-isel=0
+; RUN: llc < %s -O3 -global-isel=1 -fast-isel=0
+; RUN: llc < %s -O3 -global-isel=0 -fast-isel=1
+
+; RUN: llc < %s -O0 -global-isel=0 -fast-isel=0
+; RUN: llc < %s -O0 -global-isel=1 -fast-isel=0
+; RUN: llc < %s -O0 -global-isel=0 -fast-isel=1
+
+define i1 @test_runtime() local_unnamed_addr {
+entry:
+ %allow = call i1 @llvm.allow.runtime.check(metadata !"test_check")
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.runtime.check(metadata) nounwind
+
+define i1 @test_ubsan() local_unnamed_addr {
+entry:
+ %allow = call i1 @llvm.allow.ubsan.check(i8 7)
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.ubsan.check(i8) nounwind
diff --git a/llvm/test/CodeGen/Generic/builtin-hot.ll b/llvm/test/CodeGen/Generic/builtin-hot.ll
deleted file mode 100644
index 449f58d..0000000
--- a/llvm/test/CodeGen/Generic/builtin-hot.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -o - %s | FileCheck %s
-
-; REQUIRES: aarch64-registered-target
-
-target triple = "aarch64-linux"
-
-define i1 @test() {
-; CHECK-LABEL: test:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w0, wzr
-; CHECK-NEXT: ret
-entry:
- %hot = call i1 @llvm.experimental.hot()
- ret i1 %hot
-}
-
-declare i1 @llvm.expect.hot() nounwind
-
diff --git a/llvm/test/CodeGen/Generic/gc-lowering.ll b/llvm/test/CodeGen/Generic/gc-lowering.ll
new file mode 100644
index 0000000..fa2e92a
--- /dev/null
+++ b/llvm/test/CodeGen/Generic/gc-lowering.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes='require<collector-metadata>,function(gc-lowering)' < %s | FileCheck %s
+
+declare ptr @llvm_gc_allocate(i32)
+declare void @llvm_gc_initialize(i32)
+
+declare void @llvm.gcroot(ptr, ptr)
+declare void @llvm.gcwrite(ptr, ptr, ptr)
+
+define i32 @main() gc "shadow-stack" {
+; CHECK-LABEL: define i32 @main() gc "shadow-stack" {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: store ptr null, ptr [[A]], align 8
+; CHECK-NEXT: [[B:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: store ptr null, ptr [[B]], align 8
+; CHECK-NEXT: call void @llvm_gc_initialize(i32 1048576)
+; CHECK-NEXT: call void @llvm.gcroot(ptr [[A]], ptr null)
+; CHECK-NEXT: [[APTR:%.*]] = call ptr @llvm_gc_allocate(i32 10)
+; CHECK-NEXT: store ptr [[APTR]], ptr [[A]], align 8
+; CHECK-NEXT: call void @llvm.gcroot(ptr [[B]], ptr null)
+; CHECK-NEXT: [[B_UPGRD_1:%.*]] = call ptr @llvm_gc_allocate(i32 8)
+; CHECK-NEXT: store ptr [[B_UPGRD_1]], ptr [[B]], align 8
+; CHECK-NEXT: [[B_1:%.*]] = load ptr, ptr [[B]], align 8
+; CHECK-NEXT: [[A_1:%.*]] = load ptr, ptr [[A]], align 8
+; CHECK-NEXT: store ptr [[A_1]], ptr [[B_1]], align 8
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %A = alloca ptr
+ %B = alloca ptr
+
+ call void @llvm_gc_initialize(i32 1048576) ; Start with 1MB heap
+
+ ;; ptr A;
+ call void @llvm.gcroot(ptr %A, ptr null)
+
+ ;; A = gcalloc(10);
+ %Aptr = call ptr @llvm_gc_allocate(i32 10)
+ store ptr %Aptr, ptr %A
+
+ ;; ptr B;
+ call void @llvm.gcroot(ptr %B, ptr null)
+
+  ;; B = gcalloc(8);
+ %B.upgrd.1 = call ptr @llvm_gc_allocate(i32 8)
+ store ptr %B.upgrd.1, ptr %B
+
+ ;; *B = A;
+ %B.1 = load ptr, ptr %B
+ %A.1 = load ptr, ptr %A
+ call void @llvm.gcwrite(ptr %A.1, ptr %B.upgrd.1, ptr %B.1)
+
+ ret i32 0
+}
+
+define void @no_gc() {
+; CHECK-LABEL: define void @no_gc() {
+; CHECK-NEXT: ret void
+;
+ ret void
+}
diff --git a/llvm/test/CodeGen/Hexagon/addrmode-immop.mir b/llvm/test/CodeGen/Hexagon/addrmode-immop.mir
index 3069cbe..1412d31 100644
--- a/llvm/test/CodeGen/Hexagon/addrmode-immop.mir
+++ b/llvm/test/CodeGen/Hexagon/addrmode-immop.mir
@@ -15,7 +15,7 @@
; Function Attrs: norecurse
define void @f0() #0 {
b0:
- %v0 = load ptr, ptr getelementptr (i8, ptr getelementptr inbounds ({ [3 x ptr], [3 x ptr] }, ptr @g0, i32 0, inrange i32 0, i32 3), i32 sub (i32 ptrtoint (ptr @f1 to i32), i32 1)), align 4
+ %v0 = load ptr, ptr getelementptr (i8, ptr getelementptr inbounds ({ [3 x ptr], [3 x ptr] }, ptr @g0, i32 0, i32 0, i32 3), i32 sub (i32 ptrtoint (ptr @f1 to i32), i32 1)), align 4
%v1 = call i32 %v0(ptr nonnull undef)
unreachable
}
@@ -33,7 +33,7 @@ tracksRegLiveness: true
body: |
bb.0.b0:
$r2 = A2_tfrsi @g0 + 12
- $r2 = L2_loadri_io killed $r2, @f1 - 1 :: (load (s32) from `ptr getelementptr (i8, ptr getelementptr inbounds ({ [3 x ptr], [3 x ptr] }, ptr @g0, i32 0, inrange i32 0, i32 3), i32 sub (i32 ptrtoint (ptr @f1 to i32), i32 1))`)
+ $r2 = L2_loadri_io killed $r2, @f1 - 1 :: (load (s32) from `ptr getelementptr (i8, ptr getelementptr inbounds ({ [3 x ptr], [3 x ptr] }, ptr @g0, i32 0, i32 0, i32 3), i32 sub (i32 ptrtoint (ptr @f1 to i32), i32 1))`)
ADJCALLSTACKDOWN 0, 0, implicit-def $r29, implicit-def dead $r30, implicit $r31, implicit $r30, implicit $r29
PS_callr_nr killed $r2, hexagoncsr, implicit undef $r0, implicit-def $r29, implicit-def dead $r0
ADJCALLSTACKUP 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit-def dead $r31, implicit $r29
diff --git a/llvm/test/CodeGen/Hexagon/build-attributes.ll b/llvm/test/CodeGen/Hexagon/build-attributes.ll
new file mode 100644
index 0000000..48ee31a
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/build-attributes.ll
@@ -0,0 +1,16 @@
+;; Generate build attributes from llc.
+
+; RUN: llc -mtriple=hexagon-unknown-elf \
+; RUN: -mattr=+hvxv73,+cabac,+v71,+hvx-ieee-fp,+hvx-length128b %s -o - | FileCheck %s
+
+; CHECK: .attribute 4, 71 // Tag_arch
+; CHECK-NEXT: .attribute 5, 73 // Tag_hvx_arch
+; CHECK-NEXT: .attribute 6, 1 // Tag_hvx_ieeefp
+; CHECK-NEXT: .attribute 7, 1 // Tag_hvx_qfloat
+; CHECK-NEXT: .attribute 8, 1 // Tag_zreg
+; CHECK-NEXT: .attribute 10, 1 // Tag_cabac
+
+define i32 @addi(i32 %a) {
+ %1 = add i32 %a, 1
+ ret i32 %1
+}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll
index 0771fda..7ccee16 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -hexagon-vlcr | opt -passes=adce -S | FileCheck %s
+; RUN: opt -mtriple=hexagon-- -passes='loop(hexagon-vlcr),adce' -S %s | FileCheck %s
; CHECK: %.hexagon.vlcr = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B
; ModuleID = 'hexagon_vector_loop_carried_reuse.c'
diff --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll
index 25afb9f..532f7fd 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -march=hexagon -hexagon-vlcr | opt -passes=adce -S | FileCheck %s
+; RUN: opt -mtriple hexagon-- -passes='loop(hexagon-vlcr),adce' -S %s | FileCheck %s
; CHECK: %v32.hexagon.vlcr = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B
diff --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll
index 5397342..ecfcf53 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -hexagon-vlcr | opt -passes=adce -S | FileCheck %s
+; RUN: opt -mtriple=hexagon-- -passes='loop(hexagon-vlcr),adce' -S %s | FileCheck %s
; CHECK-NOT: %.hexagon.vlcr
; ModuleID = 'hexagon_vector_loop_carried_reuse.c'
diff --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll
index b440dba..9872fae 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll
@@ -1,4 +1,4 @@
-; RUN: opt -hexagon-vlcr < %s -S | FileCheck %s
+; RUN: opt -mtriple=hexagon-- -passes=hexagon-vlcr -S %s | FileCheck %s
; Test that reuse doesn't occur due to two shufflevectors with different masks.
diff --git a/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll b/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
index ab7bf1b..c53e578 100644
--- a/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
+++ b/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
@@ -1,4 +1,5 @@
; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -mtriple=hexagon-- -p hexagon-loop-idiom -disable-memcpy-idiom -S < %s | FileCheck %s
; Make sure we don't convert load/store loops into memcpy if the access type
; is a vector. Using vector instructions is generally better in such cases.
diff --git a/llvm/test/CodeGen/Hexagon/livephysregs-regmask-clobber.mir b/llvm/test/CodeGen/Hexagon/livephysregs-regmask-clobber.mir
index 8f1cb42..5221307 100644
--- a/llvm/test/CodeGen/Hexagon/livephysregs-regmask-clobber.mir
+++ b/llvm/test/CodeGen/Hexagon/livephysregs-regmask-clobber.mir
@@ -17,6 +17,8 @@
name: f0
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, offset: 0, size: 128, alignment: 128 }
- { id: 1, offset: 128, size: 128, alignment: 128 }
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
index c711026..5ace9e6 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
@@ -1,6 +1,8 @@
; Check for recognizing the "memmove" idiom.
; RUN: opt -hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
; RUN: | FileCheck %s
+; RUN: opt -p hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
+; RUN: | FileCheck %s
; CHECK: call void @llvm.memmove
; Function Attrs: norecurse nounwind
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
index 234e4f5..ed56a33 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
@@ -1,5 +1,7 @@
; RUN: opt -hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
; RUN: | FileCheck %s
+; RUN: opt -p hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
+; RUN: | FileCheck %s
define void @PR14241(ptr %s, i64 %size) #0 {
; Ensure that we don't form a memcpy for strided loops. Briefly, when we taught
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
index 140c676..e5bcc2b 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -hexagon-loop-idiom < %s | opt -S -passes='loop(loop-deletion),gvn'
+; RUN: opt -mtriple hexagon-- -S -passes='loop(hexagon-loop-idiom,loop-deletion),gvn' %s
; REQUIRES: asserts
; This tests that the HexagonLoopIdiom pass does not mark LCSSA information
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll
index 7a7d1d9..78f0c9e 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll
@@ -1,4 +1,5 @@
; RUN: opt -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -p hexagon-loop-idiom -S < %s | FileCheck %s
; Make sure that we generate correct runtime checks.
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll
index 37e1bb6..ce02b62 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll
@@ -1,4 +1,5 @@
; RUN: opt -hexagon-loop-idiom -mtriple hexagon-unknown-elf < %s
+; RUN: opt -p hexagon-loop-idiom -mtriple hexagon-unknown-elf < %s
; REQUIRES: asserts
target triple = "hexagon"
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
index 1934ced..74c02d6 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
@@ -1,4 +1,5 @@
; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -march=hexagon -p hexagon-loop-idiom -S < %s | FileCheck %s
; CHECK-LABEL: define void @fred
; Check that this test does not crash.
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll
index b25010f..94b0c96 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll
@@ -1,4 +1,5 @@
; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -march=hexagon -p hexagon-loop-idiom -S < %s | FileCheck %s
;
; The number of nested selects caused the simplification loop to take
; more than the maximum number of iterations. This caused the compiler
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll
index e4b2b5a..a00b1d5 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll
@@ -1,4 +1,5 @@
; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -march=hexagon -p hexagon-loop-idiom -S < %s | FileCheck %s
; REQUIRES: asserts
;
; Check for sane output, this used to crash.
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll
index 781618e..2461e1c 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll
@@ -1,5 +1,7 @@
; RUN: opt -hexagon-loop-idiom < %s -mtriple=hexagon-unknown-unknown -S \
; RUN: | FileCheck %s
+; RUN: opt -p hexagon-loop-idiom < %s -mtriple=hexagon-unknown-unknown -S \
+; RUN: | FileCheck %s
target triple = "hexagon"
diff --git a/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir b/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
index 67f4dd7..9468b18 100644
--- a/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
+++ b/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
@@ -135,7 +135,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 0
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
maxCallFrameSize: 0
hasOpaqueSPAdjustment: false
diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-permi.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-permi.ll
index 92669d2..0d9f9da 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-permi.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-permi.ll
@@ -36,33 +36,3 @@ entry:
%res = call <32 x i8> @llvm.loongarch.lasx.xvpermi.q(<32 x i8> %va, <32 x i8> %vb, i32 1)
ret <32 x i8> %res
}
-
-define <32 x i8> @lasx_xvpermi_q_204(<32 x i8> %va, <32 x i8> %vb) nounwind {
-; CHECK-LABEL: lasx_xvpermi_q_204:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvpermi.q $xr0, $xr1, 0
-; CHECK-NEXT: ret
-entry:
- %res = call <32 x i8> @llvm.loongarch.lasx.xvpermi.q(<32 x i8> %va, <32 x i8> %vb, i32 204)
- ret <32 x i8> %res
-}
-
-define <32 x i8> @lasx_xvpermi_q_221(<32 x i8> %va, <32 x i8> %vb) nounwind {
-; CHECK-LABEL: lasx_xvpermi_q_221:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvpermi.q $xr0, $xr1, 17
-; CHECK-NEXT: ret
-entry:
- %res = call <32 x i8> @llvm.loongarch.lasx.xvpermi.q(<32 x i8> %va, <32 x i8> %vb, i32 221)
- ret <32 x i8> %res
-}
-
-define <32 x i8> @lasx_xvpermi_q_255(<32 x i8> %va, <32 x i8> %vb) nounwind {
-; CHECK-LABEL: lasx_xvpermi_q_255:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvpermi.q $xr0, $xr1, 51
-; CHECK-NEXT: ret
-entry:
- %res = call <32 x i8> @llvm.loongarch.lasx.xvpermi.q(<32 x i8> %va, <32 x i8> %vb, i32 255)
- ret <32 x i8> %res
-}
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
index 25106b4..6629d34 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
@@ -123,9 +123,10 @@ define void @insert_32xi8_idx(ptr %src, ptr %dst, i8 %in, i32 %idx) nounwind {
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 0
-; CHECK-NEXT: st.b $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 4, 0
+; CHECK-NEXT: st.b $a2, $a3, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -149,9 +150,10 @@ define void @insert_16xi16_idx(ptr %src, ptr %dst, i16 %in, i32 %idx) nounwind {
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 1
-; CHECK-NEXT: st.h $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 4, 1
+; CHECK-NEXT: st.h $a2, $a3, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -175,9 +177,10 @@ define void @insert_8xi32_idx(ptr %src, ptr %dst, i32 %in, i32 %idx) nounwind {
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 2
-; CHECK-NEXT: st.w $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 4, 2
+; CHECK-NEXT: st.w $a2, $a3, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -201,9 +204,10 @@ define void @insert_4xi64_idx(ptr %src, ptr %dst, i64 %in, i32 %idx) nounwind {
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 3
-; CHECK-NEXT: st.d $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 4, 3
+; CHECK-NEXT: st.d $a2, $a3, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -227,9 +231,10 @@ define void @insert_8xfloat_idx(ptr %src, ptr %dst, float %in, i32 %idx) nounwin
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr1, $a0, 0
; CHECK-NEXT: xvst $xr1, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a2, 4, 2
-; CHECK-NEXT: fst.s $fa0, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: addi.d $a2, $sp, 0
+; CHECK-NEXT: bstrins.d $a2, $a0, 4, 2
+; CHECK-NEXT: fst.s $fa0, $a2, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -253,9 +258,10 @@ define void @insert_4xdouble_idx(ptr %src, ptr %dst, double %in, i32 %idx) nounw
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr1, $a0, 0
; CHECK-NEXT: xvst $xr1, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a2, 4, 3
-; CHECK-NEXT: fst.d $fa0, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: addi.d $a2, $sp, 0
+; CHECK-NEXT: bstrins.d $a2, $a0, 4, 3
+; CHECK-NEXT: fst.d $fa0, $a2, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
index 7f23207..19171b7 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
@@ -87,9 +87,10 @@ define void @insert_16xi8_idx(ptr %src, ptr %dst, i8 %ins, i32 %idx) nounwind {
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 0
-; CHECK-NEXT: st.b $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 3, 0
+; CHECK-NEXT: st.b $a2, $a3, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -106,9 +107,10 @@ define void @insert_8xi16_idx(ptr %src, ptr %dst, i16 %ins, i32 %idx) nounwind {
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 1
-; CHECK-NEXT: st.h $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 3, 1
+; CHECK-NEXT: st.h $a2, $a3, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -125,9 +127,10 @@ define void @insert_4xi32_idx(ptr %src, ptr %dst, i32 %ins, i32 %idx) nounwind {
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 2
-; CHECK-NEXT: st.w $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 3, 2
+; CHECK-NEXT: st.w $a2, $a3, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -144,9 +147,10 @@ define void @insert_2xi64_idx(ptr %src, ptr %dst, i64 %ins, i32 %idx) nounwind {
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 3
-; CHECK-NEXT: st.d $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 3, 3
+; CHECK-NEXT: st.d $a2, $a3, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -163,9 +167,10 @@ define void @insert_4xfloat_idx(ptr %src, ptr %dst, float %ins, i32 %idx) nounwi
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr1, $a0, 0
; CHECK-NEXT: vst $vr1, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a2, 3, 2
-; CHECK-NEXT: fst.s $fa0, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: addi.d $a2, $sp, 0
+; CHECK-NEXT: bstrins.d $a2, $a0, 3, 2
+; CHECK-NEXT: fst.s $fa0, $a2, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -182,9 +187,10 @@ define void @insert_2xdouble_idx(ptr %src, ptr %dst, double %ins, i32 %idx) noun
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr1, $a0, 0
; CHECK-NEXT: vst $vr1, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a2, 3, 3
-; CHECK-NEXT: fst.d $fa0, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: addi.d $a2, $sp, 0
+; CHECK-NEXT: bstrins.d $a2, $a0, 3, 3
+; CHECK-NEXT: fst.d $fa0, $a2, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
diff --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir
deleted file mode 100644
index d66dd10..0000000
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir
+++ /dev/null
@@ -1,10 +0,0 @@
-# RUN: not llc -mtriple=aarch64-- -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
-# When a low-level type is 0 bits
----
-name: test_scalar_size_0
-body: |
- bb.0:
- liveins: $x0
- ; CHECK: [[@LINE+1]]:10: invalid size for scalar type
- %0:_(s0) = G_IMPLICIT_DEF
-...
diff --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
index 6985687..632e5fa 100644
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
@@ -5,6 +5,6 @@ name: test_vector_element_size_0
body: |
bb.0:
liveins: $x0
- ; CHECK: [[@LINE+1]]:15: invalid size for scalar type
+ ; CHECK: [[@LINE+1]]:15: invalid size for scalar element in vector
%0:_(<2 x s0>) = G_IMPLICIT_DEF
...
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir b/llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir
index e40d187..9831f78 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir
@@ -29,6 +29,8 @@ liveins:
- { reg: '$vgpr0', virtual-reg: '' }
- { reg: '$vgpr1', virtual-reg: '' }
- { reg: '$vgpr2', virtual-reg: '' }
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, name: '', type: spill-slot, offset: 0, size: 8, alignment: 4,
stack-id: sgpr-spill, callee-saved-register: '', callee-saved-restored: true,
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/trap.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/trap.mir
index 6438893..dc99ce8 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/trap.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/trap.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
--- |
declare void @llvm.trap()
@@ -9,12 +9,15 @@
---
name: f
alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
; MIPS32-LABEL: name: f
; MIPS32: TRAP
- ; MIPS32: RetRA
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; MIPS32-NEXT: RetRA
+ G_TRAP
RetRA
...
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
index 52352ed..e471e10 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
@@ -220,10 +220,12 @@ body: |
; MIPS32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY3]], [[COPY1]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY1]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY]]
; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
- ; MIPS32-NEXT: $v0 = COPY [[ADD2]](s32)
- ; MIPS32-NEXT: $v1 = COPY [[ADD]](s32)
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; MIPS32-NEXT: $v0 = COPY [[COPY5]](s32)
+ ; MIPS32-NEXT: $v1 = COPY [[COPY4]](s32)
; MIPS32-NEXT: RetRA implicit $v0, implicit $v1
%2:_(s32) = COPY $a0
%3:_(s32) = COPY $a1
@@ -268,6 +270,7 @@ body: |
; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (load (s32) from %fixed-stack.3)
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[COPY]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LOAD1]], [[COPY1]]
; MIPS32-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[LOAD1]]
; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
@@ -275,6 +278,7 @@ body: |
; MIPS32-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
; MIPS32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
; MIPS32-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LOAD2]], [[COPY2]]
; MIPS32-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[LOAD2]]
; MIPS32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -283,13 +287,15 @@ body: |
; MIPS32-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD4]](s32), [[C]]
; MIPS32-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ICMP4]], [[OR]]
; MIPS32-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ICMP3]], [[AND2]]
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
; MIPS32-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[LOAD3]], [[COPY3]]
; MIPS32-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[OR1]], [[C1]]
; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD5]], [[AND3]]
- ; MIPS32-NEXT: $v0 = COPY [[ADD]](s32)
- ; MIPS32-NEXT: $v1 = COPY [[ADD2]](s32)
- ; MIPS32-NEXT: $a0 = COPY [[ADD4]](s32)
- ; MIPS32-NEXT: $a1 = COPY [[ADD6]](s32)
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ADD6]](s32)
+ ; MIPS32-NEXT: $v0 = COPY [[COPY4]](s32)
+ ; MIPS32-NEXT: $v1 = COPY [[COPY5]](s32)
+ ; MIPS32-NEXT: $a0 = COPY [[COPY6]](s32)
+ ; MIPS32-NEXT: $a1 = COPY [[COPY7]](s32)
; MIPS32-NEXT: RetRA implicit $v0, implicit $v1, implicit $a0, implicit $a1
%2:_(s32) = COPY $a0
%3:_(s32) = COPY $a1
@@ -331,10 +337,11 @@ body: |
; MIPS32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $a3
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY1]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; MIPS32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]]
; MIPS32-NEXT: G_STORE [[AND]](s32), [[COPY3]](p0) :: (store (s8) into %ir.pcarry_flag)
- ; MIPS32-NEXT: G_STORE [[ADD]](s32), [[COPY2]](p0) :: (store (s32) into %ir.padd)
+ ; MIPS32-NEXT: G_STORE [[COPY4]](s32), [[COPY2]](p0) :: (store (s32) into %ir.padd)
; MIPS32-NEXT: RetRA
%0:_(s32) = COPY $a0
%1:_(s32) = COPY $a1
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir
index 136c039..f518e9e 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir
@@ -10,29 +10,30 @@ body: |
; MIPS32-LABEL: name: ctpop_i32
; MIPS32: liveins: $a0
- ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
- ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
- ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
- ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C1]]
- ; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[AND]]
- ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; MIPS32: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C2]](s32)
- ; MIPS32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
- ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C3]]
- ; MIPS32: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C3]]
- ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND1]], [[AND2]]
- ; MIPS32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; MIPS32: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C4]](s32)
- ; MIPS32: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LSHR2]], [[ADD]]
- ; MIPS32: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
- ; MIPS32: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
- ; MIPS32: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; MIPS32: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
- ; MIPS32: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; MIPS32: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
- ; MIPS32: $v0 = COPY [[LSHR3]](s32)
- ; MIPS32: RetRA implicit $v0
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C1]]
+ ; MIPS32-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[AND]]
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C2]](s32)
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C3]]
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C3]]
+ ; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND1]], [[AND2]]
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C4]](s32)
+ ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LSHR2]], [[ADD]]
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
+ ; MIPS32-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; MIPS32-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
+ ; MIPS32-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
+ ; MIPS32-NEXT: $v0 = COPY [[LSHR3]](s32)
+ ; MIPS32-NEXT: RetRA implicit $v0
%0:_(s32) = COPY $a0
%1:_(s32) = G_CTPOP %0(s32)
$v0 = COPY %1(s32)
@@ -49,45 +50,46 @@ body: |
; MIPS32-LABEL: name: ctpop_i64
; MIPS32: liveins: $a0, $a1
- ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
- ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
- ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
- ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C1]]
- ; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[AND]]
- ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; MIPS32: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C2]](s32)
- ; MIPS32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
- ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C3]]
- ; MIPS32: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C3]]
- ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND1]], [[AND2]]
- ; MIPS32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; MIPS32: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C4]](s32)
- ; MIPS32: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LSHR2]], [[ADD]]
- ; MIPS32: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
- ; MIPS32: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
- ; MIPS32: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; MIPS32: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
- ; MIPS32: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; MIPS32: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
- ; MIPS32: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:_(s32) = G_AND [[LSHR4]], [[C1]]
- ; MIPS32: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[AND4]]
- ; MIPS32: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[SUB1]], [[C2]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:_(s32) = G_AND [[LSHR5]], [[C3]]
- ; MIPS32: [[AND6:%[0-9]+]]:_(s32) = G_AND [[SUB1]], [[C3]]
- ; MIPS32: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[AND5]], [[AND6]]
- ; MIPS32: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[ADD2]], [[C4]](s32)
- ; MIPS32: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LSHR6]], [[ADD2]]
- ; MIPS32: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD3]], [[C5]]
- ; MIPS32: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C6]]
- ; MIPS32: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C7]](s32)
- ; MIPS32: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[LSHR7]], [[LSHR3]]
- ; MIPS32: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; MIPS32: $v0 = COPY [[ADD4]](s32)
- ; MIPS32: $v1 = COPY [[C8]](s32)
- ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C1]]
+ ; MIPS32-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[AND]]
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C2]](s32)
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C3]]
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C3]]
+ ; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND1]], [[AND2]]
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C4]](s32)
+ ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LSHR2]], [[ADD]]
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
+ ; MIPS32-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; MIPS32-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
+ ; MIPS32-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
+ ; MIPS32-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[LSHR4]], [[C1]]
+ ; MIPS32-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[AND4]]
+ ; MIPS32-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[SUB1]], [[C2]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[LSHR5]], [[C3]]
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[SUB1]], [[C3]]
+ ; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[AND5]], [[AND6]]
+ ; MIPS32-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[ADD2]], [[C4]](s32)
+ ; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LSHR6]], [[ADD2]]
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD3]], [[C5]]
+ ; MIPS32-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C6]]
+ ; MIPS32-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C7]](s32)
+ ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[LSHR7]], [[LSHR3]]
+ ; MIPS32-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; MIPS32-NEXT: $v0 = COPY [[ADD4]](s32)
+ ; MIPS32-NEXT: $v1 = COPY [[C8]](s32)
+ ; MIPS32-NEXT: RetRA implicit $v0, implicit $v1
%1:_(s32) = COPY $a0
%2:_(s32) = COPY $a1
%0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir
index 3e7bcdc..a06bb6d 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir
@@ -139,9 +139,11 @@ body: |
; MIPS32-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[ADD1]], [[SUB1]]
; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[SELECT]], [[C]]
; MIPS32-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[C]]
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[C1]], [[C1]]
; MIPS32-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[ICMP1]]
- ; MIPS32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ADD3]](s32), [[ADD5]](s32)
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ADD5]](s32)
+ ; MIPS32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
; MIPS32-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[C1]]
; MIPS32-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[COPY1]], [[C1]]
; MIPS32-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[XOR2]], [[XOR3]]
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir
index 7ad286b..674d7b6 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir
@@ -275,8 +275,10 @@ body: |
; MIPS32-NEXT: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[LOAD]], [[COPY]]
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL1]], [[MUL2]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[MUL2]]
- ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[UMULH]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY4]], [[UMULH]]
; MIPS32-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[UMULH]]
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD1]](s32)
; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ICMP]], [[ICMP1]]
; MIPS32-NEXT: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[LOAD2]], [[COPY]]
; MIPS32-NEXT: [[MUL4:%[0-9]+]]:_(s32) = G_MUL [[LOAD1]], [[COPY1]]
@@ -285,17 +287,22 @@ body: |
; MIPS32-NEXT: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH [[LOAD]], [[COPY1]]
; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[MUL3]], [[MUL4]]
; MIPS32-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[MUL4]]
- ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[MUL5]]
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
+ ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[COPY6]], [[MUL5]]
; MIPS32-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD4]](s32), [[MUL5]]
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
; MIPS32-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ICMP2]], [[ICMP3]]
- ; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[UMULH1]]
+ ; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[COPY7]], [[UMULH1]]
; MIPS32-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD6]](s32), [[UMULH1]]
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[ADD6]](s32)
; MIPS32-NEXT: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[ADD5]], [[ICMP4]]
- ; MIPS32-NEXT: [[ADD8:%[0-9]+]]:_(s32) = G_ADD [[ADD6]], [[UMULH2]]
+ ; MIPS32-NEXT: [[ADD8:%[0-9]+]]:_(s32) = G_ADD [[COPY8]], [[UMULH2]]
; MIPS32-NEXT: [[ICMP5:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD8]](s32), [[UMULH2]]
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[ADD8]](s32)
; MIPS32-NEXT: [[ADD9:%[0-9]+]]:_(s32) = G_ADD [[ADD7]], [[ICMP5]]
- ; MIPS32-NEXT: [[ADD10:%[0-9]+]]:_(s32) = G_ADD [[ADD8]], [[ADD2]]
+ ; MIPS32-NEXT: [[ADD10:%[0-9]+]]:_(s32) = G_ADD [[COPY9]], [[ADD2]]
; MIPS32-NEXT: [[ICMP6:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD10]](s32), [[ADD2]]
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[ADD10]](s32)
; MIPS32-NEXT: [[ADD11:%[0-9]+]]:_(s32) = G_ADD [[ADD9]], [[ICMP6]]
; MIPS32-NEXT: [[MUL6:%[0-9]+]]:_(s32) = G_MUL [[LOAD3]], [[COPY]]
; MIPS32-NEXT: [[MUL7:%[0-9]+]]:_(s32) = G_MUL [[LOAD2]], [[COPY1]]
@@ -312,8 +319,8 @@ body: |
; MIPS32-NEXT: [[ADD17:%[0-9]+]]:_(s32) = G_ADD [[ADD16]], [[UMULH5]]
; MIPS32-NEXT: [[ADD18:%[0-9]+]]:_(s32) = G_ADD [[ADD17]], [[ADD11]]
; MIPS32-NEXT: $v0 = COPY [[MUL]](s32)
- ; MIPS32-NEXT: $v1 = COPY [[ADD1]](s32)
- ; MIPS32-NEXT: $a0 = COPY [[ADD10]](s32)
+ ; MIPS32-NEXT: $v1 = COPY [[COPY5]](s32)
+ ; MIPS32-NEXT: $a0 = COPY [[COPY10]](s32)
; MIPS32-NEXT: $a1 = COPY [[ADD18]](s32)
; MIPS32-NEXT: RetRA implicit $v0, implicit $v1, implicit $a0, implicit $a1
%2:_(s32) = COPY $a0
@@ -359,23 +366,28 @@ body: |
; MIPS32-NEXT: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[COPY2]], [[COPY]]
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL]], [[MUL1]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[MUL1]]
- ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[UMULH]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY4]], [[UMULH]]
; MIPS32-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[UMULH]]
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD1]](s32)
; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ICMP]], [[ICMP1]]
; MIPS32-NEXT: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[COPY3]], [[COPY1]]
; MIPS32-NEXT: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[COPY3]], [[COPY]]
; MIPS32-NEXT: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH [[COPY2]], [[COPY1]]
; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[MUL2]], [[UMULH1]]
; MIPS32-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[UMULH1]]
- ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[UMULH2]]
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
+ ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[COPY6]], [[UMULH2]]
; MIPS32-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD4]](s32), [[UMULH2]]
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
; MIPS32-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ICMP2]], [[ICMP3]]
- ; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[ADD2]]
+ ; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[COPY7]], [[ADD2]]
; MIPS32-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD6]](s32), [[ADD2]]
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[ADD6]](s32)
; MIPS32-NEXT: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[ADD5]], [[ICMP4]]
; MIPS32-NEXT: [[UMULH3:%[0-9]+]]:_(s32) = G_UMULH [[COPY3]], [[COPY1]]
; MIPS32-NEXT: [[ADD8:%[0-9]+]]:_(s32) = G_ADD [[UMULH3]], [[ADD7]]
- ; MIPS32-NEXT: $v0 = COPY [[ADD6]](s32)
+ ; MIPS32-NEXT: $v0 = COPY [[COPY8]](s32)
; MIPS32-NEXT: $v1 = COPY [[ADD8]](s32)
; MIPS32-NEXT: RetRA implicit $v0, implicit $v1
%2:_(s32) = COPY $a0
diff --git a/llvm/test/CodeGen/Mips/atomic-min-max.ll b/llvm/test/CodeGen/Mips/atomic-min-max.ll
index bc3643f..a96581bd 100644
--- a/llvm/test/CodeGen/Mips/atomic-min-max.ll
+++ b/llvm/test/CodeGen/Mips/atomic-min-max.ll
@@ -2146,6 +2146,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS32-NEXT: $BB6_1: # %entry
; MIPS32-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS32-NEXT: ll $2, 0($6)
+; MIPS32-NEXT: and $2, $2, $8
+; MIPS32-NEXT: and $7, $7, $8
; MIPS32-NEXT: sltu $5, $2, $7
; MIPS32-NEXT: move $3, $2
; MIPS32-NEXT: movn $3, $7, $5
@@ -2186,6 +2188,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPSEL-NEXT: $BB6_1: # %entry
; MIPSEL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSEL-NEXT: ll $2, 0($6)
+; MIPSEL-NEXT: and $2, $2, $8
+; MIPSEL-NEXT: and $7, $7, $8
; MIPSEL-NEXT: sltu $5, $2, $7
; MIPSEL-NEXT: move $3, $2
; MIPSEL-NEXT: movn $3, $7, $5
@@ -2225,6 +2229,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPSELR6-NEXT: $BB6_1: # %entry
; MIPSELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSELR6-NEXT: ll $2, 0($6)
+; MIPSELR6-NEXT: and $2, $2, $8
+; MIPSELR6-NEXT: and $7, $7, $8
; MIPSELR6-NEXT: sltu $5, $2, $7
; MIPSELR6-NEXT: seleqz $3, $2, $5
; MIPSELR6-NEXT: selnez $5, $7, $5
@@ -2263,6 +2269,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MMEL-NEXT: $BB6_1: # %entry
; MMEL-NEXT: # =>This Inner Loop Header: Depth=1
; MMEL-NEXT: ll $2, 0($6)
+; MMEL-NEXT: and $2, $2, $8
+; MMEL-NEXT: and $7, $7, $8
; MMEL-NEXT: sltu $5, $2, $7
; MMEL-NEXT: or $3, $2, $zero
; MMEL-NEXT: movn $3, $7, $5
@@ -2300,6 +2308,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MMELR6-NEXT: $BB6_1: # %entry
; MMELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MMELR6-NEXT: ll $2, 0($6)
+; MMELR6-NEXT: and $2, $2, $8
+; MMELR6-NEXT: and $7, $7, $8
; MMELR6-NEXT: sltu $5, $2, $7
; MMELR6-NEXT: seleqz $3, $2, $5
; MMELR6-NEXT: selnez $5, $7, $5
@@ -2417,6 +2427,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS64EL-NEXT: .LBB6_1: # %entry
; MIPS64EL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EL-NEXT: ll $2, 0($6)
+; MIPS64EL-NEXT: and $2, $2, $8
+; MIPS64EL-NEXT: and $7, $7, $8
; MIPS64EL-NEXT: sltu $5, $2, $7
; MIPS64EL-NEXT: move $3, $2
; MIPS64EL-NEXT: movn $3, $7, $5
@@ -2456,6 +2468,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS64ELR6-NEXT: .LBB6_1: # %entry
; MIPS64ELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64ELR6-NEXT: ll $2, 0($6)
+; MIPS64ELR6-NEXT: and $2, $2, $8
+; MIPS64ELR6-NEXT: and $7, $7, $8
; MIPS64ELR6-NEXT: sltu $5, $2, $7
; MIPS64ELR6-NEXT: seleqz $3, $2, $5
; MIPS64ELR6-NEXT: selnez $5, $7, $5
@@ -2655,6 +2669,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS32-NEXT: $BB7_1: # %entry
; MIPS32-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS32-NEXT: ll $2, 0($6)
+; MIPS32-NEXT: and $2, $2, $8
+; MIPS32-NEXT: and $7, $7, $8
; MIPS32-NEXT: sltu $5, $2, $7
; MIPS32-NEXT: move $3, $2
; MIPS32-NEXT: movz $3, $7, $5
@@ -2696,6 +2712,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPSEL-NEXT: $BB7_1: # %entry
; MIPSEL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSEL-NEXT: ll $2, 0($6)
+; MIPSEL-NEXT: and $2, $2, $8
+; MIPSEL-NEXT: and $7, $7, $8
; MIPSEL-NEXT: sltu $5, $2, $7
; MIPSEL-NEXT: move $3, $2
; MIPSEL-NEXT: movz $3, $7, $5
@@ -2735,6 +2753,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPSELR6-NEXT: $BB7_1: # %entry
; MIPSELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSELR6-NEXT: ll $2, 0($6)
+; MIPSELR6-NEXT: and $2, $2, $8
+; MIPSELR6-NEXT: and $7, $7, $8
; MIPSELR6-NEXT: sltu $5, $2, $7
; MIPSELR6-NEXT: selnez $3, $2, $5
; MIPSELR6-NEXT: seleqz $5, $7, $5
@@ -2773,6 +2793,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MMEL-NEXT: $BB7_1: # %entry
; MMEL-NEXT: # =>This Inner Loop Header: Depth=1
; MMEL-NEXT: ll $2, 0($6)
+; MMEL-NEXT: and $2, $2, $8
+; MMEL-NEXT: and $7, $7, $8
; MMEL-NEXT: sltu $5, $2, $7
; MMEL-NEXT: or $3, $2, $zero
; MMEL-NEXT: movz $3, $7, $5
@@ -2810,6 +2832,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MMELR6-NEXT: $BB7_1: # %entry
; MMELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MMELR6-NEXT: ll $2, 0($6)
+; MMELR6-NEXT: and $2, $2, $8
+; MMELR6-NEXT: and $7, $7, $8
; MMELR6-NEXT: sltu $5, $2, $7
; MMELR6-NEXT: selnez $3, $2, $5
; MMELR6-NEXT: seleqz $5, $7, $5
@@ -2927,6 +2951,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS64EL-NEXT: .LBB7_1: # %entry
; MIPS64EL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EL-NEXT: ll $2, 0($6)
+; MIPS64EL-NEXT: and $2, $2, $8
+; MIPS64EL-NEXT: and $7, $7, $8
; MIPS64EL-NEXT: sltu $5, $2, $7
; MIPS64EL-NEXT: move $3, $2
; MIPS64EL-NEXT: movz $3, $7, $5
@@ -2966,6 +2992,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS64ELR6-NEXT: .LBB7_1: # %entry
; MIPS64ELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64ELR6-NEXT: ll $2, 0($6)
+; MIPS64ELR6-NEXT: and $2, $2, $8
+; MIPS64ELR6-NEXT: and $7, $7, $8
; MIPS64ELR6-NEXT: sltu $5, $2, $7
; MIPS64ELR6-NEXT: selnez $3, $2, $5
; MIPS64ELR6-NEXT: seleqz $5, $7, $5
@@ -4244,6 +4272,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS32-NEXT: $BB10_1: # %entry
; MIPS32-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS32-NEXT: ll $2, 0($6)
+; MIPS32-NEXT: and $2, $2, $8
+; MIPS32-NEXT: and $7, $7, $8
; MIPS32-NEXT: sltu $5, $2, $7
; MIPS32-NEXT: move $3, $2
; MIPS32-NEXT: movn $3, $7, $5
@@ -4284,6 +4314,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPSEL-NEXT: $BB10_1: # %entry
; MIPSEL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSEL-NEXT: ll $2, 0($6)
+; MIPSEL-NEXT: and $2, $2, $8
+; MIPSEL-NEXT: and $7, $7, $8
; MIPSEL-NEXT: sltu $5, $2, $7
; MIPSEL-NEXT: move $3, $2
; MIPSEL-NEXT: movn $3, $7, $5
@@ -4323,6 +4355,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPSELR6-NEXT: $BB10_1: # %entry
; MIPSELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSELR6-NEXT: ll $2, 0($6)
+; MIPSELR6-NEXT: and $2, $2, $8
+; MIPSELR6-NEXT: and $7, $7, $8
; MIPSELR6-NEXT: sltu $5, $2, $7
; MIPSELR6-NEXT: seleqz $3, $2, $5
; MIPSELR6-NEXT: selnez $5, $7, $5
@@ -4361,6 +4395,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MMEL-NEXT: $BB10_1: # %entry
; MMEL-NEXT: # =>This Inner Loop Header: Depth=1
; MMEL-NEXT: ll $2, 0($6)
+; MMEL-NEXT: and $2, $2, $8
+; MMEL-NEXT: and $7, $7, $8
; MMEL-NEXT: sltu $5, $2, $7
; MMEL-NEXT: or $3, $2, $zero
; MMEL-NEXT: movn $3, $7, $5
@@ -4398,6 +4434,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MMELR6-NEXT: $BB10_1: # %entry
; MMELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MMELR6-NEXT: ll $2, 0($6)
+; MMELR6-NEXT: and $2, $2, $8
+; MMELR6-NEXT: and $7, $7, $8
; MMELR6-NEXT: sltu $5, $2, $7
; MMELR6-NEXT: seleqz $3, $2, $5
; MMELR6-NEXT: selnez $5, $7, $5
@@ -4515,6 +4553,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS64EL-NEXT: .LBB10_1: # %entry
; MIPS64EL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EL-NEXT: ll $2, 0($6)
+; MIPS64EL-NEXT: and $2, $2, $8
+; MIPS64EL-NEXT: and $7, $7, $8
; MIPS64EL-NEXT: sltu $5, $2, $7
; MIPS64EL-NEXT: move $3, $2
; MIPS64EL-NEXT: movn $3, $7, $5
@@ -4554,6 +4594,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS64ELR6-NEXT: .LBB10_1: # %entry
; MIPS64ELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64ELR6-NEXT: ll $2, 0($6)
+; MIPS64ELR6-NEXT: and $2, $2, $8
+; MIPS64ELR6-NEXT: and $7, $7, $8
; MIPS64ELR6-NEXT: sltu $5, $2, $7
; MIPS64ELR6-NEXT: seleqz $3, $2, $5
; MIPS64ELR6-NEXT: selnez $5, $7, $5
@@ -4753,6 +4795,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS32-NEXT: $BB11_1: # %entry
; MIPS32-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS32-NEXT: ll $2, 0($6)
+; MIPS32-NEXT: and $2, $2, $8
+; MIPS32-NEXT: and $7, $7, $8
; MIPS32-NEXT: sltu $5, $2, $7
; MIPS32-NEXT: move $3, $2
; MIPS32-NEXT: movz $3, $7, $5
@@ -4793,6 +4837,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPSEL-NEXT: $BB11_1: # %entry
; MIPSEL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSEL-NEXT: ll $2, 0($6)
+; MIPSEL-NEXT: and $2, $2, $8
+; MIPSEL-NEXT: and $7, $7, $8
; MIPSEL-NEXT: sltu $5, $2, $7
; MIPSEL-NEXT: move $3, $2
; MIPSEL-NEXT: movz $3, $7, $5
@@ -4832,6 +4878,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPSELR6-NEXT: $BB11_1: # %entry
; MIPSELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSELR6-NEXT: ll $2, 0($6)
+; MIPSELR6-NEXT: and $2, $2, $8
+; MIPSELR6-NEXT: and $7, $7, $8
; MIPSELR6-NEXT: sltu $5, $2, $7
; MIPSELR6-NEXT: selnez $3, $2, $5
; MIPSELR6-NEXT: seleqz $5, $7, $5
@@ -4870,6 +4918,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MMEL-NEXT: $BB11_1: # %entry
; MMEL-NEXT: # =>This Inner Loop Header: Depth=1
; MMEL-NEXT: ll $2, 0($6)
+; MMEL-NEXT: and $2, $2, $8
+; MMEL-NEXT: and $7, $7, $8
; MMEL-NEXT: sltu $5, $2, $7
; MMEL-NEXT: or $3, $2, $zero
; MMEL-NEXT: movz $3, $7, $5
@@ -4907,6 +4957,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MMELR6-NEXT: $BB11_1: # %entry
; MMELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MMELR6-NEXT: ll $2, 0($6)
+; MMELR6-NEXT: and $2, $2, $8
+; MMELR6-NEXT: and $7, $7, $8
; MMELR6-NEXT: sltu $5, $2, $7
; MMELR6-NEXT: selnez $3, $2, $5
; MMELR6-NEXT: seleqz $5, $7, $5
@@ -5024,6 +5076,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS64EL-NEXT: .LBB11_1: # %entry
; MIPS64EL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EL-NEXT: ll $2, 0($6)
+; MIPS64EL-NEXT: and $2, $2, $8
+; MIPS64EL-NEXT: and $7, $7, $8
; MIPS64EL-NEXT: sltu $5, $2, $7
; MIPS64EL-NEXT: move $3, $2
; MIPS64EL-NEXT: movz $3, $7, $5
@@ -5063,6 +5117,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS64ELR6-NEXT: .LBB11_1: # %entry
; MIPS64ELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64ELR6-NEXT: ll $2, 0($6)
+; MIPS64ELR6-NEXT: and $2, $2, $8
+; MIPS64ELR6-NEXT: and $7, $7, $8
; MIPS64ELR6-NEXT: sltu $5, $2, $7
; MIPS64ELR6-NEXT: selnez $3, $2, $5
; MIPS64ELR6-NEXT: seleqz $5, $7, $5
diff --git a/llvm/test/CodeGen/Mips/avoid-zero-copy.mir b/llvm/test/CodeGen/Mips/avoid-zero-copy.mir
index 5c7cffd..e3990bd 100644
--- a/llvm/test/CodeGen/Mips/avoid-zero-copy.mir
+++ b/llvm/test/CodeGen/Mips/avoid-zero-copy.mir
@@ -19,6 +19,8 @@
...
---
name: a
+frameInfo:
+ adjustsStack: true
body: |
bb.0 (%ir-block.0):
liveins: $a0_64, $t9_64, $ra_64, $fp_64, $gp_64
diff --git a/llvm/test/CodeGen/Mips/msa/emergency-spill.mir b/llvm/test/CodeGen/Mips/msa/emergency-spill.mir
index e1c7b21..2089464 100644
--- a/llvm/test/CodeGen/Mips/msa/emergency-spill.mir
+++ b/llvm/test/CodeGen/Mips/msa/emergency-spill.mir
@@ -90,7 +90,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 16
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll b/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll
index 6a27c9f..45c7ab9 100644
--- a/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll
+++ b/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll
@@ -405,8 +405,9 @@ define void @uitofp(i32 %a) {
; MIPS64-N32-NEXT: addiu $1, $1, %lo(%neg(%gp_rel(uitofp)))
; MIPS64-N32-NEXT: lui $2, 17200
; MIPS64-N32-NEXT: sw $2, 12($sp)
-; MIPS64-N32-NEXT: sll $2, $4, 0
-; MIPS64-N32-NEXT: sw $2, 8($sp)
+; MIPS64R5-N32-NEXT: sll $2, $4, 0
+; MIPS64R5-N32-NEXT: sw $2, 8($sp)
+; MIPSR6-N32-NEXT: sw $4, 8($sp)
; MIPS64-N32-NEXT: lw $2, %got_page(.LCPI5_0)($1)
; MIPS64-N32-NEXT: ldc1 $f0, %got_ofst(.LCPI5_0)($2)
; MIPS64-N32-NEXT: ldc1 $f1, 8($sp)
@@ -430,8 +431,9 @@ define void @uitofp(i32 %a) {
; MIPS64-N64-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(uitofp)))
; MIPS64-N64-NEXT: lui $2, 17200
; MIPS64-N64-NEXT: sw $2, 12($sp)
-; MIPS64-N64-NEXT: sll $2, $4, 0
-; MIPS64-N64-NEXT: sw $2, 8($sp)
+; MIPS64R5-N64-NEXT: sll $2, $4, 0
+; MIPS64R5-N64-NEXT: sw $2, 8($sp)
+; MIPSR6-N64-NEXT: sw $4, 8($sp)
; MIPS64-N64-NEXT: ld $2, %got_page(.LCPI5_0)($1)
; MIPS64-N64-NEXT: ldc1 $f0, %got_ofst(.LCPI5_0)($2)
; MIPS64-N64-NEXT: ldc1 $f1, 8($sp)
diff --git a/llvm/test/CodeGen/Mips/no-unaligned-access-r6.ll b/llvm/test/CodeGen/Mips/no-unaligned-access-r6.ll
new file mode 100644
index 0000000..0695868
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/no-unaligned-access-r6.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+;; Test the strict-align feature; this test is similar to AArch64/arm64-strict-align.ll.
+
+; RUN: llc --mtriple=mipsisa32r6 < %s | FileCheck %s --check-prefix=MIPS32R6-UNALIGNED
+; RUN: llc --mtriple=mipsisa32r6 --mattr=-strict-align < %s | FileCheck %s --check-prefix=MIPS32R6-UNALIGNED
+; RUN: llc --mtriple=mipsisa32r6 --mattr=+strict-align < %s | FileCheck %s --check-prefix=MIPS32R6-ALIGNED
+
+; RUN: llc --mtriple=mipsisa64r6 < %s | FileCheck %s --check-prefix=MIPS64R6-UNALIGNED
+; RUN: llc --mtriple=mipsisa64r6 --mattr=-strict-align < %s | FileCheck %s --check-prefix=MIPS64R6-UNALIGNED
+; RUN: llc --mtriple=mipsisa64r6 --mattr=+strict-align < %s | FileCheck %s --check-prefix=MIPS64R6-ALIGNED
+
+define i32 @f0(ptr %p) nounwind {
+; MIPS32R6-UNALIGNED-LABEL: f0:
+; MIPS32R6-UNALIGNED: # %bb.0:
+; MIPS32R6-UNALIGNED-NEXT: lw $2, 0($4)
+; MIPS32R6-UNALIGNED-NEXT: jrc $ra
+;
+; MIPS32R6-ALIGNED-LABEL: f0:
+; MIPS32R6-ALIGNED: # %bb.0:
+; MIPS32R6-ALIGNED-NEXT: lhu $1, 2($4)
+; MIPS32R6-ALIGNED-NEXT: lhu $2, 0($4)
+; MIPS32R6-ALIGNED-NEXT: sll $2, $2, 16
+; MIPS32R6-ALIGNED-NEXT: jr $ra
+; MIPS32R6-ALIGNED-NEXT: or $2, $2, $1
+;
+; MIPS64R6-UNALIGNED-LABEL: f0:
+; MIPS64R6-UNALIGNED: # %bb.0:
+; MIPS64R6-UNALIGNED-NEXT: lw $2, 0($4)
+; MIPS64R6-UNALIGNED-NEXT: jrc $ra
+;
+; MIPS64R6-ALIGNED-LABEL: f0:
+; MIPS64R6-ALIGNED: # %bb.0:
+; MIPS64R6-ALIGNED-NEXT: lhu $1, 2($4)
+; MIPS64R6-ALIGNED-NEXT: lhu $2, 0($4)
+; MIPS64R6-ALIGNED-NEXT: sll $2, $2, 16
+; MIPS64R6-ALIGNED-NEXT: jr $ra
+; MIPS64R6-ALIGNED-NEXT: or $2, $2, $1
+ %tmp = load i32, ptr %p, align 2
+ ret i32 %tmp
+}
+
+define i64 @f1(ptr %p) nounwind {
+; MIPS32R6-UNALIGNED-LABEL: f1:
+; MIPS32R6-UNALIGNED: # %bb.0:
+; MIPS32R6-UNALIGNED-NEXT: lw $2, 0($4)
+; MIPS32R6-UNALIGNED-NEXT: lw $3, 4($4)
+; MIPS32R6-UNALIGNED-NEXT: jrc $ra
+;
+; MIPS32R6-ALIGNED-LABEL: f1:
+; MIPS32R6-ALIGNED: # %bb.0:
+; MIPS32R6-ALIGNED-NEXT: lw $2, 0($4)
+; MIPS32R6-ALIGNED-NEXT: lw $3, 4($4)
+; MIPS32R6-ALIGNED-NEXT: jrc $ra
+;
+; MIPS64R6-UNALIGNED-LABEL: f1:
+; MIPS64R6-UNALIGNED: # %bb.0:
+; MIPS64R6-UNALIGNED-NEXT: ld $2, 0($4)
+; MIPS64R6-UNALIGNED-NEXT: jrc $ra
+;
+; MIPS64R6-ALIGNED-LABEL: f1:
+; MIPS64R6-ALIGNED: # %bb.0:
+; MIPS64R6-ALIGNED-NEXT: lwu $1, 4($4)
+; MIPS64R6-ALIGNED-NEXT: lwu $2, 0($4)
+; MIPS64R6-ALIGNED-NEXT: dsll $2, $2, 32
+; MIPS64R6-ALIGNED-NEXT: jr $ra
+; MIPS64R6-ALIGNED-NEXT: or $2, $2, $1
+ %tmp = load i64, ptr %p, align 4
+ ret i64 %tmp
+}
diff --git a/llvm/test/CodeGen/NVPTX/atomics-sm70.ll b/llvm/test/CodeGen/NVPTX/atomics-sm70.ll
new file mode 100644
index 0000000..9cc45fb
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/atomics-sm70.ll
@@ -0,0 +1,142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx63 | FileCheck %s --check-prefixes=CHECK
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx63 | FileCheck %s --check-prefixes=CHECK64
+; RUN: llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx62 | FileCheck %s --check-prefixes=CHECKPTX62
+; RUN: %if ptxas && !ptxas-12.0 %{ llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx63 | %ptxas-verify -arch=sm_70 %}
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx63 | %ptxas-verify -arch=sm_70 %}
+; RUN: %if ptxas && !ptxas-12.0 %{ llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx62 | %ptxas-verify -arch=sm_70 %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+define void @test(ptr %dp0, ptr addrspace(1) %dp1, ptr addrspace(3) %dp3, half %val) {
+; CHECK-LABEL: test(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<7>;
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [test_param_0];
+; CHECK-NEXT: ld.param.b16 %rs1, [test_param_3];
+; CHECK-NEXT: atom.add.noftz.f16 %rs2, [%r1], %rs1;
+; CHECK-NEXT: ld.param.u32 %r2, [test_param_1];
+; CHECK-NEXT: mov.b16 %rs3, 0x3C00;
+; CHECK-NEXT: atom.add.noftz.f16 %rs4, [%r1], %rs3;
+; CHECK-NEXT: ld.param.u32 %r3, [test_param_2];
+; CHECK-NEXT: atom.global.add.noftz.f16 %rs5, [%r2], %rs1;
+; CHECK-NEXT: atom.shared.add.noftz.f16 %rs6, [%r3], %rs1;
+; CHECK-NEXT: ret;
+;
+; CHECK64-LABEL: test(
+; CHECK64: {
+; CHECK64-NEXT: .reg .b16 %rs<7>;
+; CHECK64-NEXT: .reg .b64 %rd<4>;
+; CHECK64-EMPTY:
+; CHECK64-NEXT: // %bb.0:
+; CHECK64-NEXT: ld.param.u64 %rd1, [test_param_0];
+; CHECK64-NEXT: ld.param.b16 %rs1, [test_param_3];
+; CHECK64-NEXT: atom.add.noftz.f16 %rs2, [%rd1], %rs1;
+; CHECK64-NEXT: ld.param.u64 %rd2, [test_param_1];
+; CHECK64-NEXT: mov.b16 %rs3, 0x3C00;
+; CHECK64-NEXT: atom.add.noftz.f16 %rs4, [%rd1], %rs3;
+; CHECK64-NEXT: ld.param.u64 %rd3, [test_param_2];
+; CHECK64-NEXT: atom.global.add.noftz.f16 %rs5, [%rd2], %rs1;
+; CHECK64-NEXT: atom.shared.add.noftz.f16 %rs6, [%rd3], %rs1;
+; CHECK64-NEXT: ret;
+;
+; CHECKPTX62-LABEL: test(
+; CHECKPTX62: {
+; CHECKPTX62-NEXT: .reg .pred %p<5>;
+; CHECKPTX62-NEXT: .reg .b16 %rs<19>;
+; CHECKPTX62-NEXT: .reg .b32 %r<58>;
+; CHECKPTX62-EMPTY:
+; CHECKPTX62-NEXT: // %bb.0:
+; CHECKPTX62-NEXT: ld.param.b16 %rs1, [test_param_3];
+; CHECKPTX62-NEXT: ld.param.u32 %r23, [test_param_2];
+; CHECKPTX62-NEXT: ld.param.u32 %r22, [test_param_1];
+; CHECKPTX62-NEXT: ld.param.u32 %r24, [test_param_0];
+; CHECKPTX62-NEXT: and.b32 %r1, %r24, -4;
+; CHECKPTX62-NEXT: and.b32 %r25, %r24, 3;
+; CHECKPTX62-NEXT: shl.b32 %r2, %r25, 3;
+; CHECKPTX62-NEXT: mov.b32 %r26, 65535;
+; CHECKPTX62-NEXT: shl.b32 %r27, %r26, %r2;
+; CHECKPTX62-NEXT: not.b32 %r3, %r27;
+; CHECKPTX62-NEXT: ld.u32 %r54, [%r1];
+; CHECKPTX62-NEXT: $L__BB0_1: // %atomicrmw.start
+; CHECKPTX62-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECKPTX62-NEXT: shr.u32 %r28, %r54, %r2;
+; CHECKPTX62-NEXT: cvt.u16.u32 %rs2, %r28;
+; CHECKPTX62-NEXT: add.rn.f16 %rs4, %rs2, %rs1;
+; CHECKPTX62-NEXT: cvt.u32.u16 %r29, %rs4;
+; CHECKPTX62-NEXT: shl.b32 %r30, %r29, %r2;
+; CHECKPTX62-NEXT: and.b32 %r31, %r54, %r3;
+; CHECKPTX62-NEXT: or.b32 %r32, %r31, %r30;
+; CHECKPTX62-NEXT: atom.cas.b32 %r6, [%r1], %r54, %r32;
+; CHECKPTX62-NEXT: setp.ne.s32 %p1, %r6, %r54;
+; CHECKPTX62-NEXT: mov.u32 %r54, %r6;
+; CHECKPTX62-NEXT: @%p1 bra $L__BB0_1;
+; CHECKPTX62-NEXT: // %bb.2: // %atomicrmw.end
+; CHECKPTX62-NEXT: ld.u32 %r55, [%r1];
+; CHECKPTX62-NEXT: $L__BB0_3: // %atomicrmw.start9
+; CHECKPTX62-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECKPTX62-NEXT: shr.u32 %r33, %r55, %r2;
+; CHECKPTX62-NEXT: cvt.u16.u32 %rs6, %r33;
+; CHECKPTX62-NEXT: mov.b16 %rs8, 0x3C00;
+; CHECKPTX62-NEXT: add.rn.f16 %rs9, %rs6, %rs8;
+; CHECKPTX62-NEXT: cvt.u32.u16 %r34, %rs9;
+; CHECKPTX62-NEXT: shl.b32 %r35, %r34, %r2;
+; CHECKPTX62-NEXT: and.b32 %r36, %r55, %r3;
+; CHECKPTX62-NEXT: or.b32 %r37, %r36, %r35;
+; CHECKPTX62-NEXT: atom.cas.b32 %r9, [%r1], %r55, %r37;
+; CHECKPTX62-NEXT: setp.ne.s32 %p2, %r9, %r55;
+; CHECKPTX62-NEXT: mov.u32 %r55, %r9;
+; CHECKPTX62-NEXT: @%p2 bra $L__BB0_3;
+; CHECKPTX62-NEXT: // %bb.4: // %atomicrmw.end8
+; CHECKPTX62-NEXT: and.b32 %r10, %r22, -4;
+; CHECKPTX62-NEXT: shl.b32 %r38, %r22, 3;
+; CHECKPTX62-NEXT: and.b32 %r11, %r38, 24;
+; CHECKPTX62-NEXT: shl.b32 %r40, %r26, %r11;
+; CHECKPTX62-NEXT: not.b32 %r12, %r40;
+; CHECKPTX62-NEXT: ld.global.u32 %r56, [%r10];
+; CHECKPTX62-NEXT: $L__BB0_5: // %atomicrmw.start27
+; CHECKPTX62-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECKPTX62-NEXT: shr.u32 %r41, %r56, %r11;
+; CHECKPTX62-NEXT: cvt.u16.u32 %rs11, %r41;
+; CHECKPTX62-NEXT: add.rn.f16 %rs13, %rs11, %rs1;
+; CHECKPTX62-NEXT: cvt.u32.u16 %r42, %rs13;
+; CHECKPTX62-NEXT: shl.b32 %r43, %r42, %r11;
+; CHECKPTX62-NEXT: and.b32 %r44, %r56, %r12;
+; CHECKPTX62-NEXT: or.b32 %r45, %r44, %r43;
+; CHECKPTX62-NEXT: atom.global.cas.b32 %r15, [%r10], %r56, %r45;
+; CHECKPTX62-NEXT: setp.ne.s32 %p3, %r15, %r56;
+; CHECKPTX62-NEXT: mov.u32 %r56, %r15;
+; CHECKPTX62-NEXT: @%p3 bra $L__BB0_5;
+; CHECKPTX62-NEXT: // %bb.6: // %atomicrmw.end26
+; CHECKPTX62-NEXT: and.b32 %r16, %r23, -4;
+; CHECKPTX62-NEXT: shl.b32 %r46, %r23, 3;
+; CHECKPTX62-NEXT: and.b32 %r17, %r46, 24;
+; CHECKPTX62-NEXT: shl.b32 %r48, %r26, %r17;
+; CHECKPTX62-NEXT: not.b32 %r18, %r48;
+; CHECKPTX62-NEXT: ld.shared.u32 %r57, [%r16];
+; CHECKPTX62-NEXT: $L__BB0_7: // %atomicrmw.start45
+; CHECKPTX62-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECKPTX62-NEXT: shr.u32 %r49, %r57, %r17;
+; CHECKPTX62-NEXT: cvt.u16.u32 %rs15, %r49;
+; CHECKPTX62-NEXT: add.rn.f16 %rs17, %rs15, %rs1;
+; CHECKPTX62-NEXT: cvt.u32.u16 %r50, %rs17;
+; CHECKPTX62-NEXT: shl.b32 %r51, %r50, %r17;
+; CHECKPTX62-NEXT: and.b32 %r52, %r57, %r18;
+; CHECKPTX62-NEXT: or.b32 %r53, %r52, %r51;
+; CHECKPTX62-NEXT: atom.shared.cas.b32 %r21, [%r16], %r57, %r53;
+; CHECKPTX62-NEXT: setp.ne.s32 %p4, %r21, %r57;
+; CHECKPTX62-NEXT: mov.u32 %r57, %r21;
+; CHECKPTX62-NEXT: @%p4 bra $L__BB0_7;
+; CHECKPTX62-NEXT: // %bb.8: // %atomicrmw.end44
+; CHECKPTX62-NEXT: ret;
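+; On this older-PTX (6.2-style) configuration there is no native f16 atom.add,
+; so each fadd above is emulated with a 32-bit atom.cas loop: load the word
+; containing the half, extract it by shift/mask, add, splice the result back
+; into the word, and retry the CAS until no other thread has changed the word.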
+ %r1 = atomicrmw fadd ptr %dp0, half %val seq_cst
+ %r2 = atomicrmw fadd ptr %dp0, half 1.0 seq_cst
+ %r3 = atomicrmw fadd ptr addrspace(1) %dp1, half %val seq_cst
+ %r4 = atomicrmw fadd ptr addrspace(3) %dp3, half %val seq_cst
+ ret void
+}
+
+attributes #1 = { argmemonly nounwind }
diff --git a/llvm/test/CodeGen/NVPTX/atomics.ll b/llvm/test/CodeGen/NVPTX/atomics.ll
index e99d0fd..6f2b5dc 100644
--- a/llvm/test/CodeGen/NVPTX/atomics.ll
+++ b/llvm/test/CodeGen/NVPTX/atomics.ll
@@ -175,6 +175,13 @@ define float @atomicrmw_add_f32_generic(ptr %addr, float %val) {
ret float %ret
}
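+; A generic-address-space f16 fadd has no direct atomic lowering here, so it
+; is expanded to a compare-and-swap loop; the test only looks for the atom.cas.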
+; CHECK-LABEL: atomicrmw_add_f16_generic
+define half @atomicrmw_add_f16_generic(ptr %addr, half %val) {
+; CHECK: atom.cas
+ %ret = atomicrmw fadd ptr %addr, half %val seq_cst
+ ret half %ret
+}
+
; CHECK-LABEL: atomicrmw_add_f32_addrspace1
define float @atomicrmw_add_f32_addrspace1(ptr addrspace(1) %addr, float %val) {
; CHECK: atom.global.add.f32
diff --git a/llvm/test/CodeGen/NVPTX/b52037.ll b/llvm/test/CodeGen/NVPTX/b52037.ll
index d9322da..5d1c390 100644
--- a/llvm/test/CodeGen/NVPTX/b52037.ll
+++ b/llvm/test/CodeGen/NVPTX/b52037.ll
@@ -47,7 +47,7 @@ bb:
%tmp5 = load ptr, ptr %tmp4, align 8
%tmp9 = getelementptr inbounds %struct.zot, ptr %tmp, i64 0, i32 2, i32 1
store ptr %tmp5, ptr %tmp9, align 8
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @global_1, i64 0, inrange i32 0, i64 3), ptr %tmp, align 16
+ store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @global_1, i64 0, i32 0, i64 3), ptr %tmp, align 16
%tmp.i1 = tail call i64 @foo()
%tmp44.i16 = getelementptr inbounds i16, ptr %tmp5, i64 undef
%tmp45.i17 = load i16, ptr %tmp44.i16, align 2
diff --git a/llvm/test/CodeGen/NVPTX/bswap.ll b/llvm/test/CodeGen/NVPTX/bswap.ll
new file mode 100644
index 0000000..3f929ec
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/bswap.ll
@@ -0,0 +1,77 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 | %ptxas-verify %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+define i16 @bswap16(i16 %a) {
+; CHECK-LABEL: bswap16(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<5>;
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [bswap16_param_0];
+; CHECK-NEXT: shr.u16 %rs2, %rs1, 8;
+; CHECK-NEXT: shl.b16 %rs3, %rs1, 8;
+; CHECK-NEXT: or.b16 %rs4, %rs3, %rs2;
+; CHECK-NEXT: cvt.u32.u16 %r1, %rs4;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1;
+; CHECK-NEXT: ret;
+ %b = tail call i16 @llvm.bswap.i16(i16 %a)
+ ret i16 %b
+}
+
+
+define i32 @bswap32(i32 %a) {
+; CHECK-LABEL: bswap32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [bswap32_param_0];
+; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 291;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2;
+; CHECK-NEXT: ret;
+ %b = tail call i32 @llvm.bswap.i32(i32 %a)
+ ret i32 %b
+}
+
+
+define <2 x i16> @bswapv2i16(<2 x i16> %a) #0 {
+; CHECK-LABEL: bswapv2i16(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [bswapv2i16_param_0];
+; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 8961;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2;
+; CHECK-NEXT: ret;
+ %b = tail call <2 x i16> @llvm.bswap.v2i16(<2 x i16> %a)
+ ret <2 x i16> %b
+}
+
+define i64 @bswap64(i64 %a) {
+; CHECK-LABEL: bswap64(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [bswap64_param_0];
+; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {%r1, tmp}, %rd1; }
+; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 291;
+; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r3}, %rd1; }
+; CHECK-NEXT: prmt.b32 %r4, %r3, 0, 291;
+; CHECK-NEXT: mov.b64 %rd2, {%r4, %r2};
+; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd2;
+; CHECK-NEXT: ret;
+ %b = tail call i64 @llvm.bswap.i64(i64 %a)
+ ret i64 %b
+}
+
+declare i16 @llvm.bswap.i16(i16)
+declare i32 @llvm.bswap.i32(i32)
+declare <2 x i16> @llvm.bswap.v2i16(<2 x i16>)
+declare i64 @llvm.bswap.i64(i64)
diff --git a/llvm/test/CodeGen/NVPTX/common-linkage.ll b/llvm/test/CodeGen/NVPTX/common-linkage.ll
new file mode 100644
index 0000000..976074e
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/common-linkage.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -mattr=+ptx43 | FileCheck %s --check-prefixes CHECK,PTX43
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -mattr=+ptx50 | FileCheck %s --check-prefixes CHECK,PTX50
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx43 | %ptxas-verify %}
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx50 | %ptxas-verify %}
+
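+; .common is only emitted for PTX ISA 5.0 and later; with older PTX versions
+; common globals fall back to .weak.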
+; PTX43: .weak .global .align 4 .u32 g
+; PTX50: .common .global .align 4 .u32 g
+@g = common addrspace(1) global i32 0, align 4
+
+; CHECK: .weak .const .align 4 .u32 c
+@c = common addrspace(4) global i32 0, align 4
+
+; CHECK: .weak .shared .align 4 .u32 s
+@s = common addrspace(3) global i32 0, align 4
+
+define i32 @f1() {
+ %1 = load i32, ptr addrspace(1) @g
+ ret i32 %1
+}
+
+define i32 @f4() {
+ %1 = load i32, ptr addrspace(4) @c
+ ret i32 %1
+}
+
+define i32 @f3() {
+ %1 = load i32, ptr addrspace(3) @s
+ ret i32 %1
+}
diff --git a/llvm/test/CodeGen/NVPTX/dynamic_stackalloc.ll b/llvm/test/CodeGen/NVPTX/dynamic_stackalloc.ll
index 3ef55ca..09297fb 100644
--- a/llvm/test/CodeGen/NVPTX/dynamic_stackalloc.ll
+++ b/llvm/test/CodeGen/NVPTX/dynamic_stackalloc.ll
@@ -1,10 +1,44 @@
-; RUN: not llc -march=nvptx < %s 2>&1 | FileCheck %s
-; RUN: not llc -march=nvptx64 < %s 2>&1 | FileCheck %s
+; RUN: not llc < %s -march=nvptx -mattr=+ptx72 -mcpu=sm_52 2>&1 | FileCheck %s --check-prefixes=CHECK-FAILS
+; RUN: not llc < %s -march=nvptx -mattr=+ptx73 -mcpu=sm_50 2>&1 | FileCheck %s --check-prefixes=CHECK-FAILS
-; CHECK: in function test_dynamic_stackalloc{{.*}}: dynamic alloca unsupported by NVPTX backend
+; RUN: llc < %s -march=nvptx -mattr=+ptx73 -mcpu=sm_52 | FileCheck %s --check-prefixes=CHECK,CHECK-32
+; RUN: llc < %s -march=nvptx64 -mattr=+ptx73 -mcpu=sm_52 | FileCheck %s --check-prefixes=CHECK,CHECK-64
+; RUN: %if ptxas && !ptxas-12.0 %{ llc < %s -march=nvptx -mattr=+ptx73 -mcpu=sm_52 | %ptxas-verify %}
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mattr=+ptx73 -mcpu=sm_52 | %ptxas-verify %}
-define void @test_dynamic_stackalloc(i64 %n) {
- %alloca = alloca i32, i64 %n
- store volatile i32 0, ptr %alloca
- ret void
+; CHECK-FAILS: in function test_dynamic_stackalloc{{.*}}: Support for dynamic alloca introduced in PTX ISA version 7.3 and requires target sm_52.
+
+; CHECK-LABEL: .visible .func (.param .b32 func_retval0) test_dynamic_stackalloc(
+; CHECK-NOT: __local_depot
+
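+; The requested size is rounded up to a multiple of 8 bytes (add 7, then mask
+; with -8) before the PTX alloca, and the returned local pointer is converted
+; to a generic pointer with cvta.local.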
+; CHECK-32: ld.param.u32 %r[[SIZE:[0-9]]], [test_dynamic_stackalloc_param_0];
+; CHECK-32-NEXT: mad.lo.s32 %r[[SIZE2:[0-9]]], %r[[SIZE]], 1, 7;
+; CHECK-32-NEXT: and.b32 %r[[SIZE3:[0-9]]], %r[[SIZE2]], -8;
+; CHECK-32-NEXT: alloca.u32 %r[[ALLOCA:[0-9]]], %r[[SIZE3]], 16;
+; CHECK-32-NEXT: cvta.local.u32 %r[[ALLOCA]], %r[[ALLOCA]];
+; CHECK-32-NEXT: { // callseq 0, 0
+; CHECK-32-NEXT: .reg .b32 temp_param_reg;
+; CHECK-32-NEXT: .param .b32 param0;
+; CHECK-32-NEXT: st.param.b32 [param0+0], %r[[ALLOCA]];
+
+; CHECK-64: ld.param.u64 %rd[[SIZE:[0-9]]], [test_dynamic_stackalloc_param_0];
+; CHECK-64-NEXT: add.s64 %rd[[SIZE2:[0-9]]], %rd[[SIZE]], 7;
+; CHECK-64-NEXT: and.b64 %rd[[SIZE3:[0-9]]], %rd[[SIZE2]], -8;
+; CHECK-64-NEXT: alloca.u64 %rd[[ALLOCA:[0-9]]], %rd[[SIZE3]], 16;
+; CHECK-64-NEXT: cvta.local.u64 %rd[[ALLOCA]], %rd[[ALLOCA]];
+; CHECK-64-NEXT: { // callseq 0, 0
+; CHECK-64-NEXT: .reg .b32 temp_param_reg;
+; CHECK-64-NEXT: .param .b64 param0;
+; CHECK-64-NEXT: st.param.b64 [param0+0], %rd[[ALLOCA]];
+
+; CHECK-NEXT: .param .b32 retval0;
+; CHECK-NEXT: call.uni (retval0),
+; CHECK-NEXT: bar,
+
+define i32 @test_dynamic_stackalloc(i64 %n) {
+ %alloca = alloca i8, i64 %n, align 16
+ %call = call i32 @bar(ptr %alloca)
+ ret i32 %call
}
+
+declare i32 @bar(ptr)
diff --git a/llvm/test/CodeGen/NVPTX/weak-global.ll b/llvm/test/CodeGen/NVPTX/weak-global.ll
index dd0160d..c5467aa 100644
--- a/llvm/test/CodeGen/NVPTX/weak-global.ll
+++ b/llvm/test/CodeGen/NVPTX/weak-global.ll
@@ -1,7 +1,10 @@
-; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
-; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 | %ptxas-verify %}
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx43 | FileCheck %s --check-prefix PTX43
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx50 | FileCheck %s --check-prefix PTX50
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx43 | %ptxas-verify %}
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx50 | %ptxas-verify %}
-; CHECK: .weak .global .align 4 .u32 g
+; PTX43: .weak .global .align 4 .u32 g
+; PTX50: .common .global .align 4 .u32 g
@g = common addrspace(1) global i32 zeroinitializer
define i32 @func0() {
diff --git a/llvm/test/CodeGen/PowerPC/aix-codemodel-attr.ll b/llvm/test/CodeGen/PowerPC/aix-codemodel-attr.ll
new file mode 100644
index 0000000..ef1156e
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-codemodel-attr.ll
@@ -0,0 +1,166 @@
+; RUN: llc --verify-machineinstrs -mtriple powerpc-ibm-aix --code-model=small < \
+; RUN: %s | FileCheck --check-prefixes=CHECK,CHECK32,CHECK-SMALL,CHECK-SMALL32 %s
+
+; RUN: llc --verify-machineinstrs -mtriple powerpc-ibm-aix --code-model=large < \
+; RUN: %s | FileCheck --check-prefixes=CHECK,CHECK32,CHECK-LARGE,CHECK-LARGE32 %s
+
+; RUN: llc --verify-machineinstrs -mtriple powerpc64-ibm-aix --code-model=small < \
+; RUN: %s | FileCheck --check-prefixes=CHECK,CHECK64,CHECK-SMALL,CHECK-SMALL64 %s
+
+; RUN: llc --verify-machineinstrs -mtriple powerpc64-ibm-aix --code-model=large < \
+; RUN: %s | FileCheck --check-prefixes=CHECK,CHECK64,CHECK-LARGE,CHECK-LARGE64 %s
+
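+; Globals tagged code_model "small" are reached with a single TOC-relative
+; load, while those tagged code_model "large" need an addis/load pair with
+; @u/@l relocations; globals without the attribute follow the --code-model
+; option given on the command line.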
+@a = external dso_local global i32, code_model "small", align 4
+@b = external dso_local global i32, code_model "large", align 4
+@c = dso_local global i32 55, code_model "small", align 4
+@d = dso_local global i32 41, code_model "large", align 4
+@e = external dso_local global i32, align 4
+@f = dso_local global i32 2748, align 4
+
+@large_aliasee = global i32 10, code_model "large", align 4
+@small_aliasee = global i32 171, code_model "small", align 4
+@normal_aliasee = global i32 2748, align 4
+
+@al = alias i32, ptr @large_aliasee
+@as = alias i32, ptr @small_aliasee
+@an = alias i32, ptr @normal_aliasee
+
+define i32 @A() local_unnamed_addr {
+entry:
+ %0 = load i32, ptr @a, align 4
+ ret i32 %0
+}
+; CHECK32: lwz [[SCRATCH:[0-9]+]], L..C[[TL_A:[0-9]+]](2) # @a
+; CHECK64: ld [[SCRATCH:[0-9]+]], L..C[[TL_A:[0-9]+]](2) # @a
+; CHECK: lwz 3, 0([[SCRATCH]])
+; CHECK: blr
+
+define i32 @B() local_unnamed_addr {
+entry:
+ %0 = load i32, ptr @b, align 4
+ ret i32 %0
+}
+; CHECK: addis [[HI:[0-9]+]], L..C[[TL_B:[0-9]+]]@u(2)
+; CHECK32: lwz [[ADDR:[0-9]+]], L..C[[TL_B]]@l([[HI]])
+; CHECK64: ld [[ADDR:[0-9]+]], L..C[[TL_B]]@l([[HI]])
+; CHECK: lwz 3, 0([[ADDR]])
+; CHECK: blr
+
+define i32 @C() local_unnamed_addr {
+entry:
+ %0 = load i32, ptr @c, align 4
+ ret i32 %0
+}
+; CHECK32: lwz [[SCRATCH:[0-9]+]], L..C[[TL_C:[0-9]+]](2) # @c
+; CHECK64: ld [[SCRATCH:[0-9]+]], L..C[[TL_C:[0-9]+]](2) # @c
+; CHECK: lwz 3, 0([[SCRATCH]])
+; CHECK: blr
+
+define i32 @D() local_unnamed_addr {
+entry:
+ %0 = load i32, ptr @d, align 4
+ ret i32 %0
+}
+; CHECK: addis [[HI:[0-9]+]], L..C[[TL_D:[0-9]+]]@u(2)
+; CHECK32: lwz [[ADDR:[0-9]+]], L..C[[TL_D]]@l([[HI]])
+; CHECK64: ld [[ADDR:[0-9]+]], L..C[[TL_D]]@l([[HI]])
+; CHECK: lwz 3, 0([[ADDR]])
+; CHECK: blr
+
+define i32 @E() {
+entry:
+ %0 = load i32, ptr @e, align 4
+ ret i32 %0
+}
+; CHECK-LARGE: addis [[HI:[0-9]+]], L..C[[TL_E:[0-9]+]]@u(2)
+; CHECK-LARGE32: lwz [[SCRATCH:[0-9]+]], L..C[[TL_E]]@l([[HI]])
+; CHECK-SMALL32: lwz [[SCRATCH:[0-9]+]], L..C[[TL_E:[0-9]+]](2)
+; CHECK-LARGE64: ld [[SCRATCH:[0-9]+]], L..C[[TL_E]]@l([[HI]])
+; CHECK-SMALL64: ld [[SCRATCH:[0-9]+]], L..C[[TL_E:[0-9]+]](2)
+; CHECK: lwz 3, 0([[SCRATCH]])
+; CHECK: blr
+
+define i32 @F() {
+entry:
+ %0 = load i32, ptr @f, align 4
+ ret i32 %0
+}
+; CHECK-LARGE: addis [[HI:[0-9]+]], L..C[[TL_F:[0-9]+]]@u(2)
+; CHECK-LARGE32: lwz [[SCRATCH:[0-9]+]], L..C[[TL_F]]@l([[HI]])
+; CHECK-SMALL32: lwz [[SCRATCH:[0-9]+]], L..C[[TL_F:[0-9]+]](2)
+; CHECK-LARGE64: ld [[SCRATCH:[0-9]+]], L..C[[TL_F]]@l([[HI]])
+; CHECK-SMALL64: ld [[SCRATCH:[0-9]+]], L..C[[TL_F:[0-9]+]](2)
+; CHECK: lwz 3, 0([[SCRATCH]])
+; CHECK: blr
+
+define noundef nonnull ptr @addr_a() local_unnamed_addr {
+entry:
+ ret ptr @a
+}
+; CHECK32: lwz 3, L..C[[TL_A]](2) # @a
+; CHECK64: ld 3, L..C[[TL_A]](2) # @a
+; CHECK: blr
+
+define noundef nonnull ptr @addr_b() local_unnamed_addr {
+entry:
+ ret ptr @b
+}
+; CHECK: addis [[HI:[0-9]+]], L..C[[TL_B]]@u(2)
+; CHECK32: lwz 3, L..C[[TL_B]]@l([[HI]])
+; CHECK64: ld 3, L..C[[TL_B]]@l([[HI]])
+; CHECK: blr
+
+
+define noundef nonnull ptr @addr_c() local_unnamed_addr {
+entry:
+ ret ptr @c
+}
+; CHECK32: lwz 3, L..C[[TL_C]](2) # @c
+; CHECK64: ld 3, L..C[[TL_C]](2) # @c
+; CHECK: blr
+
+define noundef nonnull ptr @addr_d() local_unnamed_addr {
+entry:
+ ret ptr @d
+}
+; CHECK: addis [[HI:[0-9]+]], L..C[[TL_D]]@u(2)
+; CHECK32: lwz 3, L..C[[TL_D]]@l([[HI]])
+; CHECK64: ld 3, L..C[[TL_D]]@l([[HI]])
+; CHECK: blr
+
+define i32 @G() {
+ %tmp = load i32, ptr @al
+ ret i32 %tmp
+}
+; CHECK: addis [[HI:[0-9]+]], L..C[[TL_AL:[0-9]+]]@u(2)
+; CHECK32: lwz [[ADDR:[0-9]+]], L..C[[TL_AL]]@l([[HI]])
+; CHECK64: ld [[ADDR:[0-9]+]], L..C[[TL_AL]]@l([[HI]])
+; CHECK: lwz 3, 0([[ADDR]])
+
+define i32 @H() {
+ %tmp = load i32, ptr @as
+ ret i32 %tmp
+}
+; CHECK32: lwz [[ADDR:[0-9]+]], L..C[[TL_AS:[0-9]+]](2)
+; CHECK64: ld [[ADDR:[0-9]+]], L..C[[TL_AS:[0-9]+]](2)
+; CHECK: lwz 3, 0([[ADDR]])
+
+;; Check TOC entries have the correct storage mapping class
+; CHECK: L..C[[TL_A]]:
+; CHECK: .tc a[TC],a[UA]
+; CHECK: L..C[[TL_B]]:
+; CHECK: .tc b[TE],b[UA]
+; CHECK: L..C[[TL_C]]:
+; CHECK: .tc c[TC],c[RW]
+; CHECK: L..C[[TL_D]]:
+; CHECK: .tc d[TE],d[RW]
+; CHECK: L..C[[TL_E]]:
+; CHECK-SMALL: .tc e[TC],e[UA]
+; CHECK-LARGE: .tc e[TE],e[UA]
+; CHECK: L..C[[TL_F]]:
+; CHECK-SMALL: .tc f[TC],f[RW]
+; CHECK-LARGE: .tc f[TE],f[RW]
+; CHECK: L..C[[TL_AL]]:
+; CHECK: .tc al[TE],al
+; CHECK: L..C[[TL_AS]]:
+; CHECK: .tc as[TC],as
diff --git a/llvm/test/CodeGen/PowerPC/aix-overflow-toc-data.py b/llvm/test/CodeGen/PowerPC/aix-overflow-toc-data.py
new file mode 100644
index 0000000..276c6da
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-overflow-toc-data.py
@@ -0,0 +1,59 @@
+# UNSUPPORTED: expensive_checks, debug
+
+# RUN: %python %s > %t.ll
+# RUN: llc -mtriple powerpc-ibm-aix-xcoff -code-model=small -mcpu=pwr7 -mattr=-altivec -O0 < %t.ll | \
+# RUN: FileCheck --check-prefix=ASM32 %s
+
+# RUN: llc -mtriple powerpc64-ibm-aix-xcoff -code-model=small -mcpu=pwr7 -mattr=-altivec -O0 < %t.ll | \
+# RUN: FileCheck --check-prefix=ASM64 %s
+
+# RUN: llc -mtriple powerpc-ibm-aix-xcoff -code-model=small -mcpu=pwr7 -mattr=-altivec -O0 \
+# RUN: -filetype=obj -o %t.o < %t.ll
+# RUN: llvm-objdump --no-print-imm-hex -D -r --symbol-description %t.o | FileCheck -D#NFA=2 --check-prefix=DIS32 %s
+
+# RUN: llc -mtriple powerpc64-ibm-aix-xcoff -code-model=small -mcpu=pwr7 -mattr=-altivec -O0 \
+# RUN: -filetype=obj -o %t.o < %t.ll
+# RUN: llvm-objdump --no-print-imm-hex -D -r --symbol-description %t.o | FileCheck -D#NFA=2 --check-prefix=DIS64 %s
+
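+# Each "toc-data" global occupies a 4-byte TOC entry, so 8195 entries cross the
+# signed 16-bit TOC-displacement boundary: the disassembly checks below expect
+# the addi offsets to wrap from 32764 to -32768, with R_TOC relocations
+# carrying the real symbol.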
+numentries = 8195
+for x in range(0, numentries):
+ print("@a%d = global i32 0, align 4 #0" % (x))
+
+print("define void @foo() {")
+print("entry:")
+for x in range(0, numentries):
+ print("store i32 1, i32* @a%d, align 4" % (x))
+print("ret void")
+print("}")
+
+print('attributes #0 = { "toc-data" }')
+
+# 32-bit assembly check
+# ASM32: la 4, a0[TD](2)
+# ASM32: la 4, a1[TD](2)
+
+# ASM32: la 4, a8191[TD](2)
+# ASM32: la 4, a8192[TD](2)
+# ASM32: la 4, a8193[TD](2)
+
+# 64-bit assembly check
+# ASM64: la 4, a0[TD](2)
+# ASM64: la 4, a1[TD](2)
+
+# ASM64: la 4, a8191[TD](2)
+# ASM64: la 4, a8192[TD](2)
+# ASM64: la 4, a8193[TD](2)
+
+# DIS32: fffc: 38 82 7f fc addi 4, 2, 32764
+# DIS32: 0000fffe: R_TOC (idx: [[#NFA+16391]]) a8191[TD]
+# DIS32: 10004: 38 82 80 00 addi 4, 2, -32768
+# DIS32: 00010006: R_TOC (idx: [[#NFA+16393]]) a8192[TD]
+# DIS32: 1000c: 38 82 80 04 addi 4, 2, -32764
+# DIS32: 0001000e: R_TOC (idx: [[#NFA+16395]]) a8193[TD]
+
+# DIS64: fffc: 38 82 7f fc addi 4, 2, 32764
+# DIS64: 0000fffe: R_TOC (idx: [[#NFA+16391]]) a8191[TD]
+# DIS64: 10004: 38 82 80 00 addi 4, 2, -32768
+# DIS64: 00010006: R_TOC (idx: [[#NFA+16393]]) a8192[TD]
+# DIS64: 1000c: 38 82 80 04 addi 4, 2, -32764
+# DIS64: 0001000e: R_TOC (idx: [[#NFA+16395]]) a8193[TD]
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-largeaccess.ll b/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-largeaccess.ll
new file mode 100644
index 0000000..eb16bae
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-largeaccess.ll
@@ -0,0 +1,632 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s \
+; RUN: | FileCheck %s --check-prefix=SMALL-LOCAL-DYNAMIC-SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: < %s | FileCheck %s \
+; RUN: --check-prefix=SMALL-LOCAL-DYNAMIC-LARGECM64
+
+; Test disassembly of object.
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff -xcoff-traceback-table=false \
+; RUN: --code-model=large -filetype=obj -o %t.o < %s
+; RUN: llvm-objdump -D -r --symbol-description %t.o | FileCheck -D#NFA=2 --check-prefix=DIS %s
+
+@ElementIntTLSv1 = thread_local(localdynamic) global [8187 x i32] zeroinitializer, align 4 ; Within 32K
+@ElementIntTLS2 = thread_local(localdynamic) global [4000 x i32] zeroinitializer, align 4
+@ElementIntTLS3 = thread_local(localdynamic) global [4000 x i32] zeroinitializer, align 4
+@ElementIntTLS4 = thread_local(localdynamic) global [4000 x i32] zeroinitializer, align 4
+@ElementIntTLS5 = thread_local(localdynamic) global [4000 x i32] zeroinitializer, align 4
+@ElementIntTLSv2 = thread_local(localdynamic) global [9000 x i32] zeroinitializer, align 4 ; Beyond 32K
+
+@ElementLongTLS6 = external thread_local(localdynamic) global [60 x i64], align 8
+@ElementLongTLS2 = thread_local(localdynamic) global [3000 x i64] zeroinitializer, align 8 ; Within 32K
+@MyTLSGDVar = thread_local global [800 x i64] zeroinitializer, align 8
+@ElementLongTLS3 = thread_local(localdynamic) global [3000 x i64] zeroinitializer, align 8
+@ElementLongTLS4 = thread_local(localdynamic) global [3000 x i64] zeroinitializer, align 8
+@ElementLongTLS5 = thread_local(localdynamic) global [3000 x i64] zeroinitializer, align 8
+@ElementLongTLS = thread_local(localdynamic) local_unnamed_addr global [7800 x i64] zeroinitializer, align 8 ; Beyond 32K
+
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull) #1
+
+; All accesses use a "faster" local-dynamic sequence directly off the module handle.
+; Exercise PPCXCOFFObjectWriter::getRelocTypeAndSignSize/fixup_ppc_half16.
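+; The sequence loads the module-handle symbol (_$TLSML) from the TOC, calls
+; .__tls_get_mod, and then adds each variable's module-relative offset (also
+; loaded from the TOC) to form the final address.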
+define signext i32 @test1() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: test1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r6, L..C1(r2) # target-flags(ppc-tlsld) @ElementIntTLS2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r7, L..C2(r2) # target-flags(ppc-tlsld) @ElementIntTLS3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r8, L..C3(r2) # target-flags(ppc-tlsld) @ElementIntTLS4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r9, L..C4(r2) # target-flags(ppc-tlsld) @ElementIntTLS5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r5, L..C5(r2) # target-flags(ppc-tlsld) @ElementIntTLSv1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r6, r3, r6
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r7, r3, r7
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r8, r3, r8
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r9, r3, r9
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stwux r4, r3, r5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r4, 24(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 320(r6)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 324(r7)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 88
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r4, 328(r8)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 332(r9)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 102
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: test1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C2@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r8, L..C3@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r9, L..C4@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r7, L..C2@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r8, L..C3@l(r8)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r9, L..C4@l(r9)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r5, L..C1@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C5@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r6, L..C5@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r7, r3, r7
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r8, r3, r8
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r9, r3, r9
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r6, r3, r6
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stwux r4, r3, r5
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r4, 24(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 2
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 320(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 324(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 88
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r4, 328(r8)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 332(r9)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 102
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+entry:
+ %tls1 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLSv1)
+ store i32 1, ptr %tls1, align 4
+ %arrayidx1 = getelementptr inbounds [8187 x i32], ptr %tls1, i64 0, i64 6
+ store i32 4, ptr %arrayidx1, align 4
+ %tls2 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS2)
+ %arrayidx2 = getelementptr inbounds [4000 x i32], ptr %tls2, i64 0, i64 80
+ store i32 2, ptr %arrayidx2, align 4
+ %tls3 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS3)
+ %arrayidx3 = getelementptr inbounds [4000 x i32], ptr %tls3, i64 0, i64 81
+ store i32 3, ptr %arrayidx3, align 4
+ %tls4 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS4)
+ %arrayidx4 = getelementptr inbounds [4000 x i32], ptr %tls4, i64 0, i64 82
+ store i32 4, ptr %arrayidx4, align 4
+ %tls5 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS5)
+ %arrayidx5 = getelementptr inbounds [4000 x i32], ptr %tls5, i64 0, i64 83
+ store i32 88, ptr %arrayidx5, align 4
+ %load1 = load i32, ptr %tls1, align 4
+ %load2 = load i32, ptr %arrayidx1, align 4
+ %load3 = load i32, ptr %arrayidx2, align 4
+ %load4 = load i32, ptr %arrayidx3, align 4
+ %load5 = load i32, ptr %arrayidx4, align 4
+ %add = add i32 %load1, 88
+ %add6 = add i32 %add, %load2
+ %add8 = add i32 %add6, %load3
+ %add10 = add i32 %add8, %load4
+ %add12 = add i32 %add10, %load5
+ ret i32 %add12
+}
+
+; All accesses use a "faster" local-dynamic sequence directly off the module handle.
+; Exercise PPCXCOFFObjectWriter::getRelocTypeAndSignSize/fixup_ppc_half16ds.
+define i64 @test2() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: test2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C6(r2) # target-flags(ppc-tlsld) @ElementLongTLS6
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 212
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r4, r6, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r3, 424(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C7(r2) # target-flags(ppc-tlsld) @ElementLongTLS2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 203
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r4, 1200(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C8(r2) # target-flags(ppc-tlsgdm) @MyTLSGDVar
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C9(r2) # target-flags(ppc-tlsgd) @MyTLSGDVar
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_addr[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 44
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r4, 440(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C10(r2) # target-flags(ppc-tlsld) @ElementLongTLS3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 6
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r4, 2000(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C11(r2) # target-flags(ppc-tlsld) @ElementLongTLS4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 100
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r4, r6, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r3, 6800(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C12(r2) # target-flags(ppc-tlsld) @ElementLongTLS5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 882
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r4, 8400(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 1191
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: test2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C6@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 212
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C6@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 424(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C7@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 203
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C7@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 1200(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r4, L..C9@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C8@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C9@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_addr[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 44
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 440(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 6
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C10@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 2000(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C11@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 100
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C11@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 6800(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C12@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 882
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C12@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 8400(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 1191
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+entry:
+ %tls1 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS6)
+ %arrayidx = getelementptr inbounds [60 x i64], ptr %tls1, i64 0, i64 53
+ store i64 212, ptr %arrayidx, align 8
+ %tls2 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS2)
+ %arrayidx1 = getelementptr inbounds [3000 x i64], ptr %tls2, i64 0, i64 150
+ store i64 203, ptr %arrayidx1, align 8
+ %tls3 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @MyTLSGDVar)
+ %arrayidx2 = getelementptr inbounds [800 x i64], ptr %tls3, i64 0, i64 55
+ store i64 44, ptr %arrayidx2, align 8
+ %tls4 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS3)
+ %arrayidx3 = getelementptr inbounds [3000 x i64], ptr %tls4, i64 0, i64 250
+ store i64 6, ptr %arrayidx3, align 8
+ %tls5 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS4)
+ %arrayidx4 = getelementptr inbounds [3000 x i64], ptr %tls5, i64 0, i64 850
+ store i64 100, ptr %arrayidx4, align 8
+ %tls6 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS5)
+ %arrayidx5 = getelementptr inbounds [3000 x i64], ptr %tls6, i64 0, i64 1050
+ store i64 882, ptr %arrayidx5, align 8
+ %load1 = load i64, ptr %arrayidx1, align 8
+ %load2 = load i64, ptr %arrayidx3, align 8
+ %load3 = load i64, ptr %arrayidx4, align 8
+ %add = add i64 %load1, 882
+ %add9 = add i64 %add, %load2
+ %add11 = add i64 %add9, %load3
+ ret i64 %add11
+}
+
+; Example of one access using the regular local-dynamic sequence from the TOC.
+define signext i32 @test3() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: test3:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r6, L..C1(r2) # target-flags(ppc-tlsld) @ElementIntTLS2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r7, L..C2(r2) # target-flags(ppc-tlsld) @ElementIntTLS3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r8, L..C3(r2) # target-flags(ppc-tlsld) @ElementIntTLS4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r9, L..C4(r2) # target-flags(ppc-tlsld) @ElementIntTLS5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r5, L..C13(r2) # target-flags(ppc-tlsld) @ElementIntTLSv2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r6, r3, r6
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r7, r3, r7
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r8, r3, r8
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r9, r3, r9
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stwux r4, r3, r5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r4, 24(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 320(r6)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 324(r7)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 88
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r4, 328(r8)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 332(r9)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 102
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: test3:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C13@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C2@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r8, L..C3@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r9, L..C4@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r7, L..C2@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r8, L..C3@l(r8)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r9, L..C4@l(r9)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r5, L..C13@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C5@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r6, L..C5@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r7, r3, r7
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r8, r3, r8
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r9, r3, r9
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r6, r3, r6
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stwux r4, r3, r5
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r4, 24(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 2
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 320(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 324(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 88
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r4, 328(r8)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 332(r9)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 102
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+entry:
+ %tls1 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLSv2)
+ store i32 1, ptr %tls1, align 4
+ %arrayidx1 = getelementptr inbounds [9000 x i32], ptr %tls1, i64 0, i64 6
+ store i32 4, ptr %arrayidx1, align 4
+ %tls2 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS2)
+ %arrayidx2 = getelementptr inbounds [4000 x i32], ptr %tls2, i64 0, i64 80
+ store i32 2, ptr %arrayidx2, align 4
+ %tls3 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS3)
+ %arrayidx3 = getelementptr inbounds [4000 x i32], ptr %tls3, i64 0, i64 81
+ store i32 3, ptr %arrayidx3, align 4
+ %tls4 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS4)
+ %arrayidx4 = getelementptr inbounds [4000 x i32], ptr %tls4, i64 0, i64 82
+ store i32 4, ptr %arrayidx4, align 4
+ %tls5 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS5)
+ %arrayidx5 = getelementptr inbounds [4000 x i32], ptr %tls5, i64 0, i64 83
+ store i32 88, ptr %arrayidx5, align 4
+ %load1 = load i32, ptr %tls1, align 4
+ %load2 = load i32, ptr %arrayidx1, align 4
+ %load3 = load i32, ptr %arrayidx2, align 4
+ %load4 = load i32, ptr %arrayidx3, align 4
+ %load5 = load i32, ptr %arrayidx4, align 4
+ %add = add i32 %load1, 88
+ %add9 = add i32 %add, %load2
+ %add11 = add i32 %add9, %load3
+ %add13 = add i32 %add11, %load4
+ %add15 = add i32 %add13, %load5
+ ret i32 %add15
+}
+
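+; The disassembly below verifies the R_TOCU/R_TOCL relocation pairs used by
+; the large code model, the R_TLSML relocation on the _$TLSML TOC entry, and
+; the R_TLS_LD relocations on the per-variable [TE] entries.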
+; DIS: file format aix5coff64-rs6000
+; DIS: Disassembly of section .text:
+; DIS: 0000000000000000 (idx: [[#NFA+9]]) .test1:
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mflr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stdu 1, -48(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 6, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+25]]) ElementIntTLSv1[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 0, 64(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 7, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+27]]) ElementIntTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 0(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 8, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+29]]) ElementIntTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 9, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+31]]) ElementIntTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 7, 16(7)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+27]]) ElementIntTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 8, 24(8)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+29]]) ElementIntTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 9, 32(9)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+31]]) ElementIntTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0x0
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_mod[PR]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 5, 8(6)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+25]]) ElementIntTLSv1[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 6, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+33]]) ElementIntTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 1
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 6, 40(6)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+33]]) ElementIntTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 7, 3, 7
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 8, 3, 8
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 9, 3, 9
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 6, 3, 6
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stwux 4, 3, 5
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 4
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 4, 24(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 2
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 320(6)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 324(7)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 88
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 4, 328(8)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 332(9)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 102
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addi 1, 1, 48
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 0, 16(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mtlr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} blr
+
+; DIS: 0000000000000090 (idx: [[#NFA+11]]) .test2:
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mflr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stdu 1, -48(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 0, 64(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 7, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+35]]) ElementLongTLS6[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 0(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0x0
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_mod[PR]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 212
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mr 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 48(7)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+35]]) ElementLongTLS6[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 424(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+37]]) ElementLongTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 203
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 56(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+37]]) ElementLongTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 1200(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+39]]) .MyTLSGDVar[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 4, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+41]]) MyTLSGDVar[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 64(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+39]]) .MyTLSGDVar[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 4, 72(4)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+41]]) MyTLSGDVar[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0x0
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+3]]) .__tls_get_addr[PR]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 44
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 440(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+43]]) ElementLongTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 6
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 80(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+43]]) ElementLongTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 2000(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+45]]) ElementLongTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 100
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 88(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+45]]) ElementLongTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 6800(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+47]]) ElementLongTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 882
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 96(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+47]]) ElementLongTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 8400(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 1191
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addi 1, 1, 48
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 0, 16(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mtlr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} blr
+
+; DIS: 0000000000000140 (idx: [[#NFA+13]]) .test3:
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mflr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stdu 1, -48(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 6, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+49]]) ElementIntTLSv2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 0, 64(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 7, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+27]]) ElementIntTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 0(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 8, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+29]]) ElementIntTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 9, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+31]]) ElementIntTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 7, 16(7)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+27]]) ElementIntTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 8, 24(8)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+29]]) ElementIntTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 9, 32(9)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+31]]) ElementIntTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0x0
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_mod[PR]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 5, 104(6)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+49]]) ElementIntTLSv2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 6, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+33]]) ElementIntTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 1
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 6, 40(6)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+33]]) ElementIntTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 7, 3, 7
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 8, 3, 8
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 9, 3, 9
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 6, 3, 6
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stwux 4, 3, 5
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 4
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 4, 24(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 2
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 320(6)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 324(7)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 88
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 4, 328(8)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 332(9)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 102
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addi 1, 1, 48
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 0, 16(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mtlr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} blr
+
+; DIS: Disassembly of section .data:
+
+; DIS: 00000000000001d0 (idx: 17) test1[DS]:
+; DIS-NEXT: 1d0: 00 00 00 00
+; DIS-NEXT: 00000000000001d0: R_POS (idx: [[#NFA+9]]) .test1
+; DIS-NEXT: 1d4: 00 00 00 00
+; DIS-NEXT: 1d8: 00 00 00 00
+; DIS-NEXT: 00000000000001d8: R_POS (idx: [[#NFA+21]]) TOC[TC0]
+; DIS-NEXT: 1dc: 00 00 02 18
+
+; DIS: 00000000000001e8 (idx: 19) test2[DS]:
+; DIS-NEXT: 1e8: 00 00 00 00
+; DIS-NEXT: 00000000000001e8: R_POS (idx: [[#NFA+11]]) .test2
+; DIS-NEXT: 1ec: 00 00 00 90
+; DIS-NEXT: 1f0: 00 00 00 00
+; DIS-NEXT: 00000000000001f0: R_POS (idx: [[#NFA+21]]) TOC[TC0]
+; DIS-NEXT: 1f4: 00 00 02 18
+
+; DIS: 0000000000000200 (idx: 21) test3[DS]:
+; DIS-NEXT: 200: 00 00 00 00
+; DIS-NEXT: 0000000000000200: R_POS (idx: [[#NFA+13]]) .test3
+; DIS-NEXT: 204: 00 00 01 40
+; DIS-NEXT: 208: 00 00 00 00
+; DIS-NEXT: 0000000000000208: R_POS (idx: [[#NFA+21]]) TOC[TC0]
+; DIS-NEXT: 20c: 00 00 02 18
+
+; DIS: 0000000000000218 (idx: 25) _$TLSML[TC]:
+; DIS-NEXT: 218: 00 00 00 00
+; DIS-NEXT: 0000000000000218: R_TLSML (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: 21c: 00 00 00 00
+
+; DIS: 0000000000000220 (idx: 27) ElementIntTLSv1[TE]:
+; DIS-NEXT: 220: 00 00 00 00
+; DIS-NEXT: 0000000000000220: R_TLS_LD (idx: [[#NFA+51]]) ElementIntTLSv1[TL]
+; DIS-NEXT: 224: 00 00 00 00
+
+; DIS: 0000000000000228 (idx: 29) ElementIntTLS3[TE]:
+; DIS-NEXT: 228: 00 00 00 00
+; DIS-NEXT: 0000000000000228: R_TLS_LD (idx: [[#NFA+55]]) ElementIntTLS3[TL]
+; DIS-NEXT: 22c: 00 00 be 6c
+
+; DIS: 0000000000000230 (idx: 31) ElementIntTLS4[TE]:
+; DIS-NEXT: 230: 00 00 00 00
+; DIS-NEXT: 0000000000000230: R_TLS_LD (idx: [[#NFA+57]]) ElementIntTLS4[TL]
+; DIS-NEXT: 234: 00 00 fc ec
+
+; DIS: 0000000000000238 (idx: 33) ElementIntTLS5[TE]:
+; DIS-NEXT: 238: 00 00 00 00
+; DIS-NEXT: 0000000000000238: R_TLS_LD (idx: [[#NFA+59]]) ElementIntTLS5[TL]
+; DIS-NEXT: 23c: 00 01 3b 6c
+
+; DIS: 0000000000000240 (idx: 35) ElementIntTLS2[TE]:
+; DIS-NEXT: 240: 00 00 00 00
+; DIS-NEXT: 0000000000000240: R_TLS_LD (idx: [[#NFA+53]]) ElementIntTLS2[TL]
+; DIS-NEXT: 244: 00 00 7f ec
+
+; DIS: 0000000000000248 (idx: 37) ElementLongTLS6[TE]:
+; DIS-NEXT: 248: 00 00 00 00
+; DIS-NEXT: 0000000000000248: R_TLS_LD (idx: [[#NFA+5]]) ElementLongTLS6[UL]
+; DIS-NEXT: 24c: 00 00 00 00
+
+; DIS: 0000000000000250 (idx: 39) ElementLongTLS2[TE]:
+; DIS-NEXT: 250: 00 00 00 00
+; DIS-NEXT: 0000000000000250: R_TLS_LD (idx: [[#NFA+63]]) ElementLongTLS2[TL]
+; DIS-NEXT: 254: 00 02 06 90
+
+; DIS: 0000000000000258 (idx: 41) .MyTLSGDVar[TE]:
+; DIS-NEXT: 258: 00 00 00 00
+; DIS-NEXT: 0000000000000258: R_TLSM (idx: [[#NFA+65]]) MyTLSGDVar[TL]
+; DIS-NEXT: 25c: 00 00 00 00
+
+; DIS: 0000000000000260 (idx: 43) MyTLSGDVar[TE]:
+; DIS-NEXT: 260: 00 00 00 00
+; DIS-NEXT: 0000000000000260: R_TLS (idx: [[#NFA+65]]) MyTLSGDVar[TL]
+; DIS-NEXT: 264: 00 02 64 50
+
+; DIS: 0000000000000268 (idx: 45) ElementLongTLS3[TE]:
+; DIS-NEXT: 268: 00 00 00 00
+; DIS-NEXT: 0000000000000268: R_TLS_LD (idx: [[#NFA+67]]) ElementLongTLS3[TL]
+; DIS-NEXT: 26c: 00 02 7d 50
+
+; DIS: 0000000000000270 (idx: 47) ElementLongTLS4[TE]:
+; DIS-NEXT: 270: 00 00 00 00
+; DIS-NEXT: 0000000000000270: R_TLS_LD (idx: [[#NFA+69]]) ElementLongTLS4[TL]
+; DIS-NEXT: 274: 00 02 db 10
+
+; DIS: 0000000000000278 (idx: 49) ElementLongTLS5[TE]:
+; DIS-NEXT: 278: 00 00 00 00
+; DIS-NEXT: 0000000000000278: R_TLS_LD (idx: [[#NFA+71]]) ElementLongTLS5[TL]
+; DIS-NEXT: 27c: 00 03 38 d0
+
+; DIS: 0000000000000280 (idx: 51) ElementIntTLSv2[TE]:
+; DIS-NEXT: 280: 00 00 00 00
+; DIS-NEXT: 0000000000000280: R_TLS_LD (idx: [[#NFA+61]]) ElementIntTLSv2[TL]
+; DIS-NEXT: 284: 00 01 79 ec
+
+; DIS: Disassembly of section .tdata:
+; DIS: 0000000000000000 (idx: [[#NFA+51]]) ElementIntTLSv1[TL]:
+; DIS: 0000000000007fec (idx: [[#NFA+53]]) ElementIntTLS2[TL]:
+; DIS: 000000000000be6c (idx: [[#NFA+55]]) ElementIntTLS3[TL]:
+; DIS: 000000000000fcec (idx: [[#NFA+57]]) ElementIntTLS4[TL]:
+; DIS: 0000000000013b6c (idx: [[#NFA+59]]) ElementIntTLS5[TL]:
+; DIS: 00000000000179ec (idx: [[#NFA+61]]) ElementIntTLSv2[TL]:
+; DIS: 0000000000020690 (idx: [[#NFA+63]]) ElementLongTLS2[TL]:
+; DIS: 0000000000026450 (idx: [[#NFA+65]]) MyTLSGDVar[TL]:
+; DIS: 0000000000027d50 (idx: [[#NFA+67]]) ElementLongTLS3[TL]:
+; DIS: 000000000002db10 (idx: [[#NFA+69]]) ElementLongTLS4[TL]:
+; DIS: 00000000000338d0 (idx: [[#NFA+71]]) ElementLongTLS5[TL]:
+; DIS: 0000000000039690 (idx: [[#NFA+73]]) ElementLongTLS[TL]:
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-types.ll b/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-types.ll
new file mode 100644
index 0000000..d996d86
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-types.ll
@@ -0,0 +1,1066 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s \
+; RUN: | FileCheck %s --check-prefix=SMALL-LOCAL-DYNAMIC-SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: < %s | FileCheck %s \
+; RUN: --check-prefix=SMALL-LOCAL-DYNAMIC-LARGECM64
+; RUN: llc -O0 -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s \
+; RUN: | FileCheck %s --check-prefix=SMALL-LOCAL-DYNAMIC-SMALLCM64-O0
+; RUN: llc -O0 -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: < %s | FileCheck %s \
+; RUN: --check-prefix=SMALL-LOCAL-DYNAMIC-LARGECM64-O0
+
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull) #1
+@tlv_int_init = local_unnamed_addr global i32 87, align 4
+
+@tlv_char = thread_local(localdynamic) global i8 1, align 1
+@tlv_short = thread_local(localdynamic) global i8 1, align 2
+@tlv_int = thread_local(localdynamic) global i32 1, align 4
+@internal_tlv_int = internal thread_local(localdynamic) global i32 1, align 4
+@tlv_long = thread_local(localdynamic) global i64 1, align 8
+@internal_tlv_long = internal thread_local(localdynamic) global i64 1, align 8
+@tlv_float = thread_local(localdynamic) global float 1.000000e+00, align 4
+@internal_tlv_double = internal thread_local(localdynamic) global double 1.000000e+00, align 8
+
+%struct.anon = type { i32 }
+@ThreadLocalStruct = thread_local(localdynamic) global %struct.anon zeroinitializer, align 1
+@a = thread_local(localdynamic) global [87 x i32] zeroinitializer, align 4
+
+define nonnull ptr @AddrTest1() local_unnamed_addr {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: AddrTest1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C1(r2) # target-flags(ppc-tlsld) @a
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r3, r3, 12
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: AddrTest1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C1@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r3, r3, 12
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: AddrTest1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C1(r2) # target-flags(ppc-tlsld) @a
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r3, r3, 12
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: AddrTest1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C0@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r3, r3, 12
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @a)
+ %arrayidx = getelementptr inbounds [87 x i32], ptr %tlv_addr, i64 0, i64 3
+ ret ptr %arrayidx
+}
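+
+; Note: the "addi r3, r3, 12" in the checks above is the byte offset of element
+; 3 of the i32 array @a (3 * 4 bytes).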
+
+define signext i32 @testUnaligned() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testUnaligned:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C2(r2) # target-flags(ppc-tlsld) @ThreadLocalStruct
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwax r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testUnaligned:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C2@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C2@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwax r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testUnaligned:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C2(r2) # target-flags(ppc-tlsld) @ThreadLocalStruct
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwa r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testUnaligned:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C2@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C2@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwa r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = call align 1 ptr @llvm.threadlocal.address.p0(ptr align 1 @ThreadLocalStruct)
+ %x = getelementptr inbounds %struct.anon, ptr %tlv_addr, i32 0, i32 0
+ %value = load i32, ptr %x, align 1
+ ret i32 %value
+}
+
+define void @testChar(i8 noundef signext %x) {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testChar:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C3(r2) # target-flags(ppc-tlsld) @tlv_char
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stbx r6, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testChar:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C3@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C3@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stbx r6, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testChar:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stw r3, 60(r1) # 4-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mr r4, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r5, L..C3(r2) # target-flags(ppc-tlsld) @tlv_char
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r4, r4, r5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stb r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testChar:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stw r3, 60(r1) # 4-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C3@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 48(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r5, 48(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mr r4, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r5, L..C3@l(r5)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r4, r4, r5
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stb r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 1 ptr @llvm.threadlocal.address.p0(ptr align 1 @tlv_char)
+ store i8 %x, ptr %tlv_addr, align 1
+ ret void
+}
+
+define void @testShort(i16 noundef signext %x) {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testShort:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C4(r2) # target-flags(ppc-tlsld) @tlv_short
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: sthx r6, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testShort:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C4@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C4@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: sthx r6, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testShort:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stw r3, 60(r1) # 4-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mr r4, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r5, L..C4(r2) # target-flags(ppc-tlsld) @tlv_short
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r4, r4, r5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: sth r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testShort:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stw r3, 60(r1) # 4-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C4@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 48(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r5, 48(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mr r4, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r5, L..C4@l(r5)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r4, r4, r5
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: sth r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 2 ptr @llvm.threadlocal.address.p0(ptr align 2 @tlv_short)
+ store i16 %x, ptr %tlv_addr, align 2
+ ret void
+}
+
+define signext i32 @testInt1() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testInt1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C5(r2) # target-flags(ppc-tlsld) @tlv_int
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwax r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testInt1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C5@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C5@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwax r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testInt1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C5(r2) # target-flags(ppc-tlsld) @tlv_int
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwa r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testInt1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C5@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C5@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwa r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @tlv_int)
+ %value = load i32, ptr %tlv_addr, align 4
+ ret i32 %value
+}
+
+define signext i32 @testInt2() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testInt2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C6(r2) # target-flags(ppc-tlsld) @internal_tlv_int
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwzx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C7(r2) # @tlv_int_init
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwz r4, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r4, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: extsw r3, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testInt2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C6@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C6@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwzx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r4, L..C7@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C7@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwz r4, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r4, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: extsw r3, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testInt2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C6(r2) # target-flags(ppc-tlsld) @internal_tlv_int
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r4, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C7(r2) # @tlv_int_init
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: extsw r3, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testInt2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C6@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C6@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r4, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C7@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C7@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: extsw r3, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @internal_tlv_int)
+ %tlv_val = load i32, ptr %tlv_addr, align 4
+ %global_val = load i32, ptr @tlv_int_init, align 4
+ %sum = add nsw i32 %global_val, %tlv_val
+ ret i32 %sum
+}
+
+define i64 @testLong1() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testLong1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C8(r2) # target-flags(ppc-tlsld) @tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ldx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testLong1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C8@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ldx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testLong1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C8(r2) # target-flags(ppc-tlsld) @tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testLong1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C8@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @tlv_long)
+  %value = load i64, ptr %tlv_addr, align 8
+ ret i64 %value
+}
+
+define void @testLong2(i64 noundef signext %x) {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testLong2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C9(r2) # target-flags(ppc-tlsld) @internal_tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ldx r5, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r5, r5, 9
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdx r5, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testLong2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C9@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C9@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ldx r5, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r5, r5, 9
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdx r5, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testLong2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C9(r2) # target-flags(ppc-tlsld) @internal_tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r4, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r3, r3, 9
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testLong2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C9@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C9@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r4, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r3, r3, 9
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @internal_tlv_long)
+ %value = load i64, ptr %tlv_addr, align 8
+ %add = add nsw i64 %value, 9
+ store i64 %add, ptr %tlv_addr, align 8
+ ret void
+}
+
+define i32 @testLong3() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testLong3:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C8(r2) # target-flags(ppc-tlsld) @tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ldx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testLong3:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C8@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ldx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testLong3:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C8(r2) # target-flags(ppc-tlsld) @tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testLong3:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C8@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @tlv_long)
+ %value = load i64, ptr %tlv_addr, align 8
+ %conv = trunc i64 %value to i32
+ ret i32 %conv
+}
+
+define void @testFloat1(float noundef %x) {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testFloat1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: vspltisw v2, 1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: vspltisw v3, 8
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: xvcvsxwdp vs0, vs34
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C10(r2) # target-flags(ppc-tlsld) @tlv_float
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lfsx f1, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: fadds f0, f1, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: xvcvsxwdp vs1, vs35
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stfsx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testFloat1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: vspltisw v2, 1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: vspltisw v3, 8
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: xvcvsxwdp vs0, vs34
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C10@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lfsx f1, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: fadds f0, f1, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: xvcvsxwdp vs1, vs35
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stfsx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testFloat1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C10(r2) # target-flags(ppc-tlsld) @tlv_float
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C11(r2) # %const.1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfs f1, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C12(r2) # %const.0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfs f1, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testFloat1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C10@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r4, L..C11@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C11@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfs f1, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r4, L..C12@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C12@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfs f1, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @tlv_float)
+ %value = load float, ptr %tlv_addr, align 4
+ %inc = fadd float %value, 1.000000e+00
+ %add = fadd float %inc, 8.000000e+00
+ store float %add, ptr %tlv_addr, align 4
+ ret void
+}
+
+define i32 @testFloat2() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testFloat2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C10(r2) # target-flags(ppc-tlsld) @tlv_float
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lfsx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r3, r1, 60
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwz r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testFloat2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C10@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lfsx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r3, r1, 60
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwz r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testFloat2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C10(r2) # target-flags(ppc-tlsld) @tlv_float
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: fctiwz f0, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stfd f0, 56(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwa r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testFloat2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 48(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 48(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C10@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: fctiwz f0, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stfd f0, 56(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwa r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @tlv_float)
+ %value = load float, ptr %tlv_addr, align 4
+ %conv = fptosi float %value to i32
+ ret i32 %conv
+}
+
+define void @testDouble1(double noundef %x) {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testDouble1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C11(r2) # target-flags(ppc-tlsld) @internal_tlv_double
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stfdx f1, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testDouble1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C11@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C11@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stfdx f1, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testDouble1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C13(r2) # target-flags(ppc-tlsld) @internal_tlv_double
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stxsdx f1, 0, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testDouble1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C13@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C13@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stxsdx f1, 0, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @internal_tlv_double)
+ store double %x, ptr %tlv_addr, align 8
+ ret void
+}
+
+define i32 @testDouble2() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testDouble2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C11(r2) # target-flags(ppc-tlsld) @internal_tlv_double
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lfdx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r3, r1, 60
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwz r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testDouble2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C11@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C11@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lfdx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r3, r1, 60
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwz r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testDouble2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C13(r2) # target-flags(ppc-tlsld) @internal_tlv_double
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfdx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r3, r1, 52
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r3, 52(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testDouble2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 96(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C13@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C13@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfdx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r3, r1, 68
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r3, 68(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 80
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @internal_tlv_double)
+ %value = load double, ptr %tlv_addr, align 8
+ %conv = fptosi double %value to i32
+ ret i32 %conv
+}
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-funcattr.ll b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-funcattr.ll
new file mode 100644
index 0000000..38b35dc
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-funcattr.ll
@@ -0,0 +1,105 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s \
+; RUN: | FileCheck %s --check-prefixes=COMMONCM,CHECK-SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: < %s | FileCheck %s --check-prefixes=COMMONCM,CHECK-LARGECM64
+
+@mySmallTLS = thread_local(localexec) global [7800 x i64] zeroinitializer, align 8 #0
+@mySmallTLS2 = thread_local(localexec) global [3000 x i64] zeroinitializer, align 8 #0
+@mySmallTLS3 = thread_local(localexec) global [3000 x i64] zeroinitializer, align 8
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull)
+
+; All accesses use the "faster" local-exec sequence directly off the thread
+; pointer, except for mySmallTLS, which is over the 32KB size limit.
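+;
+; A sketch of the two shapes seen in the CHECK lines below (offsets and
+; registers are illustrative):
+;   small local-exec:  std  rS, var[TL]@le+<offset>(r13)  # directly off the thread pointer (r13)
+;   TOC-based:         ld   rT, L..Cn(r2)                 # load the tprel offset from the TOC
+;                      add  rT, r13, rT
+;                      stdx rS, rT, rOff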
+define i64 @StoreLargeAccess1() #1 {
+; COMMONCM-LABEL: StoreLargeAccess1:
+; COMMONCM-NEXT: # %bb.0: # %entry
+; CHECK-SMALLCM64: ld r3, L..C0(r2) # target-flags(ppc-tprel) @mySmallTLS
+; CHECK-SMALLCM64-NEXT: li r4, 0
+; CHECK-SMALLCM64-NEXT: li r5, 23
+; CHECK-LARGECM64: addis r3, L..C0@u(r2)
+; CHECK-LARGECM64-NEXT: li r4, 0
+; CHECK-LARGECM64-NEXT: li r5, 23
+; CHECK-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; COMMONCM: ori r4, r4, 53328
+; COMMONCM-NEXT: add r3, r13, r3
+; COMMONCM-NEXT: stdx r5, r3, r4
+; COMMONCM-NEXT: li r3, 55
+; COMMONCM-NEXT: li r4, 64
+; COMMONCM-NEXT: std r3, (mySmallTLS2[TL]@le+696)-65536(r13)
+; COMMONCM-NEXT: li r3, 142
+; COMMONCM-NEXT: std r4, (mySmallTLS3[TL]@le+20000)-131072(r13)
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS)
+ %arrayidx = getelementptr inbounds i8, ptr %tls0, i32 53328
+ store i64 23, ptr %arrayidx, align 8
+ %tls1 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS2)
+ %arrayidx1 = getelementptr inbounds i8, ptr %tls1, i32 696
+ store i64 55, ptr %arrayidx1, align 8
+ %tls2 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS3)
+ %arrayidx2 = getelementptr inbounds i8, ptr %tls2, i32 20000
+ store i64 64, ptr %arrayidx2, align 8
+ %load1 = load i64, ptr %arrayidx, align 8
+ %load2 = load i64, ptr %arrayidx1, align 8
+ %add1 = add i64 %load1, 64
+ %add2 = add i64 %add1, %load2
+ ret i64 %add2
+}
+
+; Since this function does not have the "aix-small-local-exec-tls" target
+; feature, only some local-exec variables should have the small-local-exec TLS
+; access sequence (as opposed to all of them).
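+; Here mySmallTLS2 (annotated with "aix-small-tls" and within range) folds into
+; a direct @le store, while mySmallTLS (over the 32KB limit) and mySmallTLS3
+; (not annotated) are accessed through the TOC.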
+define i64 @StoreLargeAccess2() {
+; COMMONCM-LABEL: StoreLargeAccess2:
+; COMMONCM-NEXT: # %bb.0: # %entry
+; CHECK-SMALLCM64: ld r5, L..C0(r2) # target-flags(ppc-tprel) @mySmallTLS
+; CHECK-SMALLCM64-NEXT: li r3, 0
+; CHECK-SMALLCM64-NEXT: li r4, 23
+; CHECK-SMALLCM64-NEXT: ori r3, r3, 53328
+; CHECK-SMALLCM64-NEXT: add r5, r13, r5
+; CHECK-SMALLCM64-NEXT: stdx r4, r5, r3
+; CHECK-SMALLCM64-NEXT: ld r5, L..C1(r2) # target-flags(ppc-tprel) @mySmallTLS3
+; CHECK-SMALLCM64-NEXT: li r3, 55
+; CHECK-SMALLCM64-NEXT: li r4, 64
+; CHECK-SMALLCM64-NEXT: std r3, mySmallTLS2[TL]@le+696(r13)
+; CHECK-SMALLCM64-NEXT: li r3, 142
+; CHECK-SMALLCM64-NEXT: add r5, r13, r5
+; CHECK-SMALLCM64-NEXT: std r4, 20000(r5)
+; CHECK-LARGECM64: addis r3, L..C0@u(r2)
+; CHECK-LARGECM64-NEXT: li r4, 0
+; CHECK-LARGECM64-NEXT: li r5, 23
+; CHECK-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; CHECK-LARGECM64-NEXT: ori r4, r4, 53328
+; CHECK-LARGECM64-NEXT: add r3, r13, r3
+; CHECK-LARGECM64-NEXT: stdx r5, r3, r4
+; CHECK-LARGECM64-NEXT: addis r3, L..C1@u(r2)
+; CHECK-LARGECM64-NEXT: li r4, 55
+; CHECK-LARGECM64-NEXT: li r5, 64
+; CHECK-LARGECM64-NEXT: ld r3, L..C1@l(r3)
+; CHECK-LARGECM64-NEXT: std r4, mySmallTLS2[TL]@le+696(r13)
+; CHECK-LARGECM64-NEXT: add r3, r13, r3
+; CHECK-LARGECM64-NEXT: std r5, 20000(r3)
+; CHECK-LARGECM64-NEXT: li r3, 142
+; COMMONCM-NEXT: blr
+;
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS)
+ %arrayidx = getelementptr inbounds i8, ptr %tls0, i32 53328
+ store i64 23, ptr %arrayidx, align 8
+ %tls1 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS2)
+ %arrayidx1 = getelementptr inbounds i8, ptr %tls1, i32 696
+ store i64 55, ptr %arrayidx1, align 8
+ %tls2 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS3)
+ %arrayidx2 = getelementptr inbounds i8, ptr %tls2, i32 20000
+ store i64 64, ptr %arrayidx2, align 8
+ %load1 = load i64, ptr %arrayidx, align 8
+ %load2 = load i64, ptr %arrayidx1, align 8
+ %add1 = add i64 %load1, 64
+ %add2 = add i64 %add1, %load2
+ ret i64 %add2
+}
+
+attributes #0 = { "aix-small-tls" }
+attributes #1 = { "target-features"="+aix-small-local-exec-tls" }
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-loadaddr.ll b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-loadaddr.ll
new file mode 100644
index 0000000..c8537fb
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-loadaddr.ll
@@ -0,0 +1,222 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff -mattr=-aix-small-local-exec-tls \
+; RUN: < %s | FileCheck %s --check-prefixes=COMMONCM,SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: -mattr=-aix-small-local-exec-tls < %s | \
+; RUN: FileCheck %s --check-prefixes=COMMONCM,LARGECM64
+
+; Test that the 'aix-small-tls' global variable attribute generates the
+; optimized small-local-exec TLS sequence. Global variables without this
+; attribute should still generate a TOC-based local-exec access sequence.
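+; With the attribute, the address is formed by a single addi off the thread
+; pointer (r13) using a sym[TL]@le immediate; without it, the variable's offset
+; is first loaded from the TOC (L..Cn) and then added to r13.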
+
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull)
+
+@a = thread_local(localexec) global [87 x i8] zeroinitializer, align 1 #0
+@a_noattr = thread_local(localexec) global [87 x i8] zeroinitializer, align 1
+@b = thread_local(localexec) global [87 x i16] zeroinitializer, align 2 #0
+@b_noattr = thread_local(localexec) global [87 x i16] zeroinitializer, align 2
+@c = thread_local(localexec) global [87 x i32] zeroinitializer, align 4 #0
+@c_noattr = thread_local(localexec) global [87 x i32] zeroinitializer, align 4
+@d = thread_local(localexec) global [87 x i64] zeroinitializer, align 8 #0
+@d_noattr = thread_local(localexec) global [87 x i64] zeroinitializer, align 8 #0
+
+@e = thread_local(localexec) global [87 x double] zeroinitializer, align 8 #0
+@e_noattr = thread_local(localexec) global [87 x double] zeroinitializer, align 8
+@f = thread_local(localexec) global [87 x float] zeroinitializer, align 4 #0
+@f_noattr = thread_local(localexec) global [87 x float] zeroinitializer, align 4
+
+define nonnull ptr @AddrTest1() {
+; COMMONCM-LABEL: AddrTest1:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, a[TL]@le+1
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 1 ptr @llvm.threadlocal.address.p0(ptr align 1 @a)
+ %arrayidx = getelementptr inbounds [87 x i8], ptr %tls0, i64 0, i64 1
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest1_NoAttr() {
+; SMALLCM64-LABEL: AddrTest1_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tprel) @a_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 1
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest1_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 1
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 1 ptr @llvm.threadlocal.address.p0(ptr align 1 @a_noattr)
+ %arrayidx = getelementptr inbounds [87 x i8], ptr %tls0, i64 0, i64 1
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest2() {
+; COMMONCM-LABEL: AddrTest2:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, b[TL]@le+4
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 2 ptr @llvm.threadlocal.address.p0(ptr align 2 @b)
+ %arrayidx = getelementptr inbounds [87 x i16], ptr %tls0, i64 0, i64 2
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest2_NoAttr() {
+; SMALLCM64-LABEL: AddrTest2_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C1(r2) # target-flags(ppc-tprel) @b_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 4
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest2_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C1@u(r2)
+; LARGECM64-NEXT: ld r3, L..C1@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 4
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 2 ptr @llvm.threadlocal.address.p0(ptr align 2 @b_noattr)
+ %arrayidx = getelementptr inbounds [87 x i16], ptr %tls0, i64 0, i64 2
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest3() {
+; COMMONCM-LABEL: AddrTest3:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, c[TL]@le+12
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @c)
+ %arrayidx = getelementptr inbounds [87 x i32], ptr %tls0, i64 0, i64 3
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest3_NoAttr() {
+; SMALLCM64-LABEL: AddrTest3_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C2(r2) # target-flags(ppc-tprel) @c_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 12
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest3_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C2@u(r2)
+; LARGECM64-NEXT: ld r3, L..C2@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 12
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @c_noattr)
+ %arrayidx = getelementptr inbounds [87 x i32], ptr %tls0, i64 0, i64 3
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest4() {
+; COMMONCM-LABEL: AddrTest4:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, c[TL]@le+56
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @c)
+ %arrayidx = getelementptr inbounds [87 x i64], ptr %tls0, i64 0, i64 7
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest4_NoAttr() {
+; SMALLCM64-LABEL: AddrTest4_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C2(r2) # target-flags(ppc-tprel) @c_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 56
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest4_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C2@u(r2)
+; LARGECM64-NEXT: ld r3, L..C2@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 56
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @c_noattr)
+ %arrayidx = getelementptr inbounds [87 x i64], ptr %tls0, i64 0, i64 7
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest5() {
+; COMMONCM-LABEL: AddrTest5:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, e[TL]@le+48
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @e)
+ %arrayidx = getelementptr inbounds [87 x double], ptr %tls0, i64 0, i64 6
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest5_NoAttr() {
+; SMALLCM64-LABEL: AddrTest5_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C3(r2) # target-flags(ppc-tprel) @e_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 48
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest5_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C3@u(r2)
+; LARGECM64-NEXT: ld r3, L..C3@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 48
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @e_noattr)
+ %arrayidx = getelementptr inbounds [87 x double], ptr %tls0, i64 0, i64 6
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest6() {
+; COMMONCM-LABEL: AddrTest6:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, f[TL]@le+16
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @f)
+ %arrayidx = getelementptr inbounds [87 x float], ptr %tls0, i64 0, i64 4
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest6_NoAttr() {
+; SMALLCM64-LABEL: AddrTest6_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C4(r2) # target-flags(ppc-tprel) @f_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 16
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest6_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C4@u(r2)
+; LARGECM64-NEXT: ld r3, L..C4@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 16
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @f_noattr)
+ %arrayidx = getelementptr inbounds [87 x float], ptr %tls0, i64 0, i64 4
+ ret ptr %arrayidx
+}
+
+attributes #0 = { "aix-small-tls" }
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-targetattr.ll b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-targetattr.ll
new file mode 100644
index 0000000..1e4a3b9
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-targetattr.ll
@@ -0,0 +1,53 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff -mattr=+aix-small-local-exec-tls < %s \
+; RUN: | FileCheck %s --check-prefixes=COMMONCM,SMALL-LOCAL-EXEC-SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: -mattr=+aix-small-local-exec-tls < %s | FileCheck %s \
+; RUN: --check-prefixes=COMMONCM,SMALL-LOCAL-EXEC-LARGECM64
+
+@mySmallTLS = thread_local(localexec) global [7800 x i64] zeroinitializer, align 8 #0
+@mySmallTLS2 = thread_local(localexec) global [3000 x i64] zeroinitializer, align 8 #0
+@mySmallTLS3 = thread_local(localexec) global [3000 x i64] zeroinitializer, align 8
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull)
+
+; Although only some of the global variables are annotated with 'aix-small-tls',
+; the aix-small-local-exec-tls target attribute is turned on, so all accesses use
+; a "faster" local-exec sequence directly off the thread pointer.
+define i64 @StoreLargeAccess1() {
+; COMMONCM-LABEL: StoreLargeAccess1:
+; COMMONCM-NEXT: # %bb.0: # %entry
+; SMALL-LOCAL-EXEC-SMALLCM64: ld r3, L..C0(r2) # target-flags(ppc-tprel) @mySmallTLS
+; SMALL-LOCAL-EXEC-SMALLCM64-NEXT: li r4, 0
+; SMALL-LOCAL-EXEC-SMALLCM64-NEXT: li r5, 23
+; SMALL-LOCAL-EXEC-LARGECM64: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-EXEC-LARGECM64-NEXT: li r4, 0
+; SMALL-LOCAL-EXEC-LARGECM64-NEXT: li r5, 23
+; SMALL-LOCAL-EXEC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; COMMONCM: ori r4, r4, 53328
+; COMMONCM-NEXT: add r3, r13, r3
+; COMMONCM-NEXT: stdx r5, r3, r4
+; COMMONCM-NEXT: li r3, 55
+; COMMONCM-NEXT: li r4, 64
+; COMMONCM-NEXT: std r3, (mySmallTLS2[TL]@le+696)-65536(r13)
+; COMMONCM-NEXT: li r3, 142
+; COMMONCM-NEXT: std r4, (mySmallTLS3[TL]@le+20000)-131072(r13)
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS)
+ %arrayidx = getelementptr inbounds i8, ptr %tls0, i32 53328
+ store i64 23, ptr %arrayidx, align 8
+ %tls1 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS2)
+ %arrayidx1 = getelementptr inbounds i8, ptr %tls1, i32 696
+ store i64 55, ptr %arrayidx1, align 8
+ %tls2 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS3)
+ %arrayidx2 = getelementptr inbounds i8, ptr %tls2, i32 20000
+ store i64 64, ptr %arrayidx2, align 8
+ %load1 = load i64, ptr %arrayidx, align 8
+ %load2 = load i64, ptr %arrayidx1, align 8
+ %add1 = add i64 %load1, 64
+ %add2 = add i64 %add1, %load2
+ ret i64 %add2
+}
+
+attributes #0 = { "aix-small-tls" }
diff --git a/llvm/test/CodeGen/PowerPC/aix-xcoff-funcsect-explicitsect.ll b/llvm/test/CodeGen/PowerPC/aix-xcoff-funcsect-explicitsect.ll
new file mode 100644
index 0000000..4e94228
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-xcoff-funcsect-explicitsect.ll
@@ -0,0 +1,142 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr4 -mattr=-altivec -mtriple powerpc-ibm-aix-xcoff \
+; RUN: -xcoff-traceback-table=false -filetype=obj -function-sections -o %t.o < %s
+; RUN: llvm-readobj -s %t.o | FileCheck %s
+
+define dso_local signext i32 @foo1() section "sect" {
+entry:
+ ret i32 1
+}
+
+define dso_local signext i32 @foo2() section "sect2" {
+entry:
+ ret i32 2
+}
+
+define dso_local signext i32 @foo3() section "sect2" {
+entry:
+ ret i32 3
+}
+
+define dso_local signext i32 @foo4() {
+entry:
+ ret i32 4
+}
+
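+; With -function-sections, foo1 is placed in csect "sect", foo2 and foo3 are
+; emitted as XTY_LD labels inside the shared csect "sect2", and foo4, which has
+; no explicit section, becomes its own csect.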
+; CHECK: Symbol {{[{][[:space:]] *}}Index: [[#INDX:]]{{[[:space:]] *}}Name: sect
+; CHECK-NEXT: Value (RelocatableAddress): 0x0
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_HIDEXT (0x6B)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+1]]
+; CHECK-NEXT: SectionLen: 8
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 5
+; CHECK-NEXT: SymbolType: XTY_SD (0x1)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+2]]
+; CHECK-NEXT: Name: .foo1
+; CHECK-NEXT: Value (RelocatableAddress): 0x0
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_EXT (0x2)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+3]]
+; CHECK-NEXT: ContainingCsectSymbolIndex: [[#INDX]]
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 0
+; CHECK-NEXT: SymbolType: XTY_LD (0x2)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+4]]
+; CHECK-NEXT: Name: sect2
+; CHECK-NEXT: Value (RelocatableAddress): 0x20
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_HIDEXT (0x6B)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+5]]
+; CHECK-NEXT: SectionLen: 24
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 5
+; CHECK-NEXT: SymbolType: XTY_SD (0x1)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+6]]
+; CHECK-NEXT: Name: .foo2
+; CHECK-NEXT: Value (RelocatableAddress): 0x20
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_EXT (0x2)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+7]]
+; CHECK-NEXT: ContainingCsectSymbolIndex: [[#INDX+4]]
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 0
+; CHECK-NEXT: SymbolType: XTY_LD (0x2)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+8]]
+; CHECK-NEXT: Name: .foo3
+; CHECK-NEXT: Value (RelocatableAddress): 0x30
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_EXT (0x2)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+9]]
+; CHECK-NEXT: ContainingCsectSymbolIndex: [[#INDX+4]]
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 0
+; CHECK-NEXT: SymbolType: XTY_LD (0x2)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+10]]
+; CHECK-NEXT: Name: .foo4
+; CHECK-NEXT: Value (RelocatableAddress): 0x40
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_EXT (0x2)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+11]]
+; CHECK-NEXT: SectionLen: 8
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 5
+; CHECK-NEXT: SymbolType: XTY_SD (0x1)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
diff --git a/llvm/test/CodeGen/PowerPC/ctrloop-constrained-fp.ll b/llvm/test/CodeGen/PowerPC/ctrloop-constrained-fp.ll
index c1d1461..50ebe04 100644
--- a/llvm/test/CodeGen/PowerPC/ctrloop-constrained-fp.ll
+++ b/llvm/test/CodeGen/PowerPC/ctrloop-constrained-fp.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple powerpc64le < %s | FileCheck %s
; Check constrained ops converted to call
-define void @test(ptr %cast) {
+define void @test(ptr %cast) strictfp {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %root
; CHECK-NEXT: mflr 0
@@ -51,7 +51,7 @@ for.body:
}
; Check constrained ops converted to native instruction
-define void @test2(ptr %cast) {
+define void @test2(ptr %cast) strictfp {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 255
diff --git a/llvm/test/CodeGen/PowerPC/fp-classify.ll b/llvm/test/CodeGen/PowerPC/fp-classify.ll
index 7de35b8..f527b3c 100644
--- a/llvm/test/CodeGen/PowerPC/fp-classify.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-classify.ll
@@ -57,30 +57,18 @@ entry:
define zeroext i1 @abs_isinfq(fp128 %x) {
; P8-LABEL: abs_isinfq:
; P8: # %bb.0: # %entry
-; P8-NEXT: mflr 0
-; P8-NEXT: stdu 1, -48(1)
-; P8-NEXT: std 0, 64(1)
-; P8-NEXT: .cfi_def_cfa_offset 48
-; P8-NEXT: .cfi_offset lr, 16
; P8-NEXT: xxswapd 0, 34
-; P8-NEXT: addi 3, 1, 32
+; P8-NEXT: addi 3, 1, -16
+; P8-NEXT: li 5, 32767
; P8-NEXT: stxvd2x 0, 0, 3
-; P8-NEXT: lbz 4, 47(1)
-; P8-NEXT: clrlwi 4, 4, 25
-; P8-NEXT: stb 4, 47(1)
-; P8-NEXT: lxvd2x 0, 0, 3
-; P8-NEXT: addis 3, 2, .LCPI2_0@toc@ha
-; P8-NEXT: addi 3, 3, .LCPI2_0@toc@l
-; P8-NEXT: xxswapd 34, 0
-; P8-NEXT: lxvd2x 0, 0, 3
-; P8-NEXT: xxswapd 35, 0
-; P8-NEXT: bl __eqkf2
-; P8-NEXT: nop
-; P8-NEXT: cntlzw 3, 3
-; P8-NEXT: srwi 3, 3, 5
-; P8-NEXT: addi 1, 1, 48
-; P8-NEXT: ld 0, 16(1)
-; P8-NEXT: mtlr 0
+; P8-NEXT: rldic 5, 5, 48, 1
+; P8-NEXT: ld 4, -8(1)
+; P8-NEXT: ld 3, -16(1)
+; P8-NEXT: clrldi 4, 4, 1
+; P8-NEXT: xor 4, 4, 5
+; P8-NEXT: or 3, 3, 4
+; P8-NEXT: cntlzd 3, 3
+; P8-NEXT: rldicl 3, 3, 58, 63
; P8-NEXT: blr
;
; P9-LABEL: abs_isinfq:
@@ -99,12 +87,13 @@ entry:
define zeroext i1 @abs_isinfornanf(float %x) {
; P8-LABEL: abs_isinfornanf:
; P8: # %bb.0: # %entry
-; P8-NEXT: addis 3, 2, .LCPI3_0@toc@ha
-; P8-NEXT: xsabsdp 0, 1
-; P8-NEXT: lfs 1, .LCPI3_0@toc@l(3)
-; P8-NEXT: li 3, 1
-; P8-NEXT: fcmpu 0, 0, 1
-; P8-NEXT: isellt 3, 0, 3
+; P8-NEXT: xscvdpspn 0, 1
+; P8-NEXT: lis 4, 32639
+; P8-NEXT: ori 4, 4, 65535
+; P8-NEXT: mffprwz 3, 0
+; P8-NEXT: clrlwi 3, 3, 1
+; P8-NEXT: sub 3, 4, 3
+; P8-NEXT: rldicl 3, 3, 1, 63
; P8-NEXT: blr
;
; P9-LABEL: abs_isinfornanf:
@@ -123,12 +112,15 @@ entry:
define zeroext i1 @abs_isinfornan(double %x) {
; P8-LABEL: abs_isinfornan:
; P8: # %bb.0: # %entry
-; P8-NEXT: addis 3, 2, .LCPI4_0@toc@ha
-; P8-NEXT: xsabsdp 0, 1
-; P8-NEXT: lfs 1, .LCPI4_0@toc@l(3)
-; P8-NEXT: li 3, 1
-; P8-NEXT: fcmpu 0, 0, 1
-; P8-NEXT: isellt 3, 0, 3
+; P8-NEXT: mffprd 3, 1
+; P8-NEXT: li 4, -33
+; P8-NEXT: rldicl 4, 4, 47, 1
+; P8-NEXT: sradi 5, 4, 63
+; P8-NEXT: clrldi 3, 3, 1
+; P8-NEXT: rldicl 6, 3, 1, 63
+; P8-NEXT: subc 3, 4, 3
+; P8-NEXT: adde 3, 6, 5
+; P8-NEXT: xori 3, 3, 1
; P8-NEXT: blr
;
; P9-LABEL: abs_isinfornan:
@@ -147,53 +139,18 @@ entry:
define zeroext i1 @abs_isinfornanq(fp128 %x) {
; P8-LABEL: abs_isinfornanq:
; P8: # %bb.0: # %entry
-; P8-NEXT: mflr 0
-; P8-NEXT: stdu 1, -112(1)
-; P8-NEXT: std 0, 128(1)
-; P8-NEXT: .cfi_def_cfa_offset 112
-; P8-NEXT: .cfi_offset lr, 16
-; P8-NEXT: .cfi_offset r30, -16
-; P8-NEXT: .cfi_offset v30, -48
-; P8-NEXT: .cfi_offset v31, -32
-; P8-NEXT: li 3, 64
; P8-NEXT: xxswapd 0, 34
-; P8-NEXT: std 30, 96(1) # 8-byte Folded Spill
-; P8-NEXT: stvx 30, 1, 3 # 16-byte Folded Spill
-; P8-NEXT: li 3, 80
-; P8-NEXT: stvx 31, 1, 3 # 16-byte Folded Spill
-; P8-NEXT: addi 3, 1, 48
+; P8-NEXT: addi 3, 1, -16
+; P8-NEXT: li 4, -3
; P8-NEXT: stxvd2x 0, 0, 3
-; P8-NEXT: lbz 4, 63(1)
-; P8-NEXT: clrlwi 4, 4, 25
-; P8-NEXT: stb 4, 63(1)
-; P8-NEXT: lxvd2x 0, 0, 3
-; P8-NEXT: addis 3, 2, .LCPI5_0@toc@ha
-; P8-NEXT: addi 3, 3, .LCPI5_0@toc@l
-; P8-NEXT: xxswapd 63, 0
-; P8-NEXT: lxvd2x 0, 0, 3
-; P8-NEXT: vmr 2, 31
-; P8-NEXT: xxswapd 62, 0
-; P8-NEXT: vmr 3, 30
-; P8-NEXT: bl __eqkf2
-; P8-NEXT: nop
-; P8-NEXT: cntlzw 3, 3
-; P8-NEXT: vmr 2, 31
-; P8-NEXT: vmr 3, 30
-; P8-NEXT: srwi 30, 3, 5
-; P8-NEXT: bl __unordkf2
-; P8-NEXT: nop
-; P8-NEXT: cntlzw 3, 3
-; P8-NEXT: li 4, 80
-; P8-NEXT: lvx 31, 1, 4 # 16-byte Folded Reload
-; P8-NEXT: li 4, 64
-; P8-NEXT: srwi 3, 3, 5
-; P8-NEXT: lvx 30, 1, 4 # 16-byte Folded Reload
+; P8-NEXT: rldicl 4, 4, 47, 1
+; P8-NEXT: ld 3, -8(1)
+; P8-NEXT: sradi 5, 4, 63
+; P8-NEXT: clrldi 3, 3, 1
+; P8-NEXT: rldicl 6, 3, 1, 63
+; P8-NEXT: subc 3, 4, 3
+; P8-NEXT: adde 3, 6, 5
; P8-NEXT: xori 3, 3, 1
-; P8-NEXT: or 3, 3, 30
-; P8-NEXT: ld 30, 96(1) # 8-byte Folded Reload
-; P8-NEXT: addi 1, 1, 112
-; P8-NEXT: ld 0, 16(1)
-; P8-NEXT: mtlr 0
; P8-NEXT: blr
;
; P9-LABEL: abs_isinfornanq:
diff --git a/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir b/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir
index 3a312d2..f3ef95b 100644
--- a/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir
+++ b/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir
@@ -130,7 +130,7 @@ body: |
%22:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @c
%10:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @e
%13:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @a
- %14:g8rc_and_g8rc_nox0 = ADDItocL killed %13, @a, implicit $x2
+ %14:g8rc_and_g8rc_nox0 = ADDItocL8 killed %13, @a, implicit $x2
bb.2.while.body:
successors: %bb.4(0x30000000), %bb.3(0x50000000)
diff --git a/llvm/test/CodeGen/PowerPC/rldimi.ll b/llvm/test/CodeGen/PowerPC/rldimi.ll
index 322975f..78ea9aa 100644
--- a/llvm/test/CodeGen/PowerPC/rldimi.ll
+++ b/llvm/test/CodeGen/PowerPC/rldimi.ll
@@ -59,8 +59,8 @@ entry:
ret i64 %8
}
-define i64 @rldimi_intrinsic(i64 %a) {
-; CHECK-LABEL: rldimi_intrinsic:
+define i64 @rldimi4(i64 %a) {
+; CHECK-LABEL: rldimi4:
; CHECK: # %bb.0:
; CHECK-NEXT: rldimi 3, 3, 8, 0
; CHECK-NEXT: rldimi 3, 3, 16, 0
@@ -72,4 +72,71 @@ define i64 @rldimi_intrinsic(i64 %a) {
ret i64 %r3
}
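+
+; llvm.ppc.rldimi(%a, %b, sh, mask) computes (rotl64(%a, sh) & mask) | (%b & ~mask).
+; A single rldimi requires a contiguous mask whose lowest set bit equals sh;
+; rldimi6 and rldimi7 pre-rotate %a so the residual shift lines up with the mask,
+; while the degenerate masks 0 and -1 fold to a move of %b, a plain rotate of %a,
+; or no code at all (rldimi8 through rldimi11).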
+define i64 @rldimi5(i64 %a, i64 %b) {
+; CHECK-LABEL: rldimi5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: rldimi 4, 3, 8, 40
+; CHECK-NEXT: mr 3, 4
+; CHECK-NEXT: blr
+ %r = call i64 @llvm.ppc.rldimi(i64 %a, i64 %b, i32 8, i64 16776960) ; 0xffff << 8
+ ret i64 %r
+}
+
+define i64 @rldimi6(i64 %a, i64 %b) {
+; CHECK-LABEL: rldimi6:
+; CHECK: # %bb.0:
+; CHECK-NEXT: rotldi 3, 3, 1
+; CHECK-NEXT: rldimi 4, 3, 7, 41
+; CHECK-NEXT: mr 3, 4
+; CHECK-NEXT: blr
+ %r = call i64 @llvm.ppc.rldimi(i64 %a, i64 %b, i32 8, i64 8388480) ; 0xffff << 7
+ ret i64 %r
+}
+
+define i64 @rldimi7(i64 %a, i64 %b) {
+; CHECK-LABEL: rldimi7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: rotldi 3, 3, 63
+; CHECK-NEXT: rldimi 4, 3, 9, 39
+; CHECK-NEXT: mr 3, 4
+; CHECK-NEXT: blr
+ %r = call i64 @llvm.ppc.rldimi(i64 %a, i64 %b, i32 8, i64 33553920) ; 0xffff << 9
+ ret i64 %r
+}
+
+define i64 @rldimi8(i64 %a, i64 %b) {
+; CHECK-LABEL: rldimi8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mr 3, 4
+; CHECK-NEXT: blr
+ %r = call i64 @llvm.ppc.rldimi(i64 %a, i64 %b, i32 0, i64 0)
+ ret i64 %r
+}
+
+define i64 @rldimi9(i64 %a, i64 %b) {
+; CHECK-LABEL: rldimi9:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mr 3, 4
+; CHECK-NEXT: blr
+ %r = call i64 @llvm.ppc.rldimi(i64 %a, i64 %b, i32 63, i64 0)
+ ret i64 %r
+}
+
+define i64 @rldimi10(i64 %a, i64 %b) {
+; CHECK-LABEL: rldimi10:
+; CHECK: # %bb.0:
+; CHECK-NEXT: blr
+ %r = call i64 @llvm.ppc.rldimi(i64 %a, i64 %b, i32 0, i64 -1)
+ ret i64 %r
+}
+
+define i64 @rldimi11(i64 %a, i64 %b) {
+; CHECK-LABEL: rldimi11:
+; CHECK: # %bb.0:
+; CHECK-NEXT: rotldi 3, 3, 8
+; CHECK-NEXT: blr
+ %r = call i64 @llvm.ppc.rldimi(i64 %a, i64 %b, i32 8, i64 -1)
+ ret i64 %r
+}
+
declare i64 @llvm.ppc.rldimi(i64, i64, i32 immarg, i64 immarg)
diff --git a/llvm/test/CodeGen/PowerPC/rlwimi.ll b/llvm/test/CodeGen/PowerPC/rlwimi.ll
index 8b126cd..8da7695 100644
--- a/llvm/test/CodeGen/PowerPC/rlwimi.ll
+++ b/llvm/test/CodeGen/PowerPC/rlwimi.ll
@@ -107,11 +107,51 @@ entry:
define i32 @test9(i32 %a, i32 %b) {
; CHECK-LABEL: test9:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: rlwimi 3, 4, 8, 20, 26
+; CHECK-NEXT: rlwimi 4, 3, 8, 20, 26
+; CHECK-NEXT: mr 3, 4
; CHECK-NEXT: blr
entry:
%r = call i32 @llvm.ppc.rlwimi(i32 %a, i32 %b, i32 8, i32 4064)
ret i32 %r
}
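+
+; llvm.ppc.rlwimi(%a, %b, sh, mask) computes (rotl32(%a, sh) & mask) | (%b & ~mask).
+; The tests below cover the degenerate masks (-1 leaves only the rotated %a, 0 only
+; %b) and test13, where the insert is flipped to use a wrapped instruction mask.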
+define i32 @test10(i32 %a, i32 %b) {
+; CHECK-LABEL: test10:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: blr
+entry:
+ %r = call i32 @llvm.ppc.rlwimi(i32 %a, i32 %b, i32 0, i32 -1)
+ ret i32 %r
+}
+
+define i32 @test11(i32 %a, i32 %b) {
+; CHECK-LABEL: test11:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: rotlwi 3, 3, 8
+; CHECK-NEXT: blr
+entry:
+ %r = call i32 @llvm.ppc.rlwimi(i32 %a, i32 %b, i32 8, i32 -1)
+ ret i32 %r
+}
+
+define i32 @test12(i32 %a, i32 %b) {
+; CHECK-LABEL: test12:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: mr 3, 4
+; CHECK-NEXT: blr
+entry:
+ %r = call i32 @llvm.ppc.rlwimi(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %r
+}
+
+define i32 @test13(i32 %a, i32 %b) {
+; CHECK-LABEL: test13:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: rlwimi 3, 4, 0, 27, 19
+; CHECK-NEXT: blr
+entry:
+ %r = call i32 @llvm.ppc.rlwimi(i32 %a, i32 %b, i32 0, i32 4064)
+ ret i32 %r
+}
+
declare i32 @llvm.ppc.rlwimi(i32, i32, i32 immarg, i32 immarg)
diff --git a/llvm/test/CodeGen/PowerPC/rlwinm.ll b/llvm/test/CodeGen/PowerPC/rlwinm.ll
index c6d4e5b..363eb17 100644
--- a/llvm/test/CodeGen/PowerPC/rlwinm.ll
+++ b/llvm/test/CodeGen/PowerPC/rlwinm.ll
@@ -97,4 +97,24 @@ entry:
ret i32 %r
}
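+
+; llvm.ppc.rlwnm(%a, %s, mask) computes rotl32(%a, %s & 31) & mask, so a zero mask
+; folds to li 3, 0 and an all-ones mask to a plain rotlw.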
+define i32 @test10(i32 %a, i32 %s) {
+; CHECK-LABEL: test10:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: blr
+entry:
+ %r = call i32 @llvm.ppc.rlwnm(i32 %a, i32 %s, i32 0)
+ ret i32 %r
+}
+
+define i32 @test11(i32 %a, i32 %s) {
+; CHECK-LABEL: test11:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: rotlw 3, 3, 4
+; CHECK-NEXT: blr
+entry:
+ %r = call i32 @llvm.ppc.rlwnm(i32 %a, i32 %s, i32 -1)
+ ret i32 %r
+}
+
declare i32 @llvm.ppc.rlwnm(i32, i32, i32 immarg)
diff --git a/llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll b/llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll
index 6f68679..798637b 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll
@@ -7281,3 +7281,61 @@ entry:
store double %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
+
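+; Two i8 loads are converted to double, biased by -128.0, and stored back in
+; swapped order; this exercises the byte-load lowering (lxsibzx on P9/P10,
+; lbz plus mtfprwz on P8) while keeping the reversed stores intact.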
+define dso_local void @st_reversed_double_from_i8(ptr %ptr) {
+; CHECK-P10-LABEL: st_reversed_double_from_i8:
+; CHECK-P10: # %bb.0: # %entry
+; CHECK-P10-NEXT: li r4, 8
+; CHECK-P10-NEXT: lxsibzx f0, 0, r3
+; CHECK-P10-NEXT: xxspltidp vs2, -1023410176
+; CHECK-P10-NEXT: lxsibzx f1, r3, r4
+; CHECK-P10-NEXT: xscvuxddp f0, f0
+; CHECK-P10-NEXT: xscvuxddp f1, f1
+; CHECK-P10-NEXT: xsadddp f0, f0, f2
+; CHECK-P10-NEXT: xsadddp f1, f1, f2
+; CHECK-P10-NEXT: stfd f1, 0(r3)
+; CHECK-P10-NEXT: stfd f0, 8(r3)
+; CHECK-P10-NEXT: blr
+;
+; CHECK-P9-LABEL: st_reversed_double_from_i8:
+; CHECK-P9: # %bb.0: # %entry
+; CHECK-P9-NEXT: li r4, 8
+; CHECK-P9-NEXT: lxsibzx f0, 0, r3
+; CHECK-P9-NEXT: lxsibzx f1, r3, r4
+; CHECK-P9-NEXT: addis r4, r2, .LCPI300_0@toc@ha
+; CHECK-P9-NEXT: lfs f2, .LCPI300_0@toc@l(r4)
+; CHECK-P9-NEXT: xscvuxddp f0, f0
+; CHECK-P9-NEXT: xscvuxddp f1, f1
+; CHECK-P9-NEXT: xsadddp f0, f0, f2
+; CHECK-P9-NEXT: xsadddp f1, f1, f2
+; CHECK-P9-NEXT: stfd f0, 8(r3)
+; CHECK-P9-NEXT: stfd f1, 0(r3)
+; CHECK-P9-NEXT: blr
+;
+; CHECK-P8-LABEL: st_reversed_double_from_i8:
+; CHECK-P8: # %bb.0: # %entry
+; CHECK-P8-NEXT: lbz r4, 0(r3)
+; CHECK-P8-NEXT: lbz r5, 8(r3)
+; CHECK-P8-NEXT: mtfprwz f0, r4
+; CHECK-P8-NEXT: mtfprwz f1, r5
+; CHECK-P8-NEXT: addis r4, r2, .LCPI300_0@toc@ha
+; CHECK-P8-NEXT: lfs f2, .LCPI300_0@toc@l(r4)
+; CHECK-P8-NEXT: xscvuxddp f0, f0
+; CHECK-P8-NEXT: xscvuxddp f1, f1
+; CHECK-P8-NEXT: xsadddp f0, f0, f2
+; CHECK-P8-NEXT: xsadddp f1, f1, f2
+; CHECK-P8-NEXT: stfd f1, 0(r3)
+; CHECK-P8-NEXT: stfd f0, 8(r3)
+; CHECK-P8-NEXT: blr
+entry:
+ %idx = getelementptr inbounds i8, ptr %ptr, i64 8
+ %i0 = load i8, ptr %ptr, align 1
+ %i1 = load i8, ptr %idx, align 1
+ %f0 = uitofp i8 %i0 to double
+ %f1 = uitofp i8 %i1 to double
+ %a0 = fadd double %f0, -1.280000e+02
+ %a1 = fadd double %f1, -1.280000e+02
+ store double %a1, ptr %ptr, align 8
+ store double %a0, ptr %idx, align 8
+ ret void
+}
diff --git a/llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll b/llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll
index 824dd4c..f396057 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll
@@ -7271,3 +7271,61 @@ entry:
store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
+
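+; Same pattern as the double version: two i8 loads are converted to float,
+; biased by -128.0, and stored back in swapped order.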
+define dso_local void @st_reversed_float_from_i8(ptr %ptr) {
+; CHECK-P10-LABEL: st_reversed_float_from_i8:
+; CHECK-P10: # %bb.0: # %entry
+; CHECK-P10-NEXT: li r4, 8
+; CHECK-P10-NEXT: lxsibzx f0, 0, r3
+; CHECK-P10-NEXT: xxspltidp vs2, -1023410176
+; CHECK-P10-NEXT: lxsibzx f1, r3, r4
+; CHECK-P10-NEXT: xscvuxdsp f0, f0
+; CHECK-P10-NEXT: xscvuxdsp f1, f1
+; CHECK-P10-NEXT: xsaddsp f0, f0, f2
+; CHECK-P10-NEXT: xsaddsp f1, f1, f2
+; CHECK-P10-NEXT: stfs f0, 8(r3)
+; CHECK-P10-NEXT: stfs f1, 0(r3)
+; CHECK-P10-NEXT: blr
+;
+; CHECK-P9-LABEL: st_reversed_float_from_i8:
+; CHECK-P9: # %bb.0: # %entry
+; CHECK-P9-NEXT: li r4, 8
+; CHECK-P9-NEXT: lxsibzx f0, 0, r3
+; CHECK-P9-NEXT: lxsibzx f1, r3, r4
+; CHECK-P9-NEXT: addis r4, r2, .LCPI300_0@toc@ha
+; CHECK-P9-NEXT: lfs f2, .LCPI300_0@toc@l(r4)
+; CHECK-P9-NEXT: xscvuxdsp f0, f0
+; CHECK-P9-NEXT: xscvuxdsp f1, f1
+; CHECK-P9-NEXT: xsaddsp f0, f0, f2
+; CHECK-P9-NEXT: xsaddsp f1, f1, f2
+; CHECK-P9-NEXT: stfs f0, 8(r3)
+; CHECK-P9-NEXT: stfs f1, 0(r3)
+; CHECK-P9-NEXT: blr
+;
+; CHECK-P8-LABEL: st_reversed_float_from_i8:
+; CHECK-P8: # %bb.0: # %entry
+; CHECK-P8-NEXT: lbz r4, 0(r3)
+; CHECK-P8-NEXT: lbz r5, 8(r3)
+; CHECK-P8-NEXT: mtfprwz f0, r4
+; CHECK-P8-NEXT: mtfprwz f1, r5
+; CHECK-P8-NEXT: addis r4, r2, .LCPI300_0@toc@ha
+; CHECK-P8-NEXT: lfs f2, .LCPI300_0@toc@l(r4)
+; CHECK-P8-NEXT: xscvuxdsp f0, f0
+; CHECK-P8-NEXT: xscvuxdsp f1, f1
+; CHECK-P8-NEXT: xsaddsp f0, f0, f2
+; CHECK-P8-NEXT: xsaddsp f1, f1, f2
+; CHECK-P8-NEXT: stfs f1, 0(r3)
+; CHECK-P8-NEXT: stfs f0, 8(r3)
+; CHECK-P8-NEXT: blr
+entry:
+ %idx = getelementptr inbounds i8, ptr %ptr, i64 8
+ %i0 = load i8, ptr %ptr, align 1
+ %i1 = load i8, ptr %idx, align 1
+ %f0 = uitofp i8 %i0 to float
+ %f1 = uitofp i8 %i1 to float
+ %a0 = fadd float %f0, -1.280000e+02
+ %a1 = fadd float %f1, -1.280000e+02
+ store float %a1, ptr %ptr, align 8
+ store float %a0, ptr %idx, align 8
+ ret void
+}
diff --git a/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll b/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll
index c8278e5..8748767 100644
--- a/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll
+++ b/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll
@@ -29,9 +29,7 @@ define dso_local signext i32 @main(i32 signext %argc, ptr nocapture readnone %ar
; CHECK-NEXT: nop
; CHECK-NEXT: # kill: def $r3 killed $r3 killed $x3
; CHECK-NEXT: cmpwi 3, 0
-; CHECK-NEXT: crmove 20, 10
; CHECK-NEXT: crorc 20, 10, 2
-; CHECK-NEXT: crmove 21, 2
; CHECK-NEXT: bc 4, 20, .LBB0_4
; CHECK-NEXT: # %bb.2: # %if.end5
; CHECK-NEXT: addis 3, 2, .L.str@toc@ha
@@ -76,11 +74,9 @@ define dso_local signext i32 @main(i32 signext %argc, ptr nocapture readnone %ar
; BE-NEXT: addi 3, 31, 128
; BE-NEXT: bl _setjmp
; BE-NEXT: nop
-; BE-NEXT: crmove 20, 10
; BE-NEXT: # kill: def $r3 killed $r3 killed $x3
; BE-NEXT: cmpwi 3, 0
; BE-NEXT: crorc 20, 10, 2
-; BE-NEXT: crmove 21, 2
; BE-NEXT: bc 4, 20, .LBB0_4
; BE-NEXT: # %bb.2: # %if.end5
; BE-NEXT: addis 3, 2, .L.str@toc@ha
diff --git a/llvm/test/CodeGen/PowerPC/toc-data-large-array.ll b/llvm/test/CodeGen/PowerPC/toc-data-large-array.ll
new file mode 100644
index 0000000..90f40d9
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/toc-data-large-array.ll
@@ -0,0 +1,16 @@
+; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s --check-prefix CHECK-ERROR
+; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s --check-prefix CHECK-ERROR
+
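+; A [5 x i16] global is 10 bytes, larger than a single TOC entry (4 bytes on
+; AIX32, 8 bytes on AIX64), so the toc-data transformation reports an error.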
+@a = global [5 x i16] zeroinitializer, align 2 #0
+
+; Function Attrs: noinline
+define i16 @foo() #1 {
+entry:
+ %0 = load i16, ptr @a, align 2
+ ret i16 %0
+}
+
+attributes #0 = { "toc-data" }
+attributes #1 = { noinline }
+
+; CHECK-ERROR: LLVM ERROR: A GlobalVariable with size larger than a TOC entry is not currently supported by the toc data transformation.
diff --git a/llvm/test/CodeGen/PowerPC/toc-data-large-array2.ll b/llvm/test/CodeGen/PowerPC/toc-data-large-array2.ll
new file mode 100644
index 0000000..f870e99
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/toc-data-large-array2.ll
@@ -0,0 +1,8 @@
+; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s --check-prefix CHECK-ERROR
+; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s --check-prefix CHECK-ERROR
+
+@a = global [5 x i16] zeroinitializer, align 2 #0
+
+attributes #0 = { "toc-data" }
+
+; CHECK-ERROR: LLVM ERROR: A GlobalVariable with size larger than a TOC entry is not currently supported by the toc data transformation.
diff --git a/llvm/test/CodeGen/PowerPC/toc-data-struct-array.ll b/llvm/test/CodeGen/PowerPC/toc-data-struct-array.ll
new file mode 100644
index 0000000..a5c9a8b
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/toc-data-struct-array.ll
@@ -0,0 +1,110 @@
+; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s | FileCheck %s --check-prefix CHECK
+; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s | FileCheck %s --check-prefix CHECK
+
+; RUN: llc -filetype=obj -mtriple powerpc-ibm-aix-xcoff < %s -o %t32.o
+; RUN: llvm-readobj %t32.o --syms | FileCheck %s --check-prefix=OBJ32
+; RUN: llc -filetype=obj -mtriple powerpc64-ibm-aix-xcoff < %s -o %t64.o
+; RUN: llvm-readobj %t64.o --syms | FileCheck %s --check-prefix=OBJ64
+
+%struct.small_struct = type { i16 }
+
+@a = global %struct.small_struct zeroinitializer, align 2 #0
+@b = global [2 x i16] zeroinitializer, align 2 #0
+
+; Function Attrs: noinline
+define i16 @foo() #1 {
+entry:
+ %0 = load i16, ptr @a, align 2
+ %1 = load i16, ptr @b, align 2
+ %add = add nsw i16 %0, %1
+ ret i16 %add
+}
+
+attributes #0 = { "toc-data" }
+attributes #1 = { noinline }
+
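+; Both globals fit in a TOC entry, so each is emitted as its own [TD] csect
+; (StorageMappingClass XMC_TD) placed directly in the TOC: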
+; CHECK: .toc
+; CHECK-NEXT: .csect a[TD],2
+; CHECK-NEXT: .globl a[TD] # @a
+; CHECK-NEXT: .align 1
+; CHECK-NEXT: .space 2
+; CHECK-NEXT: .csect b[TD],2
+; CHECK-NEXT: .globl b[TD] # @b
+; CHECK-NEXT: .align 1
+; CHECK-NEXT: .space 4
+
+; OBJ32: Symbol {
+; OBJ32: Name: a
+; OBJ32-NEXT: Value (RelocatableAddress): 0x3C
+; OBJ32-NEXT: Section: .data
+; OBJ32-NEXT: Type: 0x0
+; OBJ32-NEXT: StorageClass: C_EXT (0x2)
+; OBJ32-NEXT: NumberOfAuxEntries: 1
+; OBJ32-NEXT: CSECT Auxiliary Entry {
+; OBJ32-NEXT: Index: {{[0-9]+}}
+; OBJ32-NEXT: SectionLen: 2
+; OBJ32-NEXT: ParameterHashIndex: 0x0
+; OBJ32-NEXT: TypeChkSectNum: 0x0
+; OBJ32-NEXT: SymbolAlignmentLog2: 2
+; OBJ32-NEXT: SymbolType: XTY_SD (0x1)
+; OBJ32-NEXT: StorageMappingClass: XMC_TD (0x10)
+; OBJ32-NEXT: StabInfoIndex: 0x0
+; OBJ32-NEXT: StabSectNum: 0x0
+; OBJ32-NEXT: }
+; OBJ32-NEXT: }
+; OBJ32-NEXT: Symbol {
+; OBJ32: Name: b
+; OBJ32-NEXT: Value (RelocatableAddress): 0x40
+; OBJ32-NEXT: Section: .data
+; OBJ32-NEXT: Type: 0x0
+; OBJ32-NEXT: StorageClass: C_EXT (0x2)
+; OBJ32-NEXT: NumberOfAuxEntries: 1
+; OBJ32-NEXT: CSECT Auxiliary Entry {
+; OBJ32-NEXT: Index: {{[0-9]+}}
+; OBJ32-NEXT: SectionLen: 4
+; OBJ32-NEXT: ParameterHashIndex: 0x0
+; OBJ32-NEXT: TypeChkSectNum: 0x0
+; OBJ32-NEXT: SymbolAlignmentLog2: 2
+; OBJ32-NEXT: SymbolType: XTY_SD (0x1)
+; OBJ32-NEXT: StorageMappingClass: XMC_TD (0x10)
+; OBJ32-NEXT: StabInfoIndex: 0x0
+; OBJ32-NEXT: StabSectNum: 0x0
+; OBJ32-NEXT: }
+; OBJ32-NEXT: }
+
+; OBJ64: Symbol {
+; OBJ64: Name: a
+; OBJ64-NEXT: Value (RelocatableAddress): 0x48
+; OBJ64-NEXT: Section: .data
+; OBJ64-NEXT: Type: 0x0
+; OBJ64-NEXT: StorageClass: C_EXT (0x2)
+; OBJ64-NEXT: NumberOfAuxEntries: 1
+; OBJ64-NEXT: CSECT Auxiliary Entry {
+; OBJ64-NEXT: Index: {{[0-9]+}}
+; OBJ64-NEXT: SectionLen: 2
+; OBJ64-NEXT: ParameterHashIndex: 0x0
+; OBJ64-NEXT: TypeChkSectNum: 0x0
+; OBJ64-NEXT: SymbolAlignmentLog2: 2
+; OBJ64-NEXT: SymbolType: XTY_SD (0x1)
+; OBJ64-NEXT: StorageMappingClass: XMC_TD (0x10)
+; OBJ64-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; OBJ64-NEXT: }
+; OBJ64-NEXT: }
+; OBJ64-NEXT: Symbol {
+; OBJ64: Name: b
+; OBJ64-NEXT: Value (RelocatableAddress): 0x4C
+; OBJ64-NEXT: Section: .data
+; OBJ64-NEXT: Type: 0x0
+; OBJ64-NEXT: StorageClass: C_EXT (0x2)
+; OBJ64-NEXT: NumberOfAuxEntries: 1
+; OBJ64-NEXT: CSECT Auxiliary Entry {
+; OBJ64-NEXT: Index: {{[0-9]+}}
+; OBJ64-NEXT: SectionLen: 4
+; OBJ64-NEXT: ParameterHashIndex: 0x0
+; OBJ64-NEXT: TypeChkSectNum: 0x0
+; OBJ64-NEXT: SymbolAlignmentLog2: 2
+; OBJ64-NEXT: SymbolType: XTY_SD (0x1)
+; OBJ64-NEXT: StorageMappingClass: XMC_TD (0x10)
+; OBJ64-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; OBJ64-NEXT: }
+; OBJ64-NEXT: }
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir
new file mode 100644
index 0000000..42bf321
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir
@@ -0,0 +1,345 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
+
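+# A scalable-vector G_SELECT with a vector-of-s1 mask is selected to the
+# PseudoVMERGE_VVM pseudo of the matching LMUL, with the mask copied into $v0.
+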
+---
+name: select_nxv2i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+    ; RV32I-LABEL: name: select_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+    ; RV64I-LABEL: name: select_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 2 x s8>) = G_SELECT %0(<vscale x 2 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv8i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+    ; RV32I-LABEL: name: select_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+    ; RV64I-LABEL: name: select_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 8 x s8>) = G_SELECT %0(<vscale x 8 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv32i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+    ; RV32I-LABEL: name: select_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+    ; RV64I-LABEL: name: select_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 32 x s8>) = G_SELECT %0(<vscale x 32 x s1>), %1, %1
+ $v8m4 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: select_nxv1i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+    ; RV32I-LABEL: name: select_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+    ; RV64I-LABEL: name: select_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 1 x s16>) = G_SELECT %0(<vscale x 1 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv4i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+    ; RV32I-LABEL: name: select_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+    ; RV64I-LABEL: name: select_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 4 x s16>) = G_SELECT %0(<vscale x 4 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv16i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+    ; RV32I-LABEL: name: select_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+    ; RV64I-LABEL: name: select_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 16 x s16>) = G_SELECT %0(<vscale x 16 x s1>), %1, %1
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: select_nxv1i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+    ; RV32I-LABEL: name: select_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+    ; RV64I-LABEL: name: select_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 1 x s32>) = G_SELECT %0(<vscale x 1 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv4i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+    ; RV32I-LABEL: name: select_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV32I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+    ; RV64I-LABEL: name: select_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV64I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 4 x s32>) = G_SELECT %0(<vscale x 4 x s1>), %1, %1
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: select_nxv16i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+    ; RV32I-LABEL: name: select_nxv16i32
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV32I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+    ; RV64I-LABEL: name: select_nxv16i32
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV64I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 16 x s32>) = G_SELECT %0(<vscale x 16 x s1>), %1, %1
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: select_nxv2i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+    ; RV32I-LABEL: name: select_nxv2i64
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
+ ; RV32I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+    ; RV64I-LABEL: name: select_nxv2i64
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
+ ; RV64I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 2 x s64>) = G_SELECT %0(<vscale x 2 x s1>), %1, %1
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: select_nxv8i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+    ; RV32I-LABEL: name: select_nxv8i64
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
+ ; RV32I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+    ; RV64I-LABEL: name: select_nxv8i64
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
+ ; RV64I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 8 x s64>) = G_SELECT %0(<vscale x 8 x s1>), %1, %1
+ $v8m8 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale32.mir
new file mode 100644
index 0000000..27dfb3f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale32.mir
@@ -0,0 +1,300 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v,+m -run-pass=instruction-select \
+# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
+
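+# G_READ_VLENB returns VLEN in bytes. Since vscale is VLEN/64 on RISC-V, vscale
+# itself is vlenb >> 3, and constant multiples of vscale lower to a shift (for
+# powers of two) or a shift followed by a multiply.
+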
+---
+name: test_1_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_1_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 3
+ %0:gprb(s32) = G_LSHR %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_2_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_2_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 2
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 2
+ %0:gprb(s32) = G_LSHR %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_3_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_3_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 3
+ %3:gprb(s32) = G_LSHR %1, %2(s32)
+ %4:gprb(s32) = G_CONSTANT i32 3
+ %0:gprb(s32) = G_MUL %3, %4
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_4_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_4_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 1
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 1
+ %0:gprb(s32) = G_LSHR %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_8_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_8_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: $x10 = COPY [[PseudoReadVLENB]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s32) = G_READ_VLENB
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_16_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_16_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[PseudoReadVLENB]], 1
+ ; CHECK-NEXT: $x10 = COPY [[SLLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 1
+ %0:gprb(s32) = G_SHL %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_40_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_40_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 5
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[PseudoReadVLENB]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 5
+ %0:gprb(s32) = G_MUL %1, %2
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_1_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_1_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 1
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_2_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_2_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 2
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 2
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_3_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_3_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 3
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_4_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_4_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 4
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 4
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_8_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_8_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 8
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_16_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_16_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 16
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 16
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_40_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_40_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 40
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 40
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale64.mir
new file mode 100644
index 0000000..4a96be2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale64.mir
@@ -0,0 +1,139 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v,+m -run-pass=instruction-select \
+# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
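+
+# Same (vscale x N) selection checks as vscale32.mir, using s64 constants on
+# riscv64.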
+
+---
+name: test_1
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_1
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 3
+ %2:gprb(s64) = G_LSHR %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_2
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_2
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 2
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 2
+ %2:gprb(s64) = G_LSHR %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_3
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_3
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 3
+ %2:gprb(s64) = G_LSHR %0, %1(s64)
+ %3:gprb(s64) = G_CONSTANT i64 3
+ %4:gprb(s64) = G_MUL %2, %3
+ $x10 = COPY %4(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_4
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 1
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 1
+ %2:gprb(s64) = G_LSHR %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_8
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: $x10 = COPY [[PseudoReadVLENB]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ $x10 = COPY %0(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_16
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[PseudoReadVLENB]], 1
+ ; CHECK-NEXT: $x10 = COPY [[SLLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 1
+ %2:gprb(s64) = G_SHL %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_40
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_40
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 5
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[PseudoReadVLENB]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 5
+ %2:gprb(s64) = G_MUL %0, %1
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir
index 11789a0..5f52030 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir
@@ -14,7 +14,7 @@ body: |
; CHECK-LABEL: name: test_trap
; CHECK: UNIMP
; CHECK-NEXT: PseudoRET
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
PseudoRET
...
@@ -28,7 +28,7 @@ body: |
; CHECK-LABEL: name: test_debugtrap
; CHECK: EBREAK
; CHECK-NEXT: PseudoRET
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.debugtrap)
+ G_DEBUGTRAP
PseudoRET
...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll
new file mode 100644
index 0000000..31b3c3f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll
@@ -0,0 +1,948 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
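+
+; These tests check that the IRTranslator lowers a load of a scalable vector
+; to a single G_LOAD with a scalable MachineMemOperand, returning the result
+; in v8, v8m2, v8m4, or v8m8 according to the register-group size (LMUL).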
+
+define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx1i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx1i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 1 x i8>, ptr %pa
+ ret <vscale x 1 x i8> %va
+}
+
+define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i8>, ptr %pa
+ ret <vscale x 2 x i8> %va
+}
+
+define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx4i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 4 x i8>, ptr %pa
+ ret <vscale x 4 x i8> %va
+}
+
+define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx8i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx8i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 8 x i8>, ptr %pa
+ ret <vscale x 8 x i8> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx16i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx16i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 16 x i8>, ptr %pa
+ ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 32 x i8> @vload_nx32i8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx32i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: vload_nx32i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %va = load <vscale x 32 x i8>, ptr %pa
+ ret <vscale x 32 x i8> %va
+}
+
+define <vscale x 64 x i8> @vload_nx64i8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx64i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: vload_nx64i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %va = load <vscale x 64 x i8>, ptr %pa
+ ret <vscale x 64 x i8> %va
+}
+
+define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx1i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx1i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 1 x i16>, ptr %pa
+ ret <vscale x 1 x i16> %va
+}
+
+define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i16>, ptr %pa
+ ret <vscale x 2 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx4i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 4 x i16>, ptr %pa
+ ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx8i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx8i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 8 x i16>, ptr %pa
+ ret <vscale x 8 x i16> %va
+}
+
+define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx16i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: vload_nx16i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ ; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %va = load <vscale x 16 x i16>, ptr %pa
+ ret <vscale x 16 x i16> %va
+}
+
+define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx32i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ ; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: vload_nx32i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %va = load <vscale x 32 x i16>, ptr %pa
+ ret <vscale x 32 x i16> %va
+}
+
+define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx1i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx1i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 1 x i32>, ptr %pa
+ ret <vscale x 1 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i32>, ptr %pa
+ ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx4i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 4 x i32>, ptr %pa
+ ret <vscale x 4 x i32> %va
+}
+
+define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx8i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: vload_nx8i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ ; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %va = load <vscale x 8 x i32>, ptr %pa
+ ret <vscale x 8 x i32> %va
+}
+
+define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx16i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ ; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: vload_nx16i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %va = load <vscale x 16 x i32>, ptr %pa
+ ret <vscale x 16 x i32> %va
+}
+
+define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx1i64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx1i64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 1 x i64>, ptr %pa
+ ret <vscale x 1 x i64> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx2i64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 2 x i64>, ptr %pa
+ ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: vload_nx4i64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ ; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %va = load <vscale x 4 x i64>, ptr %pa
+ ret <vscale x 4 x i64> %va
+}
+
+define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx8i64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ ; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: vload_nx8i64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %va = load <vscale x 8 x i64>, ptr %pa
+ ret <vscale x 8 x i64> %va
+}
+
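+; The explicit align attribute on the load is carried through to the
+; MachineMemOperand; the MIR printer only shows it when it differs from the
+; type's natural alignment.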
+define <vscale x 16 x i8> @vload_nx16i8_align1(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx16i8_align1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx16i8_align1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 16 x i8>, ptr %pa, align 1
+ ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8_align2(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx16i8_align2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx16i8_align2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 16 x i8>, ptr %pa, align 2
+ ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8_align16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx16i8_align16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx16i8_align16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 16 x i8>, ptr %pa, align 16
+ ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8_align64(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx16i8_align64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx16i8_align64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 16 x i8>, ptr %pa, align 64
+ ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16_align1(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i16_align1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 1)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx4i16_align1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 1)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 4 x i16>, ptr %pa, align 1
+ ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16_align2(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i16_align2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx4i16_align2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 4 x i16>, ptr %pa, align 2
+ ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16_align4(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i16_align4
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx4i16_align4
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 4 x i16>, ptr %pa, align 4
+ ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16_align8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i16_align8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx4i16_align8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 4 x i16>, ptr %pa, align 8
+ ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16_align16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i16_align16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx4i16_align16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 4 x i16>, ptr %pa, align 16
+ ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align2(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i32_align2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 2)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i32_align2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 2)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i32>, ptr %pa, align 2
+ ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align4(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i32_align4
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i32_align4
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i32>, ptr %pa, align 4
+ ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i32_align8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i32_align8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i32>, ptr %pa, align 8
+ ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i32_align16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i32_align16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i32>, ptr %pa, align 16
+ ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align256(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i32_align256
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i32_align256
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i32>, ptr %pa, align 256
+ ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64_align4(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i64_align4
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 4)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx2i64_align4
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 4)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 2 x i64>, ptr %pa, align 4
+ ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64_align8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i64_align8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx2i64_align8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 2 x i64>, ptr %pa, align 8
+ ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64_align16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i64_align16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx2i64_align16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 2 x i64>, ptr %pa, align 16
+ ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64_align32(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i64_align32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx2i64_align32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 2 x i64>, ptr %pa, align 32
+ ret <vscale x 2 x i64> %va
+}
+
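+; Vectors of pointers become <vscale x N x p0>. p0 is XLEN-sized, so the same
+; element count can need a larger register group on RV64 than on RV32.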
+define <vscale x 1 x ptr> @vload_nx1ptr(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx1ptr
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx1ptr
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 1 x ptr>, ptr %pa
+ ret <vscale x 1 x ptr> %va
+}
+
+define <vscale x 2 x ptr> @vload_nx2ptr(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2ptr
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2ptr
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x p0>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 2 x ptr>, ptr %pa
+ ret <vscale x 2 x ptr> %va
+}
+
+define <vscale x 8 x ptr> @vload_nx8ptr(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx8ptr
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: vload_nx8ptr
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x p0>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %va = load <vscale x 8 x ptr>, ptr %pa
+ ret <vscale x 8 x ptr> %va
+}
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
index d169eb3..b3c62df 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
@@ -89,10 +89,12 @@ body: |
; CHECK-NEXT: %yhi:_(s32) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %xlo, %ylo
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %ylo
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %xhi, %yhi
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%xlo:_(s32) = COPY $x10
%xhi:_(s32) = COPY $x11
@@ -121,10 +123,12 @@ body: |
; CHECK-NEXT: %hi2:_(s32) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%lo1:_(s32) = COPY $x10
%hi1:_(s32) = COPY $x11
@@ -152,6 +156,7 @@ body: |
; CHECK-NEXT: %hi2:_(s32) = COPY $x15
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
@@ -159,11 +164,13 @@ body: |
; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
- ; CHECK-NEXT: $x12 = COPY [[ADD4]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: $x12 = COPY [[COPY2]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s32) = COPY $x10
%mid1:_(s32) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
index f394e4d..6e76bb0 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
@@ -121,10 +121,12 @@ body: |
; CHECK-NEXT: %y01:_(s64) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %x00, %y00
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %y00
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %x01, %y01
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ADD2]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%x00:_(s64) = COPY $x10
%x01:_(s64) = COPY $x11
@@ -153,10 +155,12 @@ body: |
; CHECK-NEXT: %hi2:_(s64) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ADD2]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%lo1:_(s64) = COPY $x10
%hi1:_(s64) = COPY $x11
@@ -184,6 +188,7 @@ body: |
; CHECK-NEXT: %hi2:_(s64) = COPY $x15
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
@@ -194,14 +199,16 @@ body: |
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ADD2]](s64)
; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
- ; CHECK-NEXT: $x12 = COPY [[ADD4]](s64)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ADD4]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
+ ; CHECK-NEXT: $x12 = COPY [[COPY2]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s64) = COPY $x10
%mid1:_(s64) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir
index c348ec6..9227e65 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir
@@ -92,7 +92,8 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[ADD]](s32), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY1]](s32), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ICMP1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
; CHECK-NEXT: $x11 = COPY [[XOR]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s32) = COPY $x10
@@ -119,21 +120,23 @@ body: |
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]]
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[ADD2]](s32), [[COPY1]]
- ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[COPY1]]
- ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY]]
+ ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY5]](s32), [[COPY1]]
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY5]](s32), [[COPY1]]
+ ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY4]](s32), [[COPY]]
; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s32), [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY3]](s32), [[C1]]
; CHECK-NEXT: [[ICMP5:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY3]](s32), [[C1]]
; CHECK-NEXT: [[ICMP6:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY2]](s32), [[C]]
; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s32), [[ICMP6]], [[ICMP4]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[SELECT1]], [[SELECT]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY4]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY5]](s32)
; CHECK-NEXT: $x12 = COPY [[XOR]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%2:_(s32) = COPY $x10
@@ -241,7 +244,8 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[SUB]](s32), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY1]](s32), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ICMP1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[SUB]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
; CHECK-NEXT: $x11 = COPY [[XOR]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s32) = COPY $x10
@@ -377,7 +381,8 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
; CHECK-NEXT: $x11 = COPY [[ICMP]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s32) = COPY $x10
@@ -404,14 +409,16 @@ body: |
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]]
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD2]](s32), [[COPY3]]
- ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[COPY3]]
- ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY5]](s32), [[COPY3]]
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY5]](s32), [[COPY3]]
+ ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY4]](s32), [[COPY2]]
; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s32), [[ICMP3]], [[ICMP1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY4]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY5]](s32)
; CHECK-NEXT: $x12 = COPY [[SELECT]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%2:_(s32) = COPY $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
index 5506f52..8acaff5 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
@@ -125,8 +125,9 @@ body: |
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[TRUNC]], [[TRUNC1]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[XOR]](s32)
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
; CHECK-NEXT: $x11 = COPY [[ANYEXT]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s64) = COPY $x10
@@ -261,8 +262,9 @@ body: |
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[TRUNC]], [[TRUNC1]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[SUB]](s64)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[XOR]](s32)
- ; CHECK-NEXT: $x10 = COPY [[SUB]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
; CHECK-NEXT: $x11 = COPY [[ANYEXT]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s64) = COPY $x10
@@ -364,7 +366,8 @@ body: |
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ZEXT]](s64), [[AND]]
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY2]](s32)
; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
; CHECK-NEXT: $x11 = COPY [[ICMP]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
@@ -393,7 +396,8 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), [[COPY1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
; CHECK-NEXT: $x11 = COPY [[ICMP]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s64) = COPY $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir
index a890a41..354fc10 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir
@@ -50,8 +50,8 @@ body: |
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND8:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C13]]
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND8]], [[C14]]
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND8]], [[C14]]
; RV32I-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C15]](s32)
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C16]], [[LSHR6]]
@@ -129,8 +129,8 @@ body: |
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND9:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C15]]
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C16]]
; RV32I-NEXT: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C16]]
; RV32I-NEXT: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND10:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C18]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND10]], [[C17]](s32)
@@ -201,8 +201,8 @@ body: |
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C10]]
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C11]]
; RV32I-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C11]]
; RV32I-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C12]](s32)
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[LSHR8]]
@@ -267,8 +267,8 @@ body: |
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C11]]
; RV32I-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C12]]
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C12]]
; RV32I-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C13]](s32)
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C14]], [[LSHR8]]
@@ -306,8 +306,8 @@ body: |
; RV32I-NEXT: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD4]], [[C26]]
; RV32I-NEXT: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C27]]
; RV32I-NEXT: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C27]]
; RV32I-NEXT: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C28]](s32)
; RV32I-NEXT: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C29]], [[LSHR17]]
@@ -389,8 +389,8 @@ body: |
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND8:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C13]]
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND8]], [[C14]]
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND8]], [[C14]]
; RV32I-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C15]](s32)
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C16]], [[LSHR6]]
@@ -468,8 +468,8 @@ body: |
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND9:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C15]]
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C16]]
; RV32I-NEXT: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C16]]
; RV32I-NEXT: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND10:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C18]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND10]], [[C17]](s32)
@@ -540,8 +540,8 @@ body: |
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C10]]
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C11]]
; RV32I-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C11]]
; RV32I-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C12]](s32)
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[LSHR8]]
@@ -606,8 +606,8 @@ body: |
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C11]]
; RV32I-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C12]]
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C12]]
; RV32I-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C13]](s32)
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C14]], [[LSHR8]]
@@ -645,8 +645,8 @@ body: |
; RV32I-NEXT: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD4]], [[C26]]
; RV32I-NEXT: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C27]]
; RV32I-NEXT: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C27]]
; RV32I-NEXT: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C28]](s32)
; RV32I-NEXT: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C29]], [[LSHR17]]
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
index add8a56..38a4b9c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
@@ -283,8 +283,8 @@ body: |
; RV64I-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C11]]
; RV64I-NEXT: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C12]]
; RV64I-NEXT: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C12]]
; RV64I-NEXT: [[LSHR9:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C13]](s64)
; RV64I-NEXT: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C14]], [[LSHR9]]
@@ -583,8 +583,8 @@ body: |
; RV64I-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C11]]
; RV64I-NEXT: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C12]]
; RV64I-NEXT: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C12]]
; RV64I-NEXT: [[LSHR9:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C13]](s64)
; RV64I-NEXT: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C14]], [[LSHR9]]
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir
index d4eb5eb..c64669c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir
@@ -35,8 +35,8 @@ body: |
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND5]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND5]], [[C8]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C9]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -90,8 +90,8 @@ body: |
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND5]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND5]], [[C8]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C10]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[AND6]], [[C9]](s32)
@@ -143,8 +143,8 @@ body: |
; RV32I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -190,8 +190,8 @@ body: |
; RV32I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32I-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C8]](s32)
@@ -210,8 +210,8 @@ body: |
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD3]], [[C13]]
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C14]]
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C14]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C15]](s32)
; RV32I-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[LSHR7]], [[LSHR3]]
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
index e2434ba..196b367 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
@@ -205,8 +205,8 @@ body: |
; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C5]]
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C6]]
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C6]]
; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C7]](s64)
; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir
index 19555a7..372beca 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir
@@ -39,8 +39,8 @@ body: |
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C10]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -98,8 +98,8 @@ body: |
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C11]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[AND7]], [[C10]](s32)
@@ -155,8 +155,8 @@ body: |
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C6]]
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C7]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C8]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -208,8 +208,8 @@ body: |
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C8]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C9]](s32)
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LSHR3]], [[C10]]
@@ -234,8 +234,8 @@ body: |
; RV32I-NEXT: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND9:%[0-9]+]]:_(s32) = G_AND [[ADD6]], [[C17]]
; RV32I-NEXT: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C18]]
; RV32I-NEXT: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C18]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C19]](s32)
; RV32I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[ADD3]], [[LSHR7]]
; RV32I-NEXT: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
@@ -304,8 +304,8 @@ body: |
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C10]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -363,8 +363,8 @@ body: |
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C11]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[AND7]], [[C10]](s32)
@@ -420,8 +420,8 @@ body: |
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C6]]
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C7]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C8]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -473,8 +473,8 @@ body: |
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C8]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C9]](s32)
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LSHR3]], [[C10]]
@@ -499,8 +499,8 @@ body: |
; RV32I-NEXT: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND9:%[0-9]+]]:_(s32) = G_AND [[ADD6]], [[C17]]
; RV32I-NEXT: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C18]]
; RV32I-NEXT: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C18]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C19]](s32)
; RV32I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[ADD3]], [[LSHR7]]
; RV32I-NEXT: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
index e030e3c..e51a214 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
@@ -221,8 +221,8 @@ body: |
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[C6]]
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND4]], [[C7]]
; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND4]], [[C7]]
; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C8]](s64)
; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
@@ -457,8 +457,8 @@ body: |
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[C6]]
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND4]], [[C7]]
; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND4]], [[C7]]
; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C8]](s64)
; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir
index 433d6e6..ec2dc56 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir
@@ -162,8 +162,10 @@ body: |
; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH %lo1, %lo2
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL1]], [[MUL2]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[MUL2]]
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[UMULH]]
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[UMULH]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[UMULH]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ADD1]](s32)
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ICMP]], [[ICMP1]]
; CHECK-NEXT: [[MUL3:%[0-9]+]]:_(s32) = G_MUL %hi1, %lo2
; CHECK-NEXT: [[MUL4:%[0-9]+]]:_(s32) = G_MUL %mid1, %mid2
@@ -171,13 +173,18 @@ body: |
; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH %mid1, %lo2
; CHECK-NEXT: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH %lo1, %mid2
; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[MUL3]], [[MUL4]]
- ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[MUL5]]
- ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[UMULH1]]
- ; CHECK-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD5]], [[UMULH2]]
- ; CHECK-NEXT: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[ADD6]], [[ADD2]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
+ ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[MUL5]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
+ ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[COPY3]], [[UMULH1]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD5]](s32)
+ ; CHECK-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[COPY4]], [[UMULH2]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD6]](s32)
+ ; CHECK-NEXT: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[COPY5]], [[ADD2]]
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD7]](s32)
; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD1]](s32)
- ; CHECK-NEXT: $x12 = COPY [[ADD7]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: $x12 = COPY [[COPY6]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s32) = COPY $x10
%mid1:_(s32) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir
index 09e002e..39d9c5b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir
@@ -194,8 +194,10 @@ body: |
; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH %lo1, %lo2
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MUL1]], [[MUL2]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), [[MUL2]]
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[UMULH]]
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[UMULH]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), [[UMULH]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ADD1]](s64)
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ICMP]], [[ICMP1]]
; CHECK-NEXT: [[MUL3:%[0-9]+]]:_(s64) = G_MUL %hi1, %lo2
; CHECK-NEXT: [[MUL4:%[0-9]+]]:_(s64) = G_MUL %mid1, %mid2
@@ -203,13 +205,18 @@ body: |
; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(s64) = G_UMULH %mid1, %lo2
; CHECK-NEXT: [[UMULH2:%[0-9]+]]:_(s64) = G_UMULH %lo1, %mid2
; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD [[MUL3]], [[MUL4]]
- ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[MUL5]]
- ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s64) = G_ADD [[ADD4]], [[UMULH1]]
- ; CHECK-NEXT: [[ADD6:%[0-9]+]]:_(s64) = G_ADD [[ADD5]], [[UMULH2]]
- ; CHECK-NEXT: [[ADD7:%[0-9]+]]:_(s64) = G_ADD [[ADD6]], [[ADD2]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ADD3]](s64)
+ ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[MUL5]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[ADD4]](s64)
+ ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s64) = G_ADD [[COPY3]], [[UMULH1]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD5]](s64)
+ ; CHECK-NEXT: [[ADD6:%[0-9]+]]:_(s64) = G_ADD [[COPY4]], [[UMULH2]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY [[ADD6]](s64)
+ ; CHECK-NEXT: [[ADD7:%[0-9]+]]:_(s64) = G_ADD [[COPY5]], [[ADD2]]
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY [[ADD7]](s64)
; CHECK-NEXT: $x10 = COPY [[MUL]](s64)
- ; CHECK-NEXT: $x11 = COPY [[ADD1]](s64)
- ; CHECK-NEXT: $x12 = COPY [[ADD7]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
+ ; CHECK-NEXT: $x12 = COPY [[COPY6]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s64) = COPY $x10
%mid1:_(s64) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir
index f9eda12..16542f58 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir
@@ -14,7 +14,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY1]](p0) :: (load (p0))
- ; CHECK-NEXT: G_STORE [[COPY]](p0), [[LOAD]](p0) :: (store (p0))
+ ; CHECK-NEXT: G_STORE [[LOAD]](p0), [[COPY]](p0) :: (store (p0))
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(p0) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-bitcast.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-bitcast.mir
new file mode 100644
index 0000000..7b5d568
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-bitcast.mir
@@ -0,0 +1,356 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: bitcast_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s1>) = G_BITCAST [[DEF]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_BITCAST %0(<vscale x 1 x s8>)
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 1 x s16>) = G_BITCAST [[DEF]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s16>) = G_BITCAST %0(<vscale x 2 x s8>)
+ $v8 = COPY %1(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s16>) = G_BITCAST [[DEF]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s16>) = G_BITCAST %0(<vscale x 4 x s8>)
+ $v8 = COPY %1(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 64 x s1>) = G_BITCAST [[DEF]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 64 x s1>) = G_BITCAST %0(<vscale x 8 x s8>)
+ $v8 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s32>) = G_BITCAST [[DEF]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s32>) = G_BITCAST %0(<vscale x 16 x s8>)
+ $v8m2 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: bitcast_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv32i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 16 x s16>) = G_BITCAST [[DEF]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: $v8m4 = COPY [[BITCAST]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s16>) = G_BITCAST %0(<vscale x 32 x s8>)
+ $v8m4 = COPY %1(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: bitcast_nxv64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv64i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s64>) = G_BITCAST [[DEF]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: $v8m8 = COPY [[BITCAST]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s64>) = G_BITCAST %0(<vscale x 64 x s8>)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: bitcast_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s8>) = G_BITCAST [[DEF]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s8>) = G_BITCAST %0(<vscale x 1 x s16>)
+ $v8 = COPY %1(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 1 x s32>) = G_BITCAST [[DEF]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s32>) = G_BITCAST %0(<vscale x 2 x s16>)
+ $v8 = COPY %1(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 1 x s64>) = G_BITCAST [[DEF]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s64>) = G_BITCAST %0(<vscale x 4 x s16>)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s64>) = G_BITCAST [[DEF]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s64>) = G_BITCAST %0(<vscale x 8 x s16>)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: bitcast_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv16i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s32>) = G_BITCAST [[DEF]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: $v8m4 = COPY [[BITCAST]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s32>) = G_BITCAST %0(<vscale x 16 x s16>)
+ $v8m4 = COPY %1(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: bitcast_nxv32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv32i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s64>) = G_BITCAST [[DEF]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: $v8m8 = COPY [[BITCAST]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s64>) = G_BITCAST %0(<vscale x 32 x s16>)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: bitcast_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv1i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s16>) = G_BITCAST [[DEF]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s16>) = G_BITCAST %0(<vscale x 1 x s32>)
+ $v8 = COPY %1(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv2i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s16>) = G_BITCAST [[DEF]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s16>) = G_BITCAST %0(<vscale x 2 x s32>)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv4i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s16>) = G_BITCAST [[DEF]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s16>) = G_BITCAST %0(<vscale x 4 x s32>)
+ $v8m2 = COPY %1(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: bitcast_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv8i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s64>) = G_BITCAST [[DEF]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: $v8m4 = COPY [[BITCAST]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s64>) = G_BITCAST %0(<vscale x 8 x s32>)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: bitcast_nxv16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv16i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 32 x s16>) = G_BITCAST [[DEF]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: $v8m8 = COPY [[BITCAST]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 32 x s16>) = G_BITCAST %0(<vscale x 16 x s32>)
+ $v8m8 = COPY %1(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: bitcast_nxv1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv1i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s32>) = G_BITCAST [[DEF]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s32>) = G_BITCAST %0(<vscale x 1 x s64>)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv2i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s32>) = G_BITCAST [[DEF]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s32>) = G_BITCAST %0(<vscale x 2 x s64>)
+ $v8m2 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: bitcast_nxv4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv4i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 16 x s16>) = G_BITCAST [[DEF]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: $v8m4 = COPY [[BITCAST]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s16>) = G_BITCAST %0(<vscale x 4 x s64>)
+ $v8m4 = COPY %1(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: bitcast_nxv8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv8i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 64 x s8>) = G_BITCAST [[DEF]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: $v8m8 = COPY [[BITCAST]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 64 x s8>) = G_BITCAST %0(<vscale x 8 x s64>)
+ $v8m8 = COPY %1(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-implicit-def.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-implicit-def.mir
new file mode 100644
index 0000000..8ee4086
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-implicit-def.mir
@@ -0,0 +1,410 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: implicitdef_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv16i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv32i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv64i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv64i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 64 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv32i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv32i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv64i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv64i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv16i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv32i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv32i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv16i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv16i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv4i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv8i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-select.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-select.mir
new file mode 100644
index 0000000..6e1d4aa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-select.mir
@@ -0,0 +1,402 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
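+# G_SELECT on scalable vector types is legal with +v, so the legalizer is
+# expected to leave these instructions unchanged.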
+---
+name: select_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s8>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s8>), %2(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s8>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s8>), %2(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s8>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s8>), %2(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s8>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s8>), %2(<vscale x 8 x s8>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s8>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s8>), %2(<vscale x 16 x s8>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv32i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv32i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s8>) = G_SELECT %1(<vscale x 32 x s1>), %2(<vscale x 32 x s8>), %2(<vscale x 32 x s8>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv64i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv64i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 64 x s8>) = G_SELECT %1(<vscale x 64 x s1>), %2(<vscale x 64 x s8>), %2(<vscale x 64 x s8>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s16>), %2(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s16>), %2(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s16>), %2(<vscale x 4 x s16>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s16>), %2(<vscale x 8 x s16>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv16i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s16>), %2(<vscale x 16 x s16>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv32i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv32i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_SELECT %1(<vscale x 32 x s1>), %2(<vscale x 32 x s16>), %2(<vscale x 32 x s16>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv1i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s32>), %2(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv2i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s32>), %2(<vscale x 2 x s32>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv4i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s32>), %2(<vscale x 4 x s32>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv8i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s32>), %2(<vscale x 8 x s32>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv16i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv16i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s32>), %2(<vscale x 16 x s32>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv1i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s64>), %2(<vscale x 1 x s64>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv2i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s64>), %2(<vscale x 2 x s64>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv4i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv4i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s64>), %2(<vscale x 4 x s64>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv8i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv8i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s64>), %2(<vscale x 8 x s64>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv32.mir
new file mode 100644
index 0000000..899f795
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv32.mir
@@ -0,0 +1,230 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v,+m -run-pass=legalizer %s -o - | FileCheck %s
+
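+# vscale equals vlenb / 8, so G_VSCALE N lowers to (G_READ_VLENB >> 3) * N,
+# with the shift and multiply simplified for suitable constants.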
+---
+name: test_1_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_1_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 1
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_2_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_2_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 2
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_3_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_3_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 3
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_4_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_4_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 4
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_8_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_8_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: $x10 = COPY [[READ_VLENB]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 8
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_16_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_16_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[SHL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 16
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_40_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_40_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[READ_VLENB]], [[C]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 40
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+
+---
+name: test_1_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_1_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 1
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_2_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_2_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 2
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_3_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_3_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 3
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_4_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_4_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 4
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_8_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_8_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 8
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_16_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_16_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 16
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_40_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_40_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 40
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv64.mir
new file mode 100644
index 0000000..c0453a0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv64.mir
@@ -0,0 +1,110 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v,+m -run-pass=legalizer %s -o - | FileCheck %s
+
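+# As on riscv32, G_VSCALE N lowers to (G_READ_VLENB >> 3) * N, but here the
+# computation is done directly in s64.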
+---
+name: test_1
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_1
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 1
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_2
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_2
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 2
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_3
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_3
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 3
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_4
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_4
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 4
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_8
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: $x10 = COPY [[READ_VLENB]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 8
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_16
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[SHL]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 16
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_40
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_40
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[READ_VLENB]], [[C]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 40
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir
new file mode 100644
index 0000000..ef1e355
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir
@@ -0,0 +1,427 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
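+# Scalable vector values are expected to be assigned to the vector register
+# bank (vrb) on both riscv32 and riscv64.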
+---
+name: implicitdef_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv64i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv64i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv32i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv32i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv2i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv2i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv8i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv8i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv16i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv16i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv1i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv1i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv2i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv2i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv4i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv4i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv8i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv8i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir
new file mode 100644
index 0000000..4dc077a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir
@@ -0,0 +1,560 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+
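+# The condition, the two operands and the result of a scalable vector
+# G_SELECT are all expected to land in the vector register bank (vrb).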
+---
+name: select_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s8>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s8>), %2(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s8>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s8>), %2(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s8>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s8>), %2(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s8>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s8>), %2(<vscale x 8 x s8>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s8>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s8>), %2(<vscale x 16 x s8>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s8>) = G_SELECT %1(<vscale x 32 x s1>), %2(<vscale x 32 x s8>), %2(<vscale x 32 x s8>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv64i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv64i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 64 x s8>) = G_SELECT %1(<vscale x 64 x s1>), %2(<vscale x 64 x s8>), %2(<vscale x 64 x s8>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s16>), %2(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s16>), %2(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s16>), %2(<vscale x 4 x s16>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s16>), %2(<vscale x 8 x s16>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s16>), %2(<vscale x 16 x s16>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv32i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv32i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_SELECT %1(<vscale x 32 x s1>), %2(<vscale x 32 x s16>), %2(<vscale x 32 x s16>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s32>), %2(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv2i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s32>), %2(<vscale x 2 x s32>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s32>), %2(<vscale x 4 x s32>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv8i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s32>), %2(<vscale x 8 x s32>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv16i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv16i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s32>), %2(<vscale x 16 x s32>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s64>), %2(<vscale x 1 x s64>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv2i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s64>), %2(<vscale x 2 x s64>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv4i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s64>), %2(<vscale x 4 x s64>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv8i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s64>), %2(<vscale x 8 x s64>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv32.mir
new file mode 100644
index 0000000..ae3bb0a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv32.mir
@@ -0,0 +1,48 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck %s
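+# A descriptive note (not part of the autogenerated checks): G_READ_VLENB
+# yields the vector register length in bytes (VLEN/8), and the shift right
+# by 3 rescales it to vscale, since LLVM's RVV types are built from 64-bit
+# blocks. Both operations should be assigned to the GPR bank.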
+
+---
+name: test_s32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:gprb(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:gprb(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s32) = G_READ_VLENB
+ %2:_(s32) = G_CONSTANT i32 3
+ %0:_(s32) = G_LSHR %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_s64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:gprb(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:gprb(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gprb(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:_(s32) = G_READ_VLENB
+ %18:_(s32) = G_CONSTANT i32 3
+ %2:_(s32) = G_LSHR %17, %18(s32)
+ %15:_(s32) = G_CONSTANT i32 1
+ %9:_(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv64.mir
new file mode 100644
index 0000000..a7446d9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv64.mir
@@ -0,0 +1,25 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck %s
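+# As in the rv32 version of this test, vscale is computed as VLENB >> 3,
+# here as a single s64 operation, and should likewise stay on the GPR bank.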
+
+---
+name: test
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:gprb(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:gprb(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s64) = G_READ_VLENB
+ %2:_(s64) = G_CONSTANT i64 3
+ %0:_(s64) = G_LSHR %1, %2(s64)
+ $x10 = COPY %0(s64)
+ PseudoRET implicit $x10
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
index 7b110e5..d55adf3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
@@ -17,6 +17,12 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -mattr=+d -target-abi lp64d \
; RUN: -verify-machineinstrs \
; RUN: | FileCheck -check-prefixes=RV64,LP64D %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv32 -global-isel \
+; RUN: -frame-pointer=all -target-abi ilp32 -verify-machineinstrs \
+; RUN: | FileCheck -check-prefixes=RV32-WITHFP %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel \
+; RUN: -frame-pointer=all -target-abi lp64 -verify-machineinstrs \
+; RUN: | FileCheck -check-prefixes=RV64-WITHFP %s
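+; The WITHFP prefixes cover the same functions when -frame-pointer=all
+; reserves s0, so the vararg save area is addressed relative to the frame
+; pointer instead of sp.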
; The same vararg calling convention is used for ilp32/ilp32f/ilp32d and for
; lp64/lp64f/lp64d. Different CHECK lines are required due to slight
@@ -79,6 +85,67 @@ define i32 @va1(ptr %fmt, ...) {
; RV64-NEXT: lw a0, 0(a0)
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va1:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: .cfi_offset ra, -36
+; RV32-WITHFP-NEXT: .cfi_offset s0, -40
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 32
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va1:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: .cfi_offset ra, -72
+; RV64-WITHFP-NEXT: .cfi_offset s0, -80
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 64
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, -20(s0)
+; RV64-WITHFP-NEXT: lwu a1, -24(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: slli a0, a0, 32
+; RV64-WITHFP-NEXT: or a0, a0, a1
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: srli a2, a1, 32
+; RV64-WITHFP-NEXT: sw a1, -24(s0)
+; RV64-WITHFP-NEXT: sw a2, -20(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%argp.cur = load ptr, ptr %va, align 4
@@ -131,6 +198,58 @@ define i32 @va1_va_arg(ptr %fmt, ...) nounwind {
; RV64-NEXT: lw a0, 0(a0)
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va1_va_arg:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va1_va_arg:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -212,6 +331,78 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; RV64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 96
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va1_va_arg_alloca:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, -16(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw s1, 0(a0)
+; RV32-WITHFP-NEXT: addi a0, s1, 15
+; RV32-WITHFP-NEXT: andi a0, a0, -16
+; RV32-WITHFP-NEXT: sub a0, sp, a0
+; RV32-WITHFP-NEXT: mv sp, a0
+; RV32-WITHFP-NEXT: call notdead
+; RV32-WITHFP-NEXT: mv a0, s1
+; RV32-WITHFP-NEXT: addi sp, s0, -16
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va1_va_arg_alloca:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -32(s0)
+; RV64-WITHFP-NEXT: ld a0, -32(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -32(s0)
+; RV64-WITHFP-NEXT: lw s1, 0(a0)
+; RV64-WITHFP-NEXT: slli a0, s1, 32
+; RV64-WITHFP-NEXT: srli a0, a0, 32
+; RV64-WITHFP-NEXT: addi a0, a0, 15
+; RV64-WITHFP-NEXT: andi a0, a0, -16
+; RV64-WITHFP-NEXT: sub a0, sp, a0
+; RV64-WITHFP-NEXT: mv sp, a0
+; RV64-WITHFP-NEXT: call notdead
+; RV64-WITHFP-NEXT: mv a0, s1
+; RV64-WITHFP-NEXT: addi sp, s0, -32
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -273,6 +464,36 @@ define void @va1_caller() nounwind {
; LP64D-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LP64D-NEXT: addi sp, sp, 16
; LP64D-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va1_caller:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -16
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: lui a3, 261888
+; RV32-WITHFP-NEXT: li a4, 2
+; RV32-WITHFP-NEXT: li a2, 0
+; RV32-WITHFP-NEXT: call va1
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va1_caller:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -16
+; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 16
+; RV64-WITHFP-NEXT: lui a0, %hi(.LCPI3_0)
+; RV64-WITHFP-NEXT: ld a1, %lo(.LCPI3_0)(a0)
+; RV64-WITHFP-NEXT: li a2, 2
+; RV64-WITHFP-NEXT: call va1
+; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 16
+; RV64-WITHFP-NEXT: ret
%1 = call i32 (ptr, ...) @va1(ptr undef, double 1.0, i32 2)
ret void
}
@@ -395,6 +616,59 @@ define i64 @va2(ptr %fmt, ...) nounwind {
; RV64-NEXT: ld a0, 0(a1)
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va2:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 7
+; RV32-WITHFP-NEXT: andi a1, a0, -8
+; RV32-WITHFP-NEXT: addi a0, a0, 8
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a1)
+; RV32-WITHFP-NEXT: lw a1, 4(a1)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va2:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a1, a0, 7
+; RV64-WITHFP-NEXT: andi a1, a1, -8
+; RV64-WITHFP-NEXT: addi a0, a0, 15
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, 0(a1)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%argp.cur = load ptr, ptr %va
@@ -459,6 +733,61 @@ define i64 @va2_va_arg(ptr %fmt, ...) nounwind {
; RV64-NEXT: srli a0, a0, 32
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va2_va_arg:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: li a1, 0
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va2_va_arg:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: slli a0, a0, 32
+; RV64-WITHFP-NEXT: srli a0, a0, 32
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -487,6 +816,32 @@ define void @va2_caller() nounwind {
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va2_caller:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -16
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: li a1, 1
+; RV32-WITHFP-NEXT: call va2
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va2_caller:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -16
+; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 16
+; RV64-WITHFP-NEXT: li a1, 1
+; RV64-WITHFP-NEXT: call va2
+; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 16
+; RV64-WITHFP-NEXT: ret
%1 = call i64 (ptr, ...) @va2(ptr undef, i32 1)
ret void
}
@@ -617,6 +972,61 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va3:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 20(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 16(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 24
+; RV32-WITHFP-NEXT: sw a3, 4(s0)
+; RV32-WITHFP-NEXT: sw a4, 8(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: sw a5, 12(s0)
+; RV32-WITHFP-NEXT: sw a6, 16(s0)
+; RV32-WITHFP-NEXT: sw a7, 20(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 7
+; RV32-WITHFP-NEXT: andi a3, a0, -8
+; RV32-WITHFP-NEXT: addi a0, a0, 8
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a4, 0(a3)
+; RV32-WITHFP-NEXT: lw a3, 4(a3)
+; RV32-WITHFP-NEXT: add a0, a1, a4
+; RV32-WITHFP-NEXT: sltu a1, a0, a4
+; RV32-WITHFP-NEXT: add a2, a2, a3
+; RV32-WITHFP-NEXT: add a1, a2, a1
+; RV32-WITHFP-NEXT: lw ra, 20(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 16(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va3:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -80
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a2, 0(s0)
+; RV64-WITHFP-NEXT: sd a3, 8(s0)
+; RV64-WITHFP-NEXT: sd a4, 16(s0)
+; RV64-WITHFP-NEXT: mv a0, s0
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: sd a5, 24(s0)
+; RV64-WITHFP-NEXT: sd a6, 32(s0)
+; RV64-WITHFP-NEXT: sd a7, 40(s0)
+; RV64-WITHFP-NEXT: addi a2, a0, 7
+; RV64-WITHFP-NEXT: andi a2, a2, -8
+; RV64-WITHFP-NEXT: addi a0, a0, 15
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, 0(a2)
+; RV64-WITHFP-NEXT: add a0, a1, a0
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 80
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%argp.cur = load ptr, ptr %va
@@ -682,6 +1092,61 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va3_va_arg:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 20(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 16(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 24
+; RV32-WITHFP-NEXT: sw a3, 4(s0)
+; RV32-WITHFP-NEXT: sw a4, 8(s0)
+; RV32-WITHFP-NEXT: sw a5, 12(s0)
+; RV32-WITHFP-NEXT: sw a6, 16(s0)
+; RV32-WITHFP-NEXT: sw a7, 20(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a3, a0, 4
+; RV32-WITHFP-NEXT: sw a3, -12(s0)
+; RV32-WITHFP-NEXT: lw a3, 0(a0)
+; RV32-WITHFP-NEXT: add a0, a1, a3
+; RV32-WITHFP-NEXT: sltu a1, a0, a3
+; RV32-WITHFP-NEXT: add a1, a2, a1
+; RV32-WITHFP-NEXT: lw ra, 20(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 16(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va3_va_arg:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -80
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a2, 0(s0)
+; RV64-WITHFP-NEXT: sd a3, 8(s0)
+; RV64-WITHFP-NEXT: sd a4, 16(s0)
+; RV64-WITHFP-NEXT: sd a5, 24(s0)
+; RV64-WITHFP-NEXT: sd a6, 32(s0)
+; RV64-WITHFP-NEXT: sd a7, 40(s0)
+; RV64-WITHFP-NEXT: mv a0, s0
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a2, a0, 4
+; RV64-WITHFP-NEXT: sd a2, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: slli a0, a0, 32
+; RV64-WITHFP-NEXT: srli a0, a0, 32
+; RV64-WITHFP-NEXT: add a0, a1, a0
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 80
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -718,6 +1183,39 @@ define void @va3_caller() nounwind {
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va3_caller:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -16
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: lui a0, 5
+; RV32-WITHFP-NEXT: addi a3, a0, -480
+; RV32-WITHFP-NEXT: li a0, 2
+; RV32-WITHFP-NEXT: li a1, 1111
+; RV32-WITHFP-NEXT: li a2, 0
+; RV32-WITHFP-NEXT: call va3
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va3_caller:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -16
+; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 16
+; RV64-WITHFP-NEXT: lui a0, 5
+; RV64-WITHFP-NEXT: addiw a2, a0, -480
+; RV64-WITHFP-NEXT: li a0, 2
+; RV64-WITHFP-NEXT: li a1, 1111
+; RV64-WITHFP-NEXT: call va3
+; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 16
+; RV64-WITHFP-NEXT: ret
%1 = call i64 (i32, i64, ...) @va3(i32 2, i64 1111, i32 20000)
ret void
}
@@ -745,9 +1243,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV32-NEXT: addi a1, a0, 4
; RV32-NEXT: sw a1, 4(sp)
; RV32-NEXT: lw a1, 4(sp)
-; RV32-NEXT: mv a2, sp
; RV32-NEXT: lw s0, 0(a0)
-; RV32-NEXT: sw a2, 0(a1)
+; RV32-NEXT: sw a1, 0(sp)
; RV32-NEXT: lw a0, 0(sp)
; RV32-NEXT: call notdead
; RV32-NEXT: lw a0, 4(sp)
@@ -796,9 +1293,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV64-NEXT: addi a1, a0, 4
; RV64-NEXT: sd a1, 8(sp)
; RV64-NEXT: ld a1, 8(sp)
-; RV64-NEXT: mv a2, sp
; RV64-NEXT: lw s0, 0(a0)
-; RV64-NEXT: sd a2, 0(a1)
+; RV64-NEXT: sd a1, 0(sp)
; RV64-NEXT: lw a0, 4(sp)
; RV64-NEXT: lwu a1, 0(sp)
; RV64-NEXT: slli a0, a0, 32
@@ -829,6 +1325,115 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 96
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va4_va_copy:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -64
+; RV32-WITHFP-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 32
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, -16(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw s1, 0(a0)
+; RV32-WITHFP-NEXT: sw a1, -20(s0)
+; RV32-WITHFP-NEXT: lw a0, -20(s0)
+; RV32-WITHFP-NEXT: call notdead
+; RV32-WITHFP-NEXT: lw a0, -16(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: addi a1, a1, 3
+; RV32-WITHFP-NEXT: andi a1, a1, -4
+; RV32-WITHFP-NEXT: addi a2, a1, 4
+; RV32-WITHFP-NEXT: sw a2, -16(s0)
+; RV32-WITHFP-NEXT: lw a2, -16(s0)
+; RV32-WITHFP-NEXT: lw a1, 0(a1)
+; RV32-WITHFP-NEXT: addi a2, a2, 3
+; RV32-WITHFP-NEXT: andi a2, a2, -4
+; RV32-WITHFP-NEXT: addi a3, a2, 4
+; RV32-WITHFP-NEXT: sw a3, -16(s0)
+; RV32-WITHFP-NEXT: lw a2, 0(a2)
+; RV32-WITHFP-NEXT: add a0, a0, s1
+; RV32-WITHFP-NEXT: add a1, a1, a2
+; RV32-WITHFP-NEXT: add a0, a0, a1
+; RV32-WITHFP-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 64
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va4_va_copy:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -112
+; RV64-WITHFP-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 48
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -32(s0)
+; RV64-WITHFP-NEXT: ld a0, -32(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -32(s0)
+; RV64-WITHFP-NEXT: ld a1, -32(s0)
+; RV64-WITHFP-NEXT: lw s1, 0(a0)
+; RV64-WITHFP-NEXT: sd a1, -40(s0)
+; RV64-WITHFP-NEXT: lw a0, -36(s0)
+; RV64-WITHFP-NEXT: lwu a1, -40(s0)
+; RV64-WITHFP-NEXT: slli a0, a0, 32
+; RV64-WITHFP-NEXT: or a0, a0, a1
+; RV64-WITHFP-NEXT: call notdead
+; RV64-WITHFP-NEXT: ld a0, -32(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -32(s0)
+; RV64-WITHFP-NEXT: ld a1, -32(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: addi a1, a1, 3
+; RV64-WITHFP-NEXT: andi a1, a1, -4
+; RV64-WITHFP-NEXT: addi a2, a1, 4
+; RV64-WITHFP-NEXT: sd a2, -32(s0)
+; RV64-WITHFP-NEXT: ld a2, -32(s0)
+; RV64-WITHFP-NEXT: lw a1, 0(a1)
+; RV64-WITHFP-NEXT: addi a2, a2, 3
+; RV64-WITHFP-NEXT: andi a2, a2, -4
+; RV64-WITHFP-NEXT: addi a3, a2, 4
+; RV64-WITHFP-NEXT: sd a3, -32(s0)
+; RV64-WITHFP-NEXT: lw a2, 0(a2)
+; RV64-WITHFP-NEXT: add a0, a0, s1
+; RV64-WITHFP-NEXT: add a1, a1, a2
+; RV64-WITHFP-NEXT: addw a0, a0, a1
+; RV64-WITHFP-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 112
+; RV64-WITHFP-NEXT: ret
%vargs = alloca ptr
%wargs = alloca ptr
call void @llvm.va_start(ptr %vargs)
@@ -899,6 +1504,60 @@ define i32 @va6_no_fixed_args(...) nounwind {
; RV64-NEXT: lw a0, 0(a0)
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va6_no_fixed_args:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a0, 0(s0)
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: mv a0, s0
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va6_no_fixed_args:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a0, 0(s0)
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: mv a0, s0
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -993,6 +1652,85 @@ define i32 @va_large_stack(ptr %fmt, ...) {
; RV64-NEXT: addiw a1, a1, 336
; RV64-NEXT: add sp, sp, a1
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va_large_stack:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -2032
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 2032
+; RV32-WITHFP-NEXT: sw ra, 1996(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 1992(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: .cfi_offset ra, -36
+; RV32-WITHFP-NEXT: .cfi_offset s0, -40
+; RV32-WITHFP-NEXT: addi s0, sp, 2000
+; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 32
+; RV32-WITHFP-NEXT: lui a0, 24414
+; RV32-WITHFP-NEXT: addi a0, a0, -1728
+; RV32-WITHFP-NEXT: sub sp, sp, a0
+; RV32-WITHFP-NEXT: lui a0, 24414
+; RV32-WITHFP-NEXT: addi a0, a0, 272
+; RV32-WITHFP-NEXT: sub a0, s0, a0
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: addi a1, s0, 4
+; RV32-WITHFP-NEXT: sw a1, 0(a0)
+; RV32-WITHFP-NEXT: lw a1, 0(a0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a2, a1, 4
+; RV32-WITHFP-NEXT: sw a2, 0(a0)
+; RV32-WITHFP-NEXT: lw a0, 0(a1)
+; RV32-WITHFP-NEXT: lui a1, 24414
+; RV32-WITHFP-NEXT: addi a1, a1, -1728
+; RV32-WITHFP-NEXT: add sp, sp, a1
+; RV32-WITHFP-NEXT: lw ra, 1996(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 1992(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 2032
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va_large_stack:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -2032
+; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 2032
+; RV64-WITHFP-NEXT: sd ra, 1960(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 1952(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: .cfi_offset ra, -72
+; RV64-WITHFP-NEXT: .cfi_offset s0, -80
+; RV64-WITHFP-NEXT: addi s0, sp, 1968
+; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 64
+; RV64-WITHFP-NEXT: lui a0, 24414
+; RV64-WITHFP-NEXT: addiw a0, a0, -1680
+; RV64-WITHFP-NEXT: sub sp, sp, a0
+; RV64-WITHFP-NEXT: lui a0, 24414
+; RV64-WITHFP-NEXT: addiw a0, a0, 288
+; RV64-WITHFP-NEXT: sub a0, s0, a0
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: addi a1, s0, 8
+; RV64-WITHFP-NEXT: sd a1, 0(a0)
+; RV64-WITHFP-NEXT: lw a1, 4(a0)
+; RV64-WITHFP-NEXT: lwu a2, 0(a0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: slli a1, a1, 32
+; RV64-WITHFP-NEXT: or a1, a1, a2
+; RV64-WITHFP-NEXT: addi a2, a1, 4
+; RV64-WITHFP-NEXT: srli a3, a2, 32
+; RV64-WITHFP-NEXT: sw a2, 0(a0)
+; RV64-WITHFP-NEXT: sw a3, 4(a0)
+; RV64-WITHFP-NEXT: lw a0, 0(a1)
+; RV64-WITHFP-NEXT: lui a1, 24414
+; RV64-WITHFP-NEXT: addiw a1, a1, -1680
+; RV64-WITHFP-NEXT: add sp, sp, a1
+; RV64-WITHFP-NEXT: ld ra, 1960(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 1952(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 2032
+; RV64-WITHFP-NEXT: ret
%large = alloca [ 100000000 x i8 ]
%va = alloca ptr
call void @llvm.va_start(ptr %va)
@@ -1004,5 +1742,193 @@ define i32 @va_large_stack(ptr %fmt, ...) {
ret i32 %1
}
+define i32 @va_vprintf(ptr %fmt, ptr %arg_start) {
+; RV32-LABEL: va_vprintf:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: lw a0, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: lw a0, 8(sp)
+; RV32-NEXT: addi a0, a0, 3
+; RV32-NEXT: andi a0, a0, -4
+; RV32-NEXT: addi a1, a0, 4
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lw a0, 0(a0)
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: va_vprintf:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd a1, 8(sp)
+; RV64-NEXT: ld a0, 8(sp)
+; RV64-NEXT: sd a0, 0(sp)
+; RV64-NEXT: ld a0, 0(sp)
+; RV64-NEXT: addi a0, a0, 3
+; RV64-NEXT: andi a0, a0, -4
+; RV64-NEXT: addi a1, a0, 4
+; RV64-NEXT: sd a1, 0(sp)
+; RV64-NEXT: lw a0, 0(a0)
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va_vprintf:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -16
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 16
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: .cfi_offset ra, -4
+; RV32-WITHFP-NEXT: .cfi_offset s0, -8
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 0
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: sw a0, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, -16(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va_vprintf:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -32
+; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 32
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: .cfi_offset ra, -8
+; RV64-WITHFP-NEXT: .cfi_offset s0, -16
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 0
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: sd a0, -32(s0)
+; RV64-WITHFP-NEXT: ld a0, -32(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -32(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 32
+; RV64-WITHFP-NEXT: ret
+ %args = alloca ptr
+ %args_cp = alloca ptr
+ store ptr %arg_start, ptr %args
+ call void @llvm.va_copy(ptr %args_cp, ptr %args)
+ %width = va_arg ptr %args_cp, i32
+ call void @llvm.va_end(ptr %args_cp)
+ ret i32 %width
+}
-
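+; va_printf mirrors the printf/vprintf pattern: it va_starts its own
+; variadic list and hands the current position to va_vprintf above.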
+define i32 @va_printf(ptr %fmt, ...) {
+; RV32-LABEL: va_printf:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -36
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a2, 24(sp)
+; RV32-NEXT: sw a3, 28(sp)
+; RV32-NEXT: sw a4, 32(sp)
+; RV32-NEXT: addi a1, sp, 20
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lw a1, 8(sp)
+; RV32-NEXT: sw a5, 36(sp)
+; RV32-NEXT: sw a6, 40(sp)
+; RV32-NEXT: sw a7, 44(sp)
+; RV32-NEXT: call va_vprintf
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: va_printf:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: .cfi_def_cfa_offset 80
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -72
+; RV64-NEXT: sd a1, 24(sp)
+; RV64-NEXT: sd a2, 32(sp)
+; RV64-NEXT: sd a3, 40(sp)
+; RV64-NEXT: sd a4, 48(sp)
+; RV64-NEXT: addi a1, sp, 24
+; RV64-NEXT: sd a1, 0(sp)
+; RV64-NEXT: ld a1, 0(sp)
+; RV64-NEXT: sd a5, 56(sp)
+; RV64-NEXT: sd a6, 64(sp)
+; RV64-NEXT: sd a7, 72(sp)
+; RV64-NEXT: call va_vprintf
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va_printf:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: .cfi_offset ra, -36
+; RV32-WITHFP-NEXT: .cfi_offset s0, -40
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 32
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: addi a1, s0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a1, -12(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: call va_vprintf
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va_printf:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: .cfi_offset ra, -72
+; RV64-WITHFP-NEXT: .cfi_offset s0, -80
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 64
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: addi a1, s0, 8
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: ld a1, -24(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: call va_vprintf
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
+ %args = alloca ptr
+ call void @llvm.va_start(ptr %args)
+ %arg_start = load ptr, ptr %args
+ %ret_val = call i32 @va_vprintf(ptr %fmt, ptr %arg_start)
+ call void @llvm.va_end(ptr %args)
+ ret i32 %ret_val
+}
diff --git a/llvm/test/CodeGen/RISCV/allow-check.ll b/llvm/test/CodeGen/RISCV/allow-check.ll
new file mode 100644
index 0000000..0ddb526
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/allow-check.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -global-isel=0 -fast-isel=1 | FileCheck %s
+
+; RUN: llc < %s -mtriple=riscv64 -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -global-isel=0 -fast-isel=1 | FileCheck %s
+
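+; All six ISel configurations above are expected to fold the allow-check
+; intrinsics to a constant true.
+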
+define i1 @test_runtime() local_unnamed_addr {
+; CHECK-LABEL: test_runtime:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: ret
+entry:
+ %allow = call i1 @llvm.allow.runtime.check(metadata !"test_check")
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.runtime.check(metadata) nounwind
+
+define i1 @test_ubsan() local_unnamed_addr {
+; CHECK-LABEL: test_ubsan:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: ret
+entry:
+ %allow = call i1 @llvm.allow.ubsan.check(i8 7)
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.ubsan.check(i8) nounwind
diff --git a/llvm/test/CodeGen/RISCV/attributes-module-flag.ll b/llvm/test/CodeGen/RISCV/attributes-module-flag.ll
new file mode 100644
index 0000000..4580539
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/attributes-module-flag.ll
@@ -0,0 +1,17 @@
+; RUN: llc -mtriple=riscv32 %s -o - | FileCheck %s --check-prefix=RV32
+; RUN: llc -mtriple=riscv64 %s -o - | FileCheck %s --check-prefix=RV64
+
+; Test generation of ELF attribute from module metadata
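+; The two ISA strings in the "riscv-isa" flag are merged: the emitted arch
+; attribute carries the union of their extensions (m and zba) at the
+; target's XLEN.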
+
+; RV32: .attribute 5, "rv32i2p1_m2p0_zba1p0"
+; RV64: .attribute 5, "rv64i2p1_m2p0_zba1p0"
+
+define i32 @addi(i32 %a) {
+ %1 = add i32 %a, 1
+ ret i32 %1
+}
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 6, !"riscv-isa", !1}
+!1 = !{!"rv64i2p1_m2p0", !"rv64i2p1_zba1p0"}
diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index 455e6e5..549d531 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -1160,8 +1160,6 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
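+; The multiply-by-0x01010101 step of this expansion is now lowered as two
+; shift-and-add pairs, so the __mulsi3/__muldi3 libcall (and the stack
+; frame it required) is no longer emitted.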
; RV32I: # %bb.0:
; RV32I-NEXT: beqz a0, .LBB10_2
; RV32I-NEXT: # %bb.1: # %cond.false
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
@@ -1189,12 +1187,11 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB10_2:
; RV32I-NEXT: li a0, 32
@@ -1205,8 +1202,6 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV64I-NEXT: sext.w a1, a0
; RV64I-NEXT: beqz a1, .LBB10_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -1232,14 +1227,13 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB10_2:
; RV64I-NEXT: li a0, 32
@@ -1354,19 +1348,16 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV32I-LABEL: test_ctlz_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB11_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -1377,28 +1368,26 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB11_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -1409,43 +1398,27 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB11_2
-; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a0, a0, 32
-; RV32I-NEXT: j .LBB11_3
-; RV32I-NEXT: .LBB11_2:
-; RV32I-NEXT: srli a0, s1, 24
-; RV32I-NEXT: .LBB11_3:
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctlz_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB11_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -1481,14 +1454,13 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB11_2:
; RV64I-NEXT: li a0, 64
@@ -1831,8 +1803,6 @@ define i16 @test_ctlz_i16_zero_undef(i16 %a) nounwind {
define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
; RV32I-LABEL: test_ctlz_i32_zero_undef:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
@@ -1860,18 +1830,15 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctlz_i32_zero_undef:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -1897,14 +1864,13 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32M-LABEL: test_ctlz_i32_zero_undef:
@@ -2005,19 +1971,16 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV32I-LABEL: test_ctlz_i64_zero_undef:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB15_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -2028,28 +1991,26 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB15_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -2060,41 +2021,25 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB15_2
-; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a0, a0, 32
-; RV32I-NEXT: j .LBB15_3
-; RV32I-NEXT: .LBB15_2:
-; RV32I-NEXT: srli a0, s1, 24
-; RV32I-NEXT: .LBB15_3:
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctlz_i64_zero_undef:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -2130,14 +2075,13 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32M-LABEL: test_ctlz_i64_zero_undef:
@@ -2464,8 +2408,6 @@ define i16 @test_ctpop_i16(i16 %a) nounwind {
define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV32I-LABEL: test_ctpop_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: lui a2, 349525
; RV32I-NEXT: addi a2, a2, 1365
@@ -2482,18 +2424,15 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctpop_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -2508,14 +2447,13 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32M-LABEL: test_ctpop_i32:
@@ -2578,8 +2516,6 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
;
; RV32XTHEADBB-LABEL: test_ctpop_i32:
; RV32XTHEADBB: # %bb.0:
-; RV32XTHEADBB-NEXT: addi sp, sp, -16
-; RV32XTHEADBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32XTHEADBB-NEXT: srli a1, a0, 1
; RV32XTHEADBB-NEXT: lui a2, 349525
; RV32XTHEADBB-NEXT: addi a2, a2, 1365
@@ -2596,18 +2532,15 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV32XTHEADBB-NEXT: lui a1, 61681
; RV32XTHEADBB-NEXT: addi a1, a1, -241
; RV32XTHEADBB-NEXT: and a0, a0, a1
-; RV32XTHEADBB-NEXT: lui a1, 4112
-; RV32XTHEADBB-NEXT: addi a1, a1, 257
-; RV32XTHEADBB-NEXT: call __mulsi3
+; RV32XTHEADBB-NEXT: slli a1, a0, 8
+; RV32XTHEADBB-NEXT: add a0, a0, a1
+; RV32XTHEADBB-NEXT: slli a1, a0, 16
+; RV32XTHEADBB-NEXT: add a0, a0, a1
; RV32XTHEADBB-NEXT: srli a0, a0, 24
-; RV32XTHEADBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: addi sp, sp, 16
; RV32XTHEADBB-NEXT: ret
;
; RV64XTHEADBB-LABEL: test_ctpop_i32:
; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: addi sp, sp, -16
-; RV64XTHEADBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64XTHEADBB-NEXT: srli a1, a0, 1
; RV64XTHEADBB-NEXT: lui a2, 349525
; RV64XTHEADBB-NEXT: addiw a2, a2, 1365
@@ -2622,14 +2555,13 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV64XTHEADBB-NEXT: srli a1, a0, 4
; RV64XTHEADBB-NEXT: add a0, a0, a1
; RV64XTHEADBB-NEXT: lui a1, 61681
-; RV64XTHEADBB-NEXT: addiw a1, a1, -241
+; RV64XTHEADBB-NEXT: addi a1, a1, -241
; RV64XTHEADBB-NEXT: and a0, a0, a1
-; RV64XTHEADBB-NEXT: lui a1, 4112
-; RV64XTHEADBB-NEXT: addiw a1, a1, 257
-; RV64XTHEADBB-NEXT: call __muldi3
+; RV64XTHEADBB-NEXT: slli a1, a0, 8
+; RV64XTHEADBB-NEXT: add a0, a0, a1
+; RV64XTHEADBB-NEXT: slli a1, a0, 16
+; RV64XTHEADBB-NEXT: add a0, a0, a1
; RV64XTHEADBB-NEXT: srliw a0, a0, 24
-; RV64XTHEADBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64XTHEADBB-NEXT: addi sp, sp, 16
; RV64XTHEADBB-NEXT: ret
%1 = call i32 @llvm.ctpop.i32(i32 %a)
ret i32 %1
@@ -2638,65 +2570,48 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV32I-LABEL: test_ctpop_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s2, a2, 1365
-; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: sub a1, a1, a0
-; RV32I-NEXT: lui a0, 209715
-; RV32I-NEXT: addi s3, a0, 819
-; RV32I-NEXT: and a0, a1, s3
+; RV32I-NEXT: srli a2, a1, 1
+; RV32I-NEXT: lui a3, 349525
+; RV32I-NEXT: addi a3, a3, 1365
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a2, a2, 819
+; RV32I-NEXT: and a4, a1, a2
; RV32I-NEXT: srli a1, a1, 2
-; RV32I-NEXT: and a1, a1, s3
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s4, a1, -241
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s1, a1, 257
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s5, a0, 24
-; RV32I-NEXT: srli a0, s0, 1
-; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: sub s0, s0, a0
-; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: srli s0, s0, 2
-; RV32I-NEXT: and a1, s0, s3
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: add a1, a4, a1
+; RV32I-NEXT: srli a4, a1, 4
+; RV32I-NEXT: add a1, a1, a4
+; RV32I-NEXT: lui a4, 61681
+; RV32I-NEXT: addi a4, a4, -241
+; RV32I-NEXT: and a1, a1, a4
+; RV32I-NEXT: slli a5, a1, 8
+; RV32I-NEXT: add a1, a1, a5
+; RV32I-NEXT: slli a5, a1, 16
+; RV32I-NEXT: add a1, a1, a5
+; RV32I-NEXT: srli a1, a1, 24
+; RV32I-NEXT: srli a5, a0, 1
+; RV32I-NEXT: and a3, a5, a3
+; RV32I-NEXT: sub a0, a0, a3
+; RV32I-NEXT: and a3, a0, a2
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: add a0, a3, a0
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: add a0, a0, a2
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: add a0, a0, s5
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctpop_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -2719,14 +2634,13 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32M-LABEL: test_ctpop_i64:
@@ -2814,65 +2728,48 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
;
; RV32XTHEADBB-LABEL: test_ctpop_i64:
; RV32XTHEADBB: # %bb.0:
-; RV32XTHEADBB-NEXT: addi sp, sp, -32
-; RV32XTHEADBB-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: mv s0, a0
-; RV32XTHEADBB-NEXT: srli a0, a1, 1
-; RV32XTHEADBB-NEXT: lui a2, 349525
-; RV32XTHEADBB-NEXT: addi s2, a2, 1365
-; RV32XTHEADBB-NEXT: and a0, a0, s2
-; RV32XTHEADBB-NEXT: sub a1, a1, a0
-; RV32XTHEADBB-NEXT: lui a0, 209715
-; RV32XTHEADBB-NEXT: addi s3, a0, 819
-; RV32XTHEADBB-NEXT: and a0, a1, s3
+; RV32XTHEADBB-NEXT: srli a2, a1, 1
+; RV32XTHEADBB-NEXT: lui a3, 349525
+; RV32XTHEADBB-NEXT: addi a3, a3, 1365
+; RV32XTHEADBB-NEXT: and a2, a2, a3
+; RV32XTHEADBB-NEXT: sub a1, a1, a2
+; RV32XTHEADBB-NEXT: lui a2, 209715
+; RV32XTHEADBB-NEXT: addi a2, a2, 819
+; RV32XTHEADBB-NEXT: and a4, a1, a2
; RV32XTHEADBB-NEXT: srli a1, a1, 2
-; RV32XTHEADBB-NEXT: and a1, a1, s3
-; RV32XTHEADBB-NEXT: add a0, a0, a1
-; RV32XTHEADBB-NEXT: srli a1, a0, 4
-; RV32XTHEADBB-NEXT: add a0, a0, a1
-; RV32XTHEADBB-NEXT: lui a1, 61681
-; RV32XTHEADBB-NEXT: addi s4, a1, -241
-; RV32XTHEADBB-NEXT: and a0, a0, s4
-; RV32XTHEADBB-NEXT: lui a1, 4112
-; RV32XTHEADBB-NEXT: addi s1, a1, 257
-; RV32XTHEADBB-NEXT: mv a1, s1
-; RV32XTHEADBB-NEXT: call __mulsi3
-; RV32XTHEADBB-NEXT: srli s5, a0, 24
-; RV32XTHEADBB-NEXT: srli a0, s0, 1
-; RV32XTHEADBB-NEXT: and a0, a0, s2
-; RV32XTHEADBB-NEXT: sub s0, s0, a0
-; RV32XTHEADBB-NEXT: and a0, s0, s3
-; RV32XTHEADBB-NEXT: srli s0, s0, 2
-; RV32XTHEADBB-NEXT: and a1, s0, s3
-; RV32XTHEADBB-NEXT: add a0, a0, a1
-; RV32XTHEADBB-NEXT: srli a1, a0, 4
-; RV32XTHEADBB-NEXT: add a0, a0, a1
-; RV32XTHEADBB-NEXT: and a0, a0, s4
-; RV32XTHEADBB-NEXT: mv a1, s1
-; RV32XTHEADBB-NEXT: call __mulsi3
+; RV32XTHEADBB-NEXT: and a1, a1, a2
+; RV32XTHEADBB-NEXT: add a1, a4, a1
+; RV32XTHEADBB-NEXT: srli a4, a1, 4
+; RV32XTHEADBB-NEXT: add a1, a1, a4
+; RV32XTHEADBB-NEXT: lui a4, 61681
+; RV32XTHEADBB-NEXT: addi a4, a4, -241
+; RV32XTHEADBB-NEXT: and a1, a1, a4
+; RV32XTHEADBB-NEXT: slli a5, a1, 8
+; RV32XTHEADBB-NEXT: add a1, a1, a5
+; RV32XTHEADBB-NEXT: slli a5, a1, 16
+; RV32XTHEADBB-NEXT: add a1, a1, a5
+; RV32XTHEADBB-NEXT: srli a1, a1, 24
+; RV32XTHEADBB-NEXT: srli a5, a0, 1
+; RV32XTHEADBB-NEXT: and a3, a5, a3
+; RV32XTHEADBB-NEXT: sub a0, a0, a3
+; RV32XTHEADBB-NEXT: and a3, a0, a2
+; RV32XTHEADBB-NEXT: srli a0, a0, 2
+; RV32XTHEADBB-NEXT: and a0, a0, a2
+; RV32XTHEADBB-NEXT: add a0, a3, a0
+; RV32XTHEADBB-NEXT: srli a2, a0, 4
+; RV32XTHEADBB-NEXT: add a0, a0, a2
+; RV32XTHEADBB-NEXT: and a0, a0, a4
+; RV32XTHEADBB-NEXT: slli a2, a0, 8
+; RV32XTHEADBB-NEXT: add a0, a0, a2
+; RV32XTHEADBB-NEXT: slli a2, a0, 16
+; RV32XTHEADBB-NEXT: add a0, a0, a2
; RV32XTHEADBB-NEXT: srli a0, a0, 24
-; RV32XTHEADBB-NEXT: add a0, a0, s5
+; RV32XTHEADBB-NEXT: add a0, a0, a1
; RV32XTHEADBB-NEXT: li a1, 0
-; RV32XTHEADBB-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: addi sp, sp, 32
; RV32XTHEADBB-NEXT: ret
;
; RV64XTHEADBB-LABEL: test_ctpop_i64:
; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: addi sp, sp, -16
-; RV64XTHEADBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64XTHEADBB-NEXT: srli a1, a0, 1
; RV64XTHEADBB-NEXT: lui a2, 349525
; RV64XTHEADBB-NEXT: addiw a2, a2, 1365
@@ -2895,14 +2792,13 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV64XTHEADBB-NEXT: slli a2, a1, 32
; RV64XTHEADBB-NEXT: add a1, a1, a2
; RV64XTHEADBB-NEXT: and a0, a0, a1
-; RV64XTHEADBB-NEXT: lui a1, 4112
-; RV64XTHEADBB-NEXT: addiw a1, a1, 257
-; RV64XTHEADBB-NEXT: slli a2, a1, 32
-; RV64XTHEADBB-NEXT: add a1, a1, a2
-; RV64XTHEADBB-NEXT: call __muldi3
+; RV64XTHEADBB-NEXT: slli a1, a0, 8
+; RV64XTHEADBB-NEXT: add a0, a0, a1
+; RV64XTHEADBB-NEXT: slli a1, a0, 16
+; RV64XTHEADBB-NEXT: add a0, a0, a1
+; RV64XTHEADBB-NEXT: slli a1, a0, 32
+; RV64XTHEADBB-NEXT: add a0, a0, a1
; RV64XTHEADBB-NEXT: srli a0, a0, 56
-; RV64XTHEADBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64XTHEADBB-NEXT: addi sp, sp, 16
; RV64XTHEADBB-NEXT: ret
%1 = call i64 @llvm.ctpop.i64(i64 %a)
ret i64 %1
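
The ctlz/ctpop updates in this file all make the same substitution: on RV32I/RV64I without the M extension, the byte-summing multiply at the end of the SWAR popcount (previously a __mulsi3/__muldi3 libcall with the constant 0x01010101, or its 64-bit widening) becomes two shift-and-add steps (three on RV64), which also lets each function drop the call-induced stack frame and ra spill. The two forms agree modulo 2^XLEN; a sketch of the i32 equivalence in LLVM IR (function names are illustrative only):

define i32 @sum_bytes_mul(i32 %x) {
  ; x * 0x01010101 accumulates all four byte lanes into the top byte
  %m = mul i32 %x, 16843009
  %r = lshr i32 %m, 24
  ret i32 %r
}

define i32 @sum_bytes_shladd(i32 %x) {
  ; x + (x << 8): byte 1 now holds byte1+byte0, byte 3 holds byte3+byte2
  %s8  = shl i32 %x, 8
  %a8  = add i32 %x, %s8
  ; adding (t << 16) then folds the low half into the high half
  %s16 = shl i32 %a8, 16
  %a16 = add i32 %a8, %s16
  ; top byte = byte0+byte1+byte2+byte3; no carries escape, since each
  ; popcount byte is at most 8 and the total is at most 32
  %r   = lshr i32 %a16, 24
  ret i32 %r
}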
diff --git a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
index adf6144..9ae30e6 100644
--- a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
+++ b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
@@ -602,19 +602,16 @@ define signext i32 @ctlz(i64 %b) nounwind {
;
; RV32I-LABEL: ctlz:
; RV32I: # %bb.0: # %entry
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB7_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -625,28 +622,26 @@ define signext i32 @ctlz(i64 %b) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: andi a0, a0, 63
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB7_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -657,41 +652,25 @@ define signext i32 @ctlz(i64 %b) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB7_2
-; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi s1, a0, 32
-; RV32I-NEXT: j .LBB7_3
-; RV32I-NEXT: .LBB7_2:
-; RV32I-NEXT: srli s1, s1, 24
-; RV32I-NEXT: .LBB7_3: # %entry
-; RV32I-NEXT: andi a0, s1, 63
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: andi a0, a0, 63
; RV32I-NEXT: ret
;
; RV64I-LABEL: ctlz:
; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -727,15 +706,14 @@ define signext i32 @ctlz(i64 %b) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a0, a0, 2
; RV64I-NEXT: srli a0, a0, 58
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/double-arith-strict.ll b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
index 1861755..2333693 100644
--- a/llvm/test/CodeGen/RISCV/double-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
@@ -24,21 +24,7 @@ define double @fadd_d(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fadd_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fadd_d:
@@ -76,21 +62,7 @@ define double @fsub_d(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fsub_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsub.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsub_d:
@@ -128,21 +100,7 @@ define double @fmul_d(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fmul_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmul.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmul_d:
@@ -180,21 +138,7 @@ define double @fdiv_d(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fdiv_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fdiv.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fdiv_d:
@@ -232,17 +176,7 @@ define double @fsqrt_d(double %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fsqrt_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsqrt.d a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsqrt_d:
@@ -398,25 +332,7 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fmadd_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmadd_d:
@@ -463,27 +379,9 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fmsub_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fmsub.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmsub_d:
@@ -572,28 +470,10 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fnmadd_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d:
@@ -701,28 +581,10 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fnmadd_d_2:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a2, a0, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d_2:
@@ -829,27 +691,9 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fnmsub_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmsub_d:
@@ -932,27 +776,9 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fnmsub_d_2:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a2, a0, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmsub_d_2:
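
The RV32IZFINXZDINX diffs here and in double-arith.ll below are all one pattern: with Zdinx, a double occupies an even/odd GPR pair, and the old code round-tripped every operand and result through a stack slot (sw/sw then lw/lw) just to re-form those pairs. The updated lowering operates on the pairs in place, so each test collapses to the bare FP-in-GPR instruction. The IR behind these checks is just the plain arithmetic, e.g. for fadd_d (sketched here; the register assignments in the comment are read off the CHECK lines above):

define double @fadd_d(double %a, double %b) {
  ; RV32 Zdinx: %a arrives in a0/a1, %b in a2/a3; fadd.d a0, a0, a2
  ; now consumes the pairs directly, with no stack traffic.
  %r = fadd double %a, %b
  ret double %r
}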
diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index 82ddf06..a2093f5 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -25,21 +25,7 @@ define double @fadd_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fadd_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fadd_d:
@@ -76,21 +62,7 @@ define double @fsub_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fsub_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsub.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsub_d:
@@ -127,21 +99,7 @@ define double @fmul_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmul_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmul.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmul_d:
@@ -178,21 +136,7 @@ define double @fdiv_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fdiv_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fdiv.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fdiv_d:
@@ -231,17 +175,7 @@ define double @fsqrt_d(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fsqrt_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsqrt.d a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsqrt_d:
@@ -280,21 +214,7 @@ define double @fsgnj_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fsgnj_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsgnj.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsgnj_d:
@@ -335,15 +255,9 @@ define i32 @fneg_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fneg_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: fneg.d a2, a0
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fneg_d:
@@ -401,21 +315,7 @@ define double @fsgnjn_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fsgnjn_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsgnjn.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsgnjn_d:
@@ -464,23 +364,9 @@ define double @fabs_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fabs_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: fabs.d a2, a0
; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fabs_d:
@@ -532,21 +418,7 @@ define double @fmin_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmin_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmin_d:
@@ -585,21 +457,7 @@ define double @fmax_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmax_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmax_d:
@@ -638,25 +496,7 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmadd_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmadd_d:
@@ -702,27 +542,9 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmsub_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fmsub.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmsub_d:
@@ -811,28 +633,10 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmadd_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d:
@@ -940,28 +744,10 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmadd_d_2:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a2, a0, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d_2:
@@ -1060,27 +846,9 @@ define double @fnmadd_d_3(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmadd_d_3:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lui a2, 524288
; RV32IZFINXZDINX-NEXT: xor a1, a1, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d_3:
@@ -1127,27 +895,9 @@ define double @fnmadd_nsz(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmadd_nsz:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lui a2, 524288
; RV32IZFINXZDINX-NEXT: xor a1, a1, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_nsz:
@@ -1202,27 +952,9 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmsub_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmsub_d:
@@ -1305,27 +1037,9 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmsub_d_2:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a2, a0, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmsub_d_2:
@@ -1403,25 +1117,7 @@ define double @fmadd_d_contract(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmadd_d_contract:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmadd_d_contract:
@@ -1482,27 +1178,9 @@ define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmsub_d_contract:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fmsub.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmsub_d_contract:
@@ -1601,29 +1279,11 @@ define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmadd_d_contract:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d_contract:
@@ -1749,28 +1409,10 @@ define double @fnmsub_d_contract(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmsub_d_contract:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmsub_d_contract:
diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
index 55bf95a..99835ff 100644
--- a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
@@ -141,21 +141,7 @@ define double @fcopysign_fneg(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcopysign_fneg:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsgnjn.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64I-LABEL: fcopysign_fneg:
diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
index 2c5505e..035228e 100644
--- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
@@ -89,23 +89,13 @@ define void @br_fcmp_oeq(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_oeq:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB1_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB1_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_oeq:
@@ -155,23 +145,13 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_oeq_alt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB2_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB2_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_oeq_alt:
@@ -218,23 +198,13 @@ define void @br_fcmp_ogt(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ogt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB3_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB3_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ogt:
@@ -281,23 +251,13 @@ define void @br_fcmp_oge(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_oge:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB4_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB4_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_oge:
@@ -344,23 +304,13 @@ define void @br_fcmp_olt(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_olt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB5_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB5_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_olt:
@@ -407,23 +357,13 @@ define void @br_fcmp_ole(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ole:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB6_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB6_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ole:
@@ -474,25 +414,15 @@ define void @br_fcmp_one(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_one:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: or a0, a0, a4
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB7_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB7_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_one:
@@ -545,25 +475,15 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ord:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB8_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB8_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ord:
@@ -616,25 +536,15 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ueq:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: or a0, a0, a4
; RV32IZFINXZDINX-NEXT: beqz a0, .LBB9_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB9_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ueq:
@@ -683,23 +593,13 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ugt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: beqz a0, .LBB10_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB10_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ugt:
@@ -746,23 +646,13 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_uge:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: beqz a0, .LBB11_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB11_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_uge:
@@ -809,23 +699,13 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ult:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: beqz a0, .LBB12_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB12_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ult:
@@ -872,23 +752,13 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ule:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: beqz a0, .LBB13_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB13_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ule:
@@ -935,23 +805,13 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_une:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: beqz a0, .LBB14_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB14_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_une:
@@ -1002,25 +862,15 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_uno:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
; RV32IZFINXZDINX-NEXT: beqz a0, .LBB15_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB15_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_uno:
diff --git a/llvm/test/CodeGen/RISCV/double-calling-conv.ll b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
index d46256b..57aaa4c 100644
--- a/llvm/test/CodeGen/RISCV/double-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
@@ -28,21 +28,7 @@ define double @callee_double_inreg(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: callee_double_inreg:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
%1 = fadd double %a, %b
ret double %1
@@ -106,22 +92,11 @@ define double @callee_double_split_reg_stack(i32 %a, i64 %b, i64 %c, double %d,
;
; RV32IZFINXZDINX-LABEL: callee_double_split_reg_stack:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: lw a0, 16(sp)
-; RV32IZFINXZDINX-NEXT: sw a7, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a6, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv a0, a7
+; RV32IZFINXZDINX-NEXT: lw a1, 0(sp)
+; RV32IZFINXZDINX-NEXT: mv a3, a6
+; RV32IZFINXZDINX-NEXT: mv a2, a5
; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
%1 = fadd double %d, %e
ret double %1
@@ -190,17 +165,11 @@ define double @callee_double_stack(i64 %a, i64 %b, i64 %c, i64 %d, double %e, do
;
; RV32IZFINXZDINX-LABEL: callee_double_stack:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: lw a0, 24(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 28(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 16(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 20(sp)
-; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
+; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
+; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: ret
%1 = fadd double %e, %f
ret double %1
diff --git a/llvm/test/CodeGen/RISCV/double-convert-strict.ll b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
index 967b119..13bcafb 100644
--- a/llvm/test/CodeGen/RISCV/double-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
@@ -28,13 +28,7 @@ define float @fcvt_s_d(double %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_s_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.s.d a0, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_s_d:
@@ -72,13 +66,7 @@ define double @fcvt_d_s(float %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_s:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.s a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_s:
@@ -116,13 +104,7 @@ define i32 @fcvt_w_d(double %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_d:
@@ -162,13 +144,7 @@ define i32 @fcvt_wu_d(double %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_d:
@@ -210,15 +186,9 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
; RV32IZFINXZDINX-NEXT: seqz a1, a0
; RV32IZFINXZDINX-NEXT: add a0, a0, a1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
@@ -263,13 +233,7 @@ define double @fcvt_d_w(i32 %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w:
@@ -309,14 +273,8 @@ define double @fcvt_d_w_load(ptr %p) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_load:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: lw a0, 0(a0)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_load:
@@ -357,13 +315,7 @@ define double @fcvt_d_wu(i32 %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu:
@@ -409,14 +361,8 @@ define double @fcvt_d_wu_load(ptr %p) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_load:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: lw a0, 0(a0)
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_load:
@@ -661,13 +607,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_i8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_i8:
@@ -705,13 +645,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i8:
@@ -749,13 +683,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_i16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_i16:
@@ -793,13 +721,7 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i16:
diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index 3700a18..da882ca 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -20,13 +20,7 @@ define float @fcvt_s_d(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_s_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.s.d a0, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_s_d:
@@ -63,13 +57,7 @@ define double @fcvt_d_s(float %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_s:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.s a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_s:
@@ -106,13 +94,7 @@ define i32 @fcvt_w_d(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_d:
@@ -153,17 +135,11 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_d_sat:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rtz
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_d_sat:
@@ -287,13 +263,7 @@ define i32 @fcvt_wu_d(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_d:
@@ -334,15 +304,9 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
; RV32IZFINXZDINX-NEXT: seqz a1, a0
; RV32IZFINXZDINX-NEXT: add a0, a0, a1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
@@ -402,17 +366,11 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_d_sat:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rtz
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_d_sat:
@@ -512,13 +470,7 @@ define double @fcvt_d_w(i32 %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w:
@@ -557,14 +509,8 @@ define double @fcvt_d_w_load(ptr %p) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_load:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: lw a0, 0(a0)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_load:
@@ -605,13 +551,7 @@ define double @fcvt_d_wu(i32 %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu:
@@ -656,14 +596,8 @@ define double @fcvt_d_wu_load(ptr %p) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_load:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: lw a0, 0(a0)
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_load:
@@ -749,47 +683,41 @@ define i64 @fcvt_l_d(double %a) nounwind {
define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV32IFD-LABEL: fcvt_l_d_sat:
; RV32IFD: # %bb.0: # %start
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: lui a0, %hi(.LCPI12_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI12_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI12_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB12_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB12_2
; RV32IFD-NEXT: # %bb.1: # %start
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB12_2: # %start
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB12_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI12_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI12_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB12_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB12_4: # %start
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_l_d_sat:
@@ -803,50 +731,44 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_l_d_sat:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI12_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI12_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI12_0)(a2)
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI12_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI12_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI12_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB12_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB12_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %start
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB12_2: # %start
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB12_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI12_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI12_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI12_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB12_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB12_4: # %start
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_l_d_sat:
@@ -1057,18 +979,17 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_lu_d_sat:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: mv s1, a1
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
-; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s2, a2
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: fle.d a0, a2, s0
+; RV32IZFINXZDINX-NEXT: neg s2, a0
+; RV32IZFINXZDINX-NEXT: mv a0, s0
; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI14_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI14_0+4)(a2)
@@ -1079,11 +1000,11 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: or a0, a2, a0
; RV32IZFINXZDINX-NEXT: and a1, s2, a1
; RV32IZFINXZDINX-NEXT: or a1, a2, a1
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_lu_d_sat:
@@ -1185,21 +1106,7 @@ define i64 @fmv_x_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmv_x_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmv_x_d:
@@ -1334,13 +1241,13 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
; RV32IFD-LABEL: fmv_d_x:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw a3, 4(sp)
-; RV32IFD-NEXT: sw a2, 0(sp)
-; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: sw a0, 8(sp)
-; RV32IFD-NEXT: fld fa5, 0(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld fa5, 8(sp)
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld fa4, 8(sp)
-; RV32IFD-NEXT: fadd.d fa0, fa4, fa5
+; RV32IFD-NEXT: fadd.d fa0, fa5, fa4
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
@@ -1353,21 +1260,7 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmv_d_x:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw a3, 20(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 16(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 28(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 24(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 16(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 20(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 24(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 28(sp)
-; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmv_d_x:
@@ -1406,13 +1299,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_i8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_i8:
@@ -1449,13 +1336,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i8:
@@ -1492,13 +1373,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_i16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_i16:
@@ -1535,13 +1410,7 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i16:
@@ -1731,13 +1600,7 @@ define signext i16 @fcvt_w_s_i16(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_s_i16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_s_i16:
@@ -1797,24 +1660,18 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_s_sat_i16:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI26_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI26_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI26_0)(a2)
; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI26_1)
; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI26_1+4)(a4)
; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI26_1)(a4)
-; RV32IZFINXZDINX-NEXT: fmax.d a2, a0, a2
-; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
-; RV32IZFINXZDINX-NEXT: neg a0, a0
-; RV32IZFINXZDINX-NEXT: fmin.d a2, a2, a4
-; RV32IZFINXZDINX-NEXT: fcvt.w.d a1, a2, rtz
-; RV32IZFINXZDINX-NEXT: and a0, a0, a1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: feq.d a6, a0, a0
+; RV32IZFINXZDINX-NEXT: neg a6, a6
+; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a2
+; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a4
+; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
+; RV32IZFINXZDINX-NEXT: and a0, a6, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_s_sat_i16:
@@ -1948,13 +1805,7 @@ define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_s_i16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_s_i16:
@@ -2006,11 +1857,6 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_s_sat_i16:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI28_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI28_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI28_0)(a2)
@@ -2018,7 +1864,6 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a4
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_s_sat_i16:
@@ -2130,13 +1975,7 @@ define signext i8 @fcvt_w_s_i8(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_s_i8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_s_i8:
@@ -2196,24 +2035,18 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_s_sat_i8:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI30_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI30_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI30_0)(a2)
; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI30_1)
; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI30_1+4)(a4)
; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI30_1)(a4)
-; RV32IZFINXZDINX-NEXT: fmax.d a2, a0, a2
-; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
-; RV32IZFINXZDINX-NEXT: neg a0, a0
-; RV32IZFINXZDINX-NEXT: fmin.d a2, a2, a4
-; RV32IZFINXZDINX-NEXT: fcvt.w.d a1, a2, rtz
-; RV32IZFINXZDINX-NEXT: and a0, a0, a1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: feq.d a6, a0, a0
+; RV32IZFINXZDINX-NEXT: neg a6, a6
+; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a2
+; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a4
+; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
+; RV32IZFINXZDINX-NEXT: and a0, a6, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_s_sat_i8:
@@ -2344,13 +2177,7 @@ define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_s_i8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_s_i8:
@@ -2404,11 +2231,6 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_s_sat_i8:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI32_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI32_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI32_0)(a2)
@@ -2416,7 +2238,6 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a4
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_s_sat_i8:
@@ -2532,17 +2353,11 @@ define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_d_sat_zext:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rtz
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_d_sat_zext:
@@ -2647,17 +2462,11 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_d_sat_sext:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rtz
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_d_sat_sext:
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
index 3ae2e99..e864d8f 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
@@ -24,17 +24,7 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_oeq:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_oeq:
@@ -78,20 +68,11 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_ogt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a1, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a4
-; RV32IZFINXZDINX-NEXT: csrw fflags, a1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a5, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a4, a2, a0
+; RV32IZFINXZDINX-NEXT: csrw fflags, a5
+; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ogt:
@@ -138,20 +119,11 @@ define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_oge:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a1, fflags
-; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a4
-; RV32IZFINXZDINX-NEXT: csrw fflags, a1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a5, fflags
+; RV32IZFINXZDINX-NEXT: fle.d a4, a2, a0
+; RV32IZFINXZDINX-NEXT: csrw fflags, a5
+; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_oge:
@@ -200,20 +172,11 @@ define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_olt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a1, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a0, a4, a2
-; RV32IZFINXZDINX-NEXT: csrw fflags, a1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a5, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
+; RV32IZFINXZDINX-NEXT: csrw fflags, a5
+; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_olt:
@@ -260,20 +223,11 @@ define i32 @fcmp_ole(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_ole:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a1, fflags
-; RV32IZFINXZDINX-NEXT: fle.d a0, a4, a2
-; RV32IZFINXZDINX-NEXT: csrw fflags, a1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a5, fflags
+; RV32IZFINXZDINX-NEXT: fle.d a4, a0, a2
+; RV32IZFINXZDINX-NEXT: csrw fflags, a5
+; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ole:
@@ -327,25 +281,16 @@ define i32 @fcmp_one(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_one:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a1, a4, a2
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a6, a2, a4
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: or a0, a6, a1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a5, a0, a2
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a6, a2, a0
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: or a4, a6, a5
+; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_one:
@@ -430,19 +375,9 @@ define i32 @fcmp_ord(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_ord:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ord:
@@ -495,26 +430,17 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_ueq:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a1, a4, a2
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a6, a2, a4
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: or a0, a6, a1
-; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a5, a0, a2
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a6, a2, a0
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: or a4, a6, a5
+; RV32IZFINXZDINX-NEXT: xori a4, a4, 1
+; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ueq:
@@ -602,21 +528,12 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_ugt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: fle.d a1, a4, a2
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: xori a0, a1, 1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: fle.d a5, a0, a2
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
+; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ugt:
@@ -665,21 +582,12 @@ define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_uge:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a1, a4, a2
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: xori a0, a1, 1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a5, a0, a2
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
+; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_uge:
@@ -730,21 +638,12 @@ define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_ult:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: fle.d a1, a2, a4
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: xori a0, a1, 1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: fle.d a5, a2, a0
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
+; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ult:
@@ -793,21 +692,12 @@ define i32 @fcmp_ule(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_ule:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a1, a2, a4
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: xori a0, a1, 1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a5, a2, a0
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
+; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ule:
@@ -853,18 +743,8 @@ define i32 @fcmp_une(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_une:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_une:
@@ -908,20 +788,10 @@ define i32 @fcmp_uno(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_uno:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_uno:
@@ -966,19 +836,9 @@ define i32 @fcmps_oeq(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_oeq:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a4, a2, a0
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: and a0, a0, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_oeq:
@@ -1021,17 +881,7 @@ define i32 @fcmps_ogt(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_ogt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ogt:
@@ -1071,17 +921,7 @@ define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_oge:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_oge:
@@ -1123,17 +963,7 @@ define i32 @fcmps_olt(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_olt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_olt:
@@ -1173,17 +1003,7 @@ define i32 @fcmps_ole(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_ole:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ole:
@@ -1225,19 +1045,9 @@ define i32 @fcmps_one(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_one:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: or a0, a0, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_one:
@@ -1315,19 +1125,9 @@ define i32 @fcmps_ord(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_ord:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ord:
@@ -1372,20 +1172,10 @@ define i32 @fcmps_ueq(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_ueq:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: or a0, a0, a4
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ueq:
@@ -1463,18 +1253,8 @@ define i32 @fcmps_ugt(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_ugt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ugt:
@@ -1516,18 +1296,8 @@ define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_uge:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_uge:
@@ -1571,18 +1341,8 @@ define i32 @fcmps_ult(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_ult:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ult:
@@ -1624,18 +1384,8 @@ define i32 @fcmps_ule(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_ule:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ule:
@@ -1679,20 +1429,10 @@ define i32 @fcmps_une(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_une:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a4, a2, a0
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: and a0, a0, a4
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_une:
@@ -1738,20 +1478,10 @@ define i32 @fcmps_uno(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_uno:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_uno:
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll
index 64a154f..1e609f8 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -45,17 +45,7 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_oeq:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_oeq:
@@ -95,17 +85,7 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_ogt:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_ogt:
@@ -145,17 +125,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_oge:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: fle.d a0, a2, a0
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_oge:
@@ -197,17 +167,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_olt:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a0, a2
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_olt:
@@ -247,17 +207,7 @@ define i32 @fcmp_ole(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_ole:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_ole:
@@ -299,19 +249,9 @@ define i32 @fcmp_one(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_one:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; CHECKRV32IZFINXZDINX-NEXT: or a0, a0, a4
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_one:
@@ -389,19 +329,9 @@ define i32 @fcmp_ord(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_ord:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; CHECKRV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; CHECKRV32IZFINXZDINX-NEXT: and a0, a0, a2
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_ord:
@@ -446,20 +376,10 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_ueq:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; CHECKRV32IZFINXZDINX-NEXT: or a0, a0, a4
; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_ueq:
@@ -537,18 +457,8 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_ugt:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_ugt:
@@ -590,18 +500,8 @@ define i32 @fcmp_uge(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_uge:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a0, a2
; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_uge:
@@ -645,18 +545,8 @@ define i32 @fcmp_ult(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_ult:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: fle.d a0, a2, a0
; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_ult:
@@ -698,18 +588,8 @@ define i32 @fcmp_ule(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_ule:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_ule:
@@ -751,18 +631,8 @@ define i32 @fcmp_une(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_une:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_une:
@@ -806,20 +676,10 @@ define i32 @fcmp_uno(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_uno:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; CHECKRV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; CHECKRV32IZFINXZDINX-NEXT: and a0, a0, a2
; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_uno:
diff --git a/llvm/test/CodeGen/RISCV/double-imm.ll b/llvm/test/CodeGen/RISCV/double-imm.ll
index 9254369..74d4acc 100644
--- a/llvm/test/CodeGen/RISCV/double-imm.ll
+++ b/llvm/test/CodeGen/RISCV/double-imm.ll
@@ -54,20 +54,10 @@ define double @double_imm_op(double %a) nounwind {
;
; CHECKRV32ZDINX-LABEL: double_imm_op:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: lui a2, %hi(.LCPI1_0)
; CHECKRV32ZDINX-NEXT: lw a3, %lo(.LCPI1_0+4)(a2)
; CHECKRV32ZDINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
; CHECKRV32ZDINX-NEXT: fadd.d a0, a0, a2
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: double_imm_op:
@@ -153,24 +143,18 @@ define dso_local double @negzero_sel(i16 noundef %a, double noundef %d) nounwind
;
; CHECKRV32ZDINX-LABEL: negzero_sel:
; CHECKRV32ZDINX: # %bb.0: # %entry
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a1, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a2, 12(sp)
-; CHECKRV32ZDINX-NEXT: slli a2, a0, 16
-; CHECKRV32ZDINX-NEXT: fcvt.d.w a0, zero
-; CHECKRV32ZDINX-NEXT: beqz a2, .LBB4_2
+; CHECKRV32ZDINX-NEXT: slli a0, a0, 16
+; CHECKRV32ZDINX-NEXT: fcvt.d.w a4, zero
+; CHECKRV32ZDINX-NEXT: beqz a0, .LBB4_2
; CHECKRV32ZDINX-NEXT: # %bb.1: # %entry
-; CHECKRV32ZDINX-NEXT: fneg.d a0, a0
+; CHECKRV32ZDINX-NEXT: fneg.d a2, a4
; CHECKRV32ZDINX-NEXT: j .LBB4_3
; CHECKRV32ZDINX-NEXT: .LBB4_2:
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
+; CHECKRV32ZDINX-NEXT: mv a3, a2
+; CHECKRV32ZDINX-NEXT: mv a2, a1
; CHECKRV32ZDINX-NEXT: .LBB4_3: # %entry
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
+; CHECKRV32ZDINX-NEXT: mv a0, a2
+; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: negzero_sel:
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
index c574f64..3821586 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
@@ -28,17 +28,7 @@ define double @sqrt_f64(double %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: sqrt_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsqrt.d a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: sqrt_f64:
@@ -299,22 +289,12 @@ define double @sincos_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX-NEXT: mv s0, a1
; RV32IZFINXZDINX-NEXT: mv s1, a0
; RV32IZFINXZDINX-NEXT: call sin
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s3, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s2, a0
+; RV32IZFINXZDINX-NEXT: mv s3, a1
; RV32IZFINXZDINX-NEXT: mv a0, s1
; RV32IZFINXZDINX-NEXT: mv a1, s0
; RV32IZFINXZDINX-NEXT: call cos
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, s2, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -765,25 +745,7 @@ define double @fma_f64(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fma_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fma_f64:
@@ -822,25 +784,7 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fmuladd_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmuladd_f64:
@@ -1455,13 +1399,7 @@ define iXLen @lrint_f64(double %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: lrint_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: lrint_f64:
@@ -1505,13 +1443,7 @@ define iXLen @lround_f64(double %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: lround_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: lround_f64:
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index f290cf0..52c49cf 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -26,17 +26,7 @@ define double @sqrt_f64(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: sqrt_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsqrt.d a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: sqrt_f64:
@@ -254,22 +244,12 @@ define double @sincos_f64(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: mv s0, a1
; RV32IZFINXZDINX-NEXT: mv s1, a0
; RV32IZFINXZDINX-NEXT: call sin
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s3, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s2, a0
+; RV32IZFINXZDINX-NEXT: mv s3, a1
; RV32IZFINXZDINX-NEXT: mv a0, s1
; RV32IZFINXZDINX-NEXT: mv a1, s0
; RV32IZFINXZDINX-NEXT: call cos
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, s2, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -606,25 +586,7 @@ define double @fma_f64(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fma_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fma_f64:
@@ -663,25 +625,7 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmuladd_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmuladd_f64:
@@ -769,21 +713,7 @@ define double @minnum_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: minnum_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: minnum_f64:
@@ -822,21 +752,7 @@ define double @maxnum_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: maxnum_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: maxnum_f64:
@@ -892,21 +808,7 @@ define double @copysign_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: copysign_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsgnj.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: copysign_f64:
@@ -1381,13 +1283,7 @@ define iXLen @lrint_f64(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: lrint_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: lrint_f64:
@@ -1432,13 +1328,7 @@ define iXLen @lround_f64(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: lround_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: lround_f64:
@@ -1475,13 +1365,7 @@ define i32 @lround_i32_f64(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: lround_i32_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: lround_i32_f64:
@@ -1625,16 +1509,9 @@ define i1 @isnan_d_fpclass(double %x) {
;
; RV32IZFINXZDINX-LABEL: isnan_d_fpclass:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fclass.d a0, a0
; RV32IZFINXZDINX-NEXT: andi a0, a0, 768
; RV32IZFINXZDINX-NEXT: snez a0, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: isnan_d_fpclass:
diff --git a/llvm/test/CodeGen/RISCV/double-isnan.ll b/llvm/test/CodeGen/RISCV/double-isnan.ll
index 4d0b815..6a3779d 100644
--- a/llvm/test/CodeGen/RISCV/double-isnan.ll
+++ b/llvm/test/CodeGen/RISCV/double-isnan.ll
@@ -17,14 +17,8 @@ define zeroext i1 @double_is_nan(double %a) nounwind {
;
; CHECKRV32ZDINX-LABEL: double_is_nan:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a0, a0, a0
; CHECKRV32ZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: double_is_nan:
@@ -44,13 +38,7 @@ define zeroext i1 @double_not_nan(double %a) nounwind {
;
; CHECKRV32ZDINX-LABEL: double_not_nan:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a0, a0, a0
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: double_not_nan:
diff --git a/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll b/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll
index 0ca2078..5229117 100644
--- a/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll
+++ b/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll
@@ -36,35 +36,25 @@ define double @fminimum_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fminimum_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: feq.d a6, a0, a0
; RV32IZFINXZDINX-NEXT: mv a4, a2
; RV32IZFINXZDINX-NEXT: mv a5, a3
-; RV32IZFINXZDINX-NEXT: bnez a6, .LBB0_2
+; RV32IZFINXZDINX-NEXT: beqz a6, .LBB0_3
; RV32IZFINXZDINX-NEXT: # %bb.1:
+; RV32IZFINXZDINX-NEXT: feq.d a6, a2, a2
+; RV32IZFINXZDINX-NEXT: beqz a6, .LBB0_4
+; RV32IZFINXZDINX-NEXT: .LBB0_2:
+; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a4
+; RV32IZFINXZDINX-NEXT: ret
+; RV32IZFINXZDINX-NEXT: .LBB0_3:
; RV32IZFINXZDINX-NEXT: mv a4, a0
; RV32IZFINXZDINX-NEXT: mv a5, a1
-; RV32IZFINXZDINX-NEXT: .LBB0_2:
; RV32IZFINXZDINX-NEXT: feq.d a6, a2, a2
-; RV32IZFINXZDINX-NEXT: bnez a6, .LBB0_4
-; RV32IZFINXZDINX-NEXT: # %bb.3:
+; RV32IZFINXZDINX-NEXT: bnez a6, .LBB0_2
+; RV32IZFINXZDINX-NEXT: .LBB0_4:
; RV32IZFINXZDINX-NEXT: mv a0, a2
; RV32IZFINXZDINX-NEXT: mv a1, a3
-; RV32IZFINXZDINX-NEXT: .LBB0_4:
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fminimum_f64:
@@ -113,35 +103,25 @@ define double @fmaximum_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmaximum_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: feq.d a6, a0, a0
; RV32IZFINXZDINX-NEXT: mv a4, a2
; RV32IZFINXZDINX-NEXT: mv a5, a3
-; RV32IZFINXZDINX-NEXT: bnez a6, .LBB1_2
+; RV32IZFINXZDINX-NEXT: beqz a6, .LBB1_3
; RV32IZFINXZDINX-NEXT: # %bb.1:
+; RV32IZFINXZDINX-NEXT: feq.d a6, a2, a2
+; RV32IZFINXZDINX-NEXT: beqz a6, .LBB1_4
+; RV32IZFINXZDINX-NEXT: .LBB1_2:
+; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a4
+; RV32IZFINXZDINX-NEXT: ret
+; RV32IZFINXZDINX-NEXT: .LBB1_3:
; RV32IZFINXZDINX-NEXT: mv a4, a0
; RV32IZFINXZDINX-NEXT: mv a5, a1
-; RV32IZFINXZDINX-NEXT: .LBB1_2:
; RV32IZFINXZDINX-NEXT: feq.d a6, a2, a2
-; RV32IZFINXZDINX-NEXT: bnez a6, .LBB1_4
-; RV32IZFINXZDINX-NEXT: # %bb.3:
+; RV32IZFINXZDINX-NEXT: bnez a6, .LBB1_2
+; RV32IZFINXZDINX-NEXT: .LBB1_4:
; RV32IZFINXZDINX-NEXT: mv a0, a2
; RV32IZFINXZDINX-NEXT: mv a1, a3
-; RV32IZFINXZDINX-NEXT: .LBB1_4:
; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmaximum_f64:
@@ -174,21 +154,7 @@ define double @fminimum_nnan_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fminimum_nnan_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fminimum_nnan_f64:
@@ -221,35 +187,25 @@ define double @fmaximum_nnan_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmaximum_nnan_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: feq.d a6, a0, a0
; RV32IZFINXZDINX-NEXT: mv a4, a2
; RV32IZFINXZDINX-NEXT: mv a5, a3
-; RV32IZFINXZDINX-NEXT: bnez a6, .LBB3_2
+; RV32IZFINXZDINX-NEXT: beqz a6, .LBB3_3
; RV32IZFINXZDINX-NEXT: # %bb.1:
+; RV32IZFINXZDINX-NEXT: feq.d a6, a2, a2
+; RV32IZFINXZDINX-NEXT: beqz a6, .LBB3_4
+; RV32IZFINXZDINX-NEXT: .LBB3_2:
+; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a4
+; RV32IZFINXZDINX-NEXT: ret
+; RV32IZFINXZDINX-NEXT: .LBB3_3:
; RV32IZFINXZDINX-NEXT: mv a4, a0
; RV32IZFINXZDINX-NEXT: mv a5, a1
-; RV32IZFINXZDINX-NEXT: .LBB3_2:
; RV32IZFINXZDINX-NEXT: feq.d a6, a2, a2
-; RV32IZFINXZDINX-NEXT: bnez a6, .LBB3_4
-; RV32IZFINXZDINX-NEXT: # %bb.3:
+; RV32IZFINXZDINX-NEXT: bnez a6, .LBB3_2
+; RV32IZFINXZDINX-NEXT: .LBB3_4:
; RV32IZFINXZDINX-NEXT: mv a0, a2
; RV32IZFINXZDINX-NEXT: mv a1, a3
-; RV32IZFINXZDINX-NEXT: .LBB3_4:
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmaximum_nnan_f64:
@@ -289,30 +245,14 @@ define double @fminimum_nnan_op_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fminimum_nnan_op_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: feq.d a0, a2, a2
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: bnez a0, .LBB4_2
+; RV32IZFINXZDINX-NEXT: feq.d a4, a2, a2
+; RV32IZFINXZDINX-NEXT: bnez a4, .LBB4_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a0, a2
-; RV32IZFINXZDINX-NEXT: mv a1, a3
-; RV32IZFINXZDINX-NEXT: j .LBB4_3
+; RV32IZFINXZDINX-NEXT: fmin.d a0, a2, a2
+; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB4_2:
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a0
-; RV32IZFINXZDINX-NEXT: .LBB4_3:
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fminimum_nnan_op_f64:
@@ -341,23 +281,9 @@ define double @fmaximum_nnan_op_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmaximum_nnan_op_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: fsub.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: fmax.d a0, a4, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmaximum_nnan_op_f64:
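; For context, a minimal sketch of the IR shape these minimum/maximum CHECK
; lines exercise (assumed from the hunk headers above, not copied from the
; test file). The RV32IZFINXZDINX changes in this file drop the sw/lw
; round-trips through the stack that were previously emitted to materialize
; each double in an even/odd GPR pair (a0/a1, a2/a3):
declare double @llvm.minimum.f64(double, double)
define double @fminimum_f64(double %a, double %b) nounwind {
  %r = call double @llvm.minimum.f64(double %a, double %b)
  ret double %r
}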
diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll
index 6c6f70d..38cb52b 100644
--- a/llvm/test/CodeGen/RISCV/double-mem.ll
+++ b/llvm/test/CodeGen/RISCV/double-mem.ll
@@ -18,17 +18,11 @@ define dso_local double @fld(ptr %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fld:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: lw a2, 0(a0)
; RV32IZFINXZDINX-NEXT: lw a3, 4(a0)
; RV32IZFINXZDINX-NEXT: lw a1, 28(a0)
; RV32IZFINXZDINX-NEXT: lw a0, 24(a0)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fld:
@@ -56,21 +50,15 @@ define dso_local void @fsd(ptr %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fsd:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a3, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a4
+; RV32IZFINXZDINX-NEXT: mv a5, a4
+; RV32IZFINXZDINX-NEXT: mv a7, a2
+; RV32IZFINXZDINX-NEXT: mv a4, a3
+; RV32IZFINXZDINX-NEXT: mv a6, a1
+; RV32IZFINXZDINX-NEXT: fadd.d a2, a6, a4
; RV32IZFINXZDINX-NEXT: sw a2, 0(a0)
; RV32IZFINXZDINX-NEXT: sw a3, 4(a0)
; RV32IZFINXZDINX-NEXT: sw a2, 64(a0)
; RV32IZFINXZDINX-NEXT: sw a3, 68(a0)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsd:
@@ -105,15 +93,6 @@ define dso_local double @fld_fsd_global(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fld_fsd_global:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: lui a2, %hi(G)
; RV32IZFINXZDINX-NEXT: lw a4, %lo(G)(a2)
@@ -125,11 +104,6 @@ define dso_local double @fld_fsd_global(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: lw a5, 76(a3)
; RV32IZFINXZDINX-NEXT: sw a0, 72(a3)
; RV32IZFINXZDINX-NEXT: sw a1, 76(a3)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fld_fsd_global:
@@ -174,22 +148,12 @@ define dso_local double @fld_fsd_constant(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fld_fsd_constant:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lui a2, 912092
; RV32IZFINXZDINX-NEXT: lw a4, -273(a2)
; RV32IZFINXZDINX-NEXT: lw a5, -269(a2)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a4
; RV32IZFINXZDINX-NEXT: sw a0, -273(a2)
; RV32IZFINXZDINX-NEXT: sw a1, -269(a2)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fld_fsd_constant:
@@ -246,19 +210,13 @@ define dso_local double @fld_stack(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: mv s0, a0
; RV32IZFINXZDINX-NEXT: addi a0, sp, 8
; RV32IZFINXZDINX-NEXT: call notdead
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, s0
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -313,23 +271,15 @@ define dso_local void @fsd_stack(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fsd_stack:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 16(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 20(sp)
-; RV32IZFINXZDINX-NEXT: addi a0, sp, 16
+; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
+; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv a0, sp
; RV32IZFINXZDINX-NEXT: call notdead
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsd_stack:
@@ -360,14 +310,10 @@ define dso_local void @fsd_trunc(ptr %a, double %b) nounwind noinline optnone {
;
; RV32IZFINXZDINX-LABEL: fsd_trunc:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a1, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv a3, a2
+; RV32IZFINXZDINX-NEXT: mv a2, a1
; RV32IZFINXZDINX-NEXT: fcvt.s.d a1, a2
; RV32IZFINXZDINX-NEXT: sw a1, 0(a0)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsd_trunc:
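; A sketch of the load/add pattern the fld test in double-mem.ll covers
; (assumed shape; the 24(a0)/28(a0) offsets in the CHECK lines above
; correspond to double element index 3):
define double @fld(ptr %a) nounwind {
  %1 = load double, ptr %a
  %2 = getelementptr double, ptr %a, i32 3
  %3 = load double, ptr %2
  %4 = fadd double %1, %3
  ret double %4
}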
diff --git a/llvm/test/CodeGen/RISCV/double-previous-failure.ll b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
index 8b8f538..c169b10 100644
--- a/llvm/test/CodeGen/RISCV/double-previous-failure.ll
+++ b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
@@ -50,10 +50,6 @@ define i32 @main() nounwind {
; RV32IZFINXZDINX-NEXT: lui a1, 262144
; RV32IZFINXZDINX-NEXT: li a0, 0
; RV32IZFINXZDINX-NEXT: call test
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI1_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
diff --git a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
index 7cdf18e..f1c56b3 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
@@ -20,18 +20,11 @@ define signext i32 @test_floor_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rdn
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_si32:
@@ -50,48 +43,42 @@ define signext i32 @test_floor_si32(double %x) {
define i64 @test_floor_si64(double %x) nounwind {
; RV32IFD-LABEL: test_floor_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call floor
; RV32IFD-NEXT: lui a0, %hi(.LCPI1_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI1_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI1_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI1_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB1_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB1_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB1_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB1_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI1_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI1_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB1_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB1_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_floor_si64:
@@ -105,51 +92,45 @@ define i64 @test_floor_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_floor_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call floor
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI1_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI1_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI1_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI1_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB1_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB1_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB1_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB1_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI1_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI1_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI1_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB1_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB1_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_si64:
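; A sketch of the saturating-conversion pattern the *_si64 tests in this
; file check (assumed shape, built from the standard llvm.floor and
; llvm.fptosi.sat intrinsics; __fixdfdi in the CHECK lines is the libcall
; the i64 conversion lowers to on RV32, with the flt.d/fle.d compares
; against the .LCPI constants providing the saturation clamp):
declare double @llvm.floor.f64(double)
declare i64 @llvm.fptosi.sat.i64.f64(double)
define i64 @test_floor_si64(double %x) nounwind {
  %a = call double @llvm.floor.f64(double %x)
  %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
  ret i64 %b
}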
@@ -177,18 +158,11 @@ define signext i32 @test_floor_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rdn
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_ui32:
@@ -241,38 +215,30 @@ define i64 @test_floor_ui64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_floor_ui64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call floor
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
+; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI3_0)
+; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI3_0+4)(a4)
+; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI3_0)(a4)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI3_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI3_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI3_0)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s2, a0
-; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg a2, a2
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: and a1, s2, a1
-; RV32IZFINXZDINX-NEXT: or a1, a2, a1
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0
+; RV32IZFINXZDINX-NEXT: neg a3, a3
+; RV32IZFINXZDINX-NEXT: or a0, a3, a0
+; RV32IZFINXZDINX-NEXT: and a1, a2, a1
+; RV32IZFINXZDINX-NEXT: or a1, a3, a1
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_ui64:
@@ -300,18 +266,11 @@ define signext i32 @test_ceil_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rup
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_si32:
@@ -330,48 +289,42 @@ define signext i32 @test_ceil_si32(double %x) {
define i64 @test_ceil_si64(double %x) nounwind {
; RV32IFD-LABEL: test_ceil_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call ceil
; RV32IFD-NEXT: lui a0, %hi(.LCPI5_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI5_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI5_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI5_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB5_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB5_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB5_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB5_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI5_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI5_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB5_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB5_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_ceil_si64:
@@ -385,51 +338,45 @@ define i64 @test_ceil_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_ceil_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call ceil
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI5_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI5_0)(a2)
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI5_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI5_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI5_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB5_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB5_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB5_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB5_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI5_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI5_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI5_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB5_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB5_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_si64:
@@ -457,18 +404,11 @@ define signext i32 @test_ceil_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rup
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_ui32:
@@ -521,38 +461,30 @@ define i64 @test_ceil_ui64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_ceil_ui64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call ceil
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
+; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI7_0)
+; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI7_0+4)(a4)
+; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI7_0)(a4)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI7_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI7_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI7_0)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s2, a0
-; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg a2, a2
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: and a1, s2, a1
-; RV32IZFINXZDINX-NEXT: or a1, a2, a1
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0
+; RV32IZFINXZDINX-NEXT: neg a3, a3
+; RV32IZFINXZDINX-NEXT: or a0, a3, a0
+; RV32IZFINXZDINX-NEXT: and a1, a2, a1
+; RV32IZFINXZDINX-NEXT: or a1, a3, a1
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_ui64:
@@ -580,18 +512,11 @@ define signext i32 @test_trunc_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rtz
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_si32:
@@ -610,48 +535,42 @@ define signext i32 @test_trunc_si32(double %x) {
define i64 @test_trunc_si64(double %x) nounwind {
; RV32IFD-LABEL: test_trunc_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call trunc
; RV32IFD-NEXT: lui a0, %hi(.LCPI9_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI9_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI9_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI9_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB9_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB9_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB9_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB9_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI9_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI9_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB9_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB9_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_trunc_si64:
@@ -665,51 +584,45 @@ define i64 @test_trunc_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_trunc_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call trunc
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI9_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI9_0)(a2)
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI9_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI9_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI9_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB9_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB9_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB9_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB9_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI9_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI9_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI9_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB9_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB9_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_si64:
@@ -737,18 +650,11 @@ define signext i32 @test_trunc_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rtz
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_ui32:
@@ -801,38 +707,30 @@ define i64 @test_trunc_ui64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_trunc_ui64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call trunc
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
+; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI11_0)
+; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI11_0+4)(a4)
+; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI11_0)(a4)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI11_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI11_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI11_0)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s2, a0
-; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg a2, a2
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: and a1, s2, a1
-; RV32IZFINXZDINX-NEXT: or a1, a2, a1
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0
+; RV32IZFINXZDINX-NEXT: neg a3, a3
+; RV32IZFINXZDINX-NEXT: or a0, a3, a0
+; RV32IZFINXZDINX-NEXT: and a1, a2, a1
+; RV32IZFINXZDINX-NEXT: or a1, a3, a1
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_ui64:
@@ -860,18 +758,11 @@ define signext i32 @test_round_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rmm
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_si32:
@@ -890,48 +781,42 @@ define signext i32 @test_round_si32(double %x) {
define i64 @test_round_si64(double %x) nounwind {
; RV32IFD-LABEL: test_round_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call round
; RV32IFD-NEXT: lui a0, %hi(.LCPI13_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI13_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI13_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB13_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB13_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB13_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB13_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI13_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI13_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB13_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB13_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_round_si64:
@@ -945,51 +830,45 @@ define i64 @test_round_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_round_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call round
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI13_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI13_0)(a2)
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI13_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI13_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI13_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB13_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB13_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB13_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB13_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI13_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI13_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI13_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB13_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB13_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_si64:
@@ -1017,18 +896,11 @@ define signext i32 @test_round_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rmm
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_ui32:
@@ -1081,38 +953,30 @@ define i64 @test_round_ui64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_round_ui64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call round
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
+; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI15_0)
+; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI15_0+4)(a4)
+; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI15_0)(a4)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI15_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI15_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI15_0)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s2, a0
-; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg a2, a2
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: and a1, s2, a1
-; RV32IZFINXZDINX-NEXT: or a1, a2, a1
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0
+; RV32IZFINXZDINX-NEXT: neg a3, a3
+; RV32IZFINXZDINX-NEXT: or a0, a3, a0
+; RV32IZFINXZDINX-NEXT: and a1, a2, a1
+; RV32IZFINXZDINX-NEXT: or a1, a3, a1
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_ui64:
@@ -1140,18 +1004,11 @@ define signext i32 @test_roundeven_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rne
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si32:
@@ -1170,48 +1027,42 @@ define signext i32 @test_roundeven_si32(double %x) {
define i64 @test_roundeven_si64(double %x) nounwind {
; RV32IFD-LABEL: test_roundeven_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call roundeven
; RV32IFD-NEXT: lui a0, %hi(.LCPI17_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI17_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI17_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB17_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB17_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB17_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB17_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI17_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI17_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB17_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB17_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_roundeven_si64:
@@ -1225,51 +1076,45 @@ define i64 @test_roundeven_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call roundeven
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI17_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI17_0)(a2)
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI17_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI17_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI17_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB17_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB17_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB17_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB17_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI17_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI17_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI17_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB17_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB17_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si64:
@@ -1297,18 +1142,11 @@ define signext i32 @test_roundeven_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rne
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui32:
@@ -1361,38 +1199,30 @@ define i64 @test_roundeven_ui64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call roundeven
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
+; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI19_0)
+; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI19_0+4)(a4)
+; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI19_0)(a4)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI19_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI19_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI19_0)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s2, a0
-; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg a2, a2
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: and a1, s2, a1
-; RV32IZFINXZDINX-NEXT: or a1, a2, a1
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0
+; RV32IZFINXZDINX-NEXT: neg a3, a3
+; RV32IZFINXZDINX-NEXT: or a0, a3, a0
+; RV32IZFINXZDINX-NEXT: and a1, a2, a1
+; RV32IZFINXZDINX-NEXT: or a1, a3, a1
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui64:
@@ -1420,18 +1250,11 @@ define signext i32 @test_rint_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_rint_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_rint_si32:
@@ -1450,48 +1273,42 @@ define signext i32 @test_rint_si32(double %x) {
define i64 @test_rint_si64(double %x) nounwind {
; RV32IFD-LABEL: test_rint_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call rint
; RV32IFD-NEXT: lui a0, %hi(.LCPI21_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI21_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI21_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI21_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB21_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB21_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB21_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB21_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI21_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI21_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB21_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB21_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_rint_si64:
@@ -1505,51 +1322,45 @@ define i64 @test_rint_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_rint_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call rint
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI21_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI21_0)(a2)
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI21_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI21_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI21_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB21_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB21_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB21_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB21_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI21_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI21_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI21_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB21_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB21_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_rint_si64:
@@ -1577,18 +1388,11 @@ define signext i32 @test_rint_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_rint_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_rint_ui32:
@@ -1641,38 +1445,30 @@ define i64 @test_rint_ui64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_rint_ui64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call rint
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
+; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI23_0)
+; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI23_0+4)(a4)
+; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI23_0)(a4)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI23_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI23_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI23_0)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s2, a0
-; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg a2, a2
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: and a1, s2, a1
-; RV32IZFINXZDINX-NEXT: or a1, a2, a1
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0
+; RV32IZFINXZDINX-NEXT: neg a3, a3
+; RV32IZFINXZDINX-NEXT: or a0, a3, a0
+; RV32IZFINXZDINX-NEXT: and a1, a2, a1
+; RV32IZFINXZDINX-NEXT: or a1, a3, a1
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_rint_ui64:
diff --git a/llvm/test/CodeGen/RISCV/double-round-conv.ll b/llvm/test/CodeGen/RISCV/double-round-conv.ll
index 094a410..d84d80a 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv.ll
@@ -21,14 +21,7 @@ define signext i8 @test_floor_si8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_si8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rdn
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_si8:
@@ -53,14 +46,7 @@ define signext i16 @test_floor_si16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_si16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rdn
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_si16:
@@ -80,14 +66,7 @@ define signext i32 @test_floor_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rdn
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_si32:
@@ -151,14 +130,7 @@ define zeroext i8 @test_floor_ui8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_ui8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rdn
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_ui8:
@@ -183,14 +155,7 @@ define zeroext i16 @test_floor_ui16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_ui16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rdn
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_ui16:
@@ -210,14 +175,7 @@ define signext i32 @test_floor_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rdn
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_ui32:
@@ -281,14 +239,7 @@ define signext i8 @test_ceil_si8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_si8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rup
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_si8:
@@ -313,14 +264,7 @@ define signext i16 @test_ceil_si16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_si16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rup
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_si16:
@@ -340,14 +284,7 @@ define signext i32 @test_ceil_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rup
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_si32:
@@ -411,14 +348,7 @@ define zeroext i8 @test_ceil_ui8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_ui8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rup
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_ui8:
@@ -443,14 +373,7 @@ define zeroext i16 @test_ceil_ui16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_ui16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rup
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_ui16:
@@ -470,14 +393,7 @@ define signext i32 @test_ceil_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rup
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_ui32:
@@ -541,14 +457,7 @@ define signext i8 @test_trunc_si8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_si8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_si8:
@@ -573,14 +482,7 @@ define signext i16 @test_trunc_si16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_si16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_si16:
@@ -600,14 +502,7 @@ define signext i32 @test_trunc_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_si32:
@@ -671,14 +566,7 @@ define zeroext i8 @test_trunc_ui8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_ui8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_ui8:
@@ -703,14 +591,7 @@ define zeroext i16 @test_trunc_ui16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_ui16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_ui16:
@@ -730,14 +611,7 @@ define signext i32 @test_trunc_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_ui32:
@@ -801,14 +675,7 @@ define signext i8 @test_round_si8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_si8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_si8:
@@ -833,14 +700,7 @@ define signext i16 @test_round_si16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_si16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_si16:
@@ -860,14 +720,7 @@ define signext i32 @test_round_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_si32:
@@ -931,14 +784,7 @@ define zeroext i8 @test_round_ui8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_ui8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_ui8:
@@ -963,14 +809,7 @@ define zeroext i16 @test_round_ui16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_ui16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_ui16:
@@ -990,14 +829,7 @@ define signext i32 @test_round_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_ui32:
@@ -1061,14 +893,7 @@ define signext i8 @test_roundeven_si8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rne
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si8:
@@ -1093,14 +918,7 @@ define signext i16 @test_roundeven_si16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rne
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si16:
@@ -1120,14 +938,7 @@ define signext i32 @test_roundeven_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rne
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si32:
@@ -1191,14 +1002,7 @@ define zeroext i8 @test_roundeven_ui8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rne
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui8:
@@ -1223,14 +1027,7 @@ define zeroext i16 @test_roundeven_ui16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rne
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui16:
@@ -1250,14 +1047,7 @@ define signext i32 @test_roundeven_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rne
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui32:
diff --git a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
index 766da36..654a460 100644
--- a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
@@ -41,26 +41,12 @@ define double @select_fcmp_oeq(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_oeq:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: bnez a4, .LBB1_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB1_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_oeq:
@@ -88,26 +74,12 @@ define double @select_fcmp_ogt(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_ogt:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32ZDINX-NEXT: flt.d a4, a2, a0
; CHECKRV32ZDINX-NEXT: bnez a4, .LBB2_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB2_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_ogt:
@@ -135,26 +107,12 @@ define double @select_fcmp_oge(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_oge:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32ZDINX-NEXT: fle.d a4, a2, a0
; CHECKRV32ZDINX-NEXT: bnez a4, .LBB3_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB3_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_oge:
@@ -182,26 +140,12 @@ define double @select_fcmp_olt(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_olt:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: flt.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: bnez a4, .LBB4_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB4_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_olt:
@@ -229,26 +173,12 @@ define double @select_fcmp_ole(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_ole:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: fle.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: bnez a4, .LBB5_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB5_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_ole:
@@ -278,15 +208,6 @@ define double @select_fcmp_one(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_one:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: flt.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: flt.d a5, a2, a0
; CHECKRV32ZDINX-NEXT: or a4, a5, a4
@@ -295,11 +216,6 @@ define double @select_fcmp_one(double %a, double %b) nounwind {
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB6_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_one:
@@ -331,15 +247,6 @@ define double @select_fcmp_ord(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_ord:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a4, a2, a2
; CHECKRV32ZDINX-NEXT: feq.d a5, a0, a0
; CHECKRV32ZDINX-NEXT: and a4, a5, a4
@@ -348,11 +255,6 @@ define double @select_fcmp_ord(double %a, double %b) nounwind {
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB7_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_ord:
@@ -384,15 +286,6 @@ define double @select_fcmp_ueq(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_ueq:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: flt.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: flt.d a5, a2, a0
; CHECKRV32ZDINX-NEXT: or a4, a5, a4
@@ -401,11 +294,6 @@ define double @select_fcmp_ueq(double %a, double %b) nounwind {
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB8_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_ueq:
@@ -435,26 +323,12 @@ define double @select_fcmp_ugt(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_ugt:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: fle.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: beqz a4, .LBB9_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB9_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_ugt:
@@ -482,26 +356,12 @@ define double @select_fcmp_uge(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_uge:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: flt.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: beqz a4, .LBB10_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB10_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_uge:
@@ -529,26 +389,12 @@ define double @select_fcmp_ult(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_ult:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32ZDINX-NEXT: fle.d a4, a2, a0
; CHECKRV32ZDINX-NEXT: beqz a4, .LBB11_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB11_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_ult:
@@ -576,26 +422,12 @@ define double @select_fcmp_ule(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_ule:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32ZDINX-NEXT: flt.d a4, a2, a0
; CHECKRV32ZDINX-NEXT: beqz a4, .LBB12_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB12_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_ule:
@@ -623,26 +455,12 @@ define double @select_fcmp_une(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_une:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: beqz a4, .LBB13_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB13_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_une:
@@ -672,15 +490,6 @@ define double @select_fcmp_uno(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_uno:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a4, a2, a2
; CHECKRV32ZDINX-NEXT: feq.d a5, a0, a0
; CHECKRV32ZDINX-NEXT: and a4, a5, a4
@@ -689,11 +498,6 @@ define double @select_fcmp_uno(double %a, double %b) nounwind {
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB14_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_uno:
@@ -741,22 +545,12 @@ define i32 @i32_select_fcmp_oeq(double %a, double %b, i32 %c, i32 %d) nounwind {
;
; CHECKRV32ZDINX-LABEL: i32_select_fcmp_oeq:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a1, a0, a2
; CHECKRV32ZDINX-NEXT: mv a0, a4
; CHECKRV32ZDINX-NEXT: bnez a1, .LBB16_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a5
; CHECKRV32ZDINX-NEXT: .LBB16_2:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: i32_select_fcmp_oeq:
@@ -783,20 +577,9 @@ define i32 @select_fcmp_oeq_1_2(double %a, double %b) {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_oeq_1_2:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a0, a0, a2
; CHECKRV32ZDINX-NEXT: li a1, 2
; CHECKRV32ZDINX-NEXT: sub a0, a1, a0
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_oeq_1_2:
@@ -819,18 +602,8 @@ define signext i32 @select_fcmp_uge_negone_zero(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_uge_negone_zero:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: fle.d a0, a0, a2
; CHECKRV32ZDINX-NEXT: addi a0, a0, -1
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_uge_negone_zero:
@@ -852,18 +625,8 @@ define signext i32 @select_fcmp_uge_1_2(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_uge_1_2:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: fle.d a0, a0, a2
; CHECKRV32ZDINX-NEXT: addi a0, a0, 1
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_uge_1_2:
diff --git a/llvm/test/CodeGen/RISCV/double-select-icmp.ll b/llvm/test/CodeGen/RISCV/double-select-icmp.ll
index d864ff5..929ffc5 100644
--- a/llvm/test/CodeGen/RISCV/double-select-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-select-icmp.ll
@@ -20,24 +20,13 @@ define double @select_icmp_eq(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_eq:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: bne a0, a1, .LBB0_2
+; RV32ZDINX-NEXT: beq a0, a1, .LBB0_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB0_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_eq:
@@ -64,24 +53,13 @@ define double @select_icmp_ne(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_ne:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: beq a0, a1, .LBB1_2
+; RV32ZDINX-NEXT: bne a0, a1, .LBB1_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB1_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_ne:
@@ -108,24 +86,13 @@ define double @select_icmp_ugt(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_ugt:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: bgeu a1, a0, .LBB2_2
+; RV32ZDINX-NEXT: bltu a1, a0, .LBB2_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB2_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_ugt:
@@ -152,24 +119,13 @@ define double @select_icmp_uge(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_uge:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: bltu a0, a1, .LBB3_2
+; RV32ZDINX-NEXT: bgeu a0, a1, .LBB3_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB3_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_uge:
@@ -196,24 +152,13 @@ define double @select_icmp_ult(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_ult:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: bgeu a0, a1, .LBB4_2
+; RV32ZDINX-NEXT: bltu a0, a1, .LBB4_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB4_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_ult:
@@ -240,24 +185,13 @@ define double @select_icmp_ule(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_ule:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: bltu a1, a0, .LBB5_2
+; RV32ZDINX-NEXT: bgeu a1, a0, .LBB5_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB5_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_ule:
@@ -284,24 +218,13 @@ define double @select_icmp_sgt(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_sgt:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: bge a1, a0, .LBB6_2
+; RV32ZDINX-NEXT: blt a1, a0, .LBB6_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB6_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_sgt:
@@ -328,24 +251,13 @@ define double @select_icmp_sge(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_sge:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: blt a0, a1, .LBB7_2
+; RV32ZDINX-NEXT: bge a0, a1, .LBB7_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB7_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_sge:
@@ -372,24 +284,13 @@ define double @select_icmp_slt(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_slt:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: bge a0, a1, .LBB8_2
+; RV32ZDINX-NEXT: blt a0, a1, .LBB8_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB8_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_slt:
@@ -416,24 +317,13 @@ define double @select_icmp_sle(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_sle:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: blt a1, a0, .LBB9_2
+; RV32ZDINX-NEXT: bge a1, a0, .LBB9_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB9_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_sle:
@@ -458,15 +348,8 @@ define double @select_icmp_slt_one(i32 signext %a) {
;
; RV32ZDINX-LABEL: select_icmp_slt_one:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32ZDINX-NEXT: slti a0, a0, 1
; RV32ZDINX-NEXT: fcvt.d.w a0, a0
-; RV32ZDINX-NEXT: sw a0, 8(sp)
-; RV32ZDINX-NEXT: sw a1, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_slt_one:
@@ -488,15 +371,8 @@ define double @select_icmp_sgt_zero(i32 signext %a) {
;
; RV32ZDINX-LABEL: select_icmp_sgt_zero:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32ZDINX-NEXT: slti a0, a0, 1
; RV32ZDINX-NEXT: fcvt.d.w a0, a0
-; RV32ZDINX-NEXT: sw a0, 8(sp)
-; RV32ZDINX-NEXT: sw a1, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_sgt_zero:
diff --git a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
index aa88a36..4ae912a 100644
--- a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
+++ b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
@@ -62,40 +62,28 @@ define double @func(double %d, i32 %n) nounwind {
;
; RV32IZFINXZDINX-LABEL: func:
; RV32IZFINXZDINX: # %bb.0: # %entry
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: mv s0, a0
; RV32IZFINXZDINX-NEXT: beqz a2, .LBB0_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
; RV32IZFINXZDINX-NEXT: addi a2, a2, -1
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv a0, s0
+; RV32IZFINXZDINX-NEXT: mv a1, s1
; RV32IZFINXZDINX-NEXT: call func
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, s0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: j .LBB0_3
; RV32IZFINXZDINX-NEXT: .LBB0_2: # %return
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv a0, s0
+; RV32IZFINXZDINX-NEXT: mv a1, s1
; RV32IZFINXZDINX-NEXT: .LBB0_3: # %return
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: func:
diff --git a/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll b/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
index fb0b34c..a44d31d 100644
--- a/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
+++ b/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
@@ -160,17 +160,13 @@ define double @caller_double(double %x) nounwind {
;
; ZDINX32-LABEL: caller_double:
; ZDINX32: # %bb.0: # %entry
-; ZDINX32-NEXT: addi sp, sp, -32
-; ZDINX32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; ZDINX32-NEXT: sw a0, 16(sp)
-; ZDINX32-NEXT: sw a1, 20(sp)
-; ZDINX32-NEXT: lw a0, 16(sp)
-; ZDINX32-NEXT: lw a1, 20(sp)
+; ZDINX32-NEXT: addi sp, sp, -16
+; ZDINX32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; ZDINX32-NEXT: sw a0, 0(sp)
; ZDINX32-NEXT: sw a1, 4(sp)
; ZDINX32-NEXT: call d
-; ZDINX32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; ZDINX32-NEXT: addi sp, sp, 32
+; ZDINX32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; ZDINX32-NEXT: addi sp, sp, 16
; ZDINX32-NEXT: ret
;
; ZDINX64-LABEL: caller_double:
@@ -200,14 +196,8 @@ define internal fastcc double @d(double %x) nounwind {
;
; ZDINX32-LABEL: d:
; ZDINX32: # %bb.0: # %entry
-; ZDINX32-NEXT: addi sp, sp, -16
-; ZDINX32-NEXT: lw a0, 16(sp)
-; ZDINX32-NEXT: lw a1, 20(sp)
-; ZDINX32-NEXT: sw a0, 8(sp)
-; ZDINX32-NEXT: sw a1, 12(sp)
-; ZDINX32-NEXT: lw a0, 8(sp)
-; ZDINX32-NEXT: lw a1, 12(sp)
-; ZDINX32-NEXT: addi sp, sp, 16
+; ZDINX32-NEXT: lw a0, 0(sp)
+; ZDINX32-NEXT: lw a1, 4(sp)
; ZDINX32-NEXT: ret
;
; ZDINX64-LABEL: d:
@@ -1360,14 +1350,8 @@ define fastcc double @callee_double_32(<32 x double> %A) nounwind {
;
; ZDINX32-LABEL: callee_double_32:
; ZDINX32: # %bb.0:
-; ZDINX32-NEXT: addi sp, sp, -16
-; ZDINX32-NEXT: lw a0, 16(sp)
-; ZDINX32-NEXT: lw a1, 20(sp)
-; ZDINX32-NEXT: sw a0, 8(sp)
-; ZDINX32-NEXT: sw a1, 12(sp)
-; ZDINX32-NEXT: lw a0, 8(sp)
-; ZDINX32-NEXT: lw a1, 12(sp)
-; ZDINX32-NEXT: addi sp, sp, 16
+; ZDINX32-NEXT: lw a0, 0(sp)
+; ZDINX32-NEXT: lw a1, 4(sp)
; ZDINX32-NEXT: ret
;
; ZDINX64-LABEL: callee_double_32:
diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index 9fb78d4..2c7315f 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -629,23 +629,23 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI12_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI12_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB12_2
; RV32IF-NEXT: # %bb.1: # %start
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB12_2: # %start
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB12_4
+; RV32IF-NEXT: beqz a3, .LBB12_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB12_4: # %start
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -668,37 +668,35 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: neg s2, s1
; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
-; RV32IZFINX-NEXT: lui a2, %hi(.LCPI12_0)
-; RV32IZFINX-NEXT: lw a2, %lo(.LCPI12_0)(a2)
-; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
-; RV32IZFINX-NEXT: or a0, a2, a0
-; RV32IZFINX-NEXT: feq.s a2, s0, s0
-; RV32IZFINX-NEXT: neg a2, a2
-; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
+; RV32IZFINX-NEXT: lui a2, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB12_2
; RV32IZFINX-NEXT: # %bb.1: # %start
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a2, a1
; RV32IZFINX-NEXT: .LBB12_2: # %start
-; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB12_4
+; RV32IZFINX-NEXT: lui a1, %hi(.LCPI12_0)
+; RV32IZFINX-NEXT: lw a1, %lo(.LCPI12_0)(a1)
+; RV32IZFINX-NEXT: flt.s a3, a1, s0
+; RV32IZFINX-NEXT: beqz a3, .LBB12_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a2, a4, -1
; RV32IZFINX-NEXT: .LBB12_4: # %start
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: feq.s a1, s0, s0
+; RV32IZFINX-NEXT: neg a4, a1
+; RV32IZFINX-NEXT: and a1, a4, a2
+; RV32IZFINX-NEXT: neg a2, s1
+; RV32IZFINX-NEXT: and a0, a2, a0
+; RV32IZFINX-NEXT: neg a2, a3
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: and a0, a4, a0
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
-; RV32IZFINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
index c72e69c..4f747c2 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
@@ -59,23 +59,23 @@ define i64 @test_floor_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI1_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB1_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB1_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB1_6
+; RV32IF-NEXT: beqz a3, .LBB1_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB1_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -117,23 +117,23 @@ define i64 @test_floor_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB1_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB1_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB1_6
+; RV32IZFINX-NEXT: beqz a3, .LBB1_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB1_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -321,23 +321,23 @@ define i64 @test_ceil_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI5_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB5_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB5_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB5_6
+; RV32IF-NEXT: beqz a3, .LBB5_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB5_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -379,23 +379,23 @@ define i64 @test_ceil_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI5_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB5_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB5_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB5_6
+; RV32IZFINX-NEXT: beqz a3, .LBB5_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB5_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -583,23 +583,23 @@ define i64 @test_trunc_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI9_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB9_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB9_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB9_6
+; RV32IF-NEXT: beqz a3, .LBB9_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB9_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -641,23 +641,23 @@ define i64 @test_trunc_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI9_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB9_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB9_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB9_6
+; RV32IZFINX-NEXT: beqz a3, .LBB9_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB9_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -845,23 +845,23 @@ define i64 @test_round_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI13_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB13_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB13_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB13_6
+; RV32IF-NEXT: beqz a3, .LBB13_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB13_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -903,23 +903,23 @@ define i64 @test_round_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI13_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB13_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB13_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB13_6
+; RV32IZFINX-NEXT: beqz a3, .LBB13_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB13_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1107,23 +1107,23 @@ define i64 @test_roundeven_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI17_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB17_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB17_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB17_6
+; RV32IF-NEXT: beqz a3, .LBB17_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB17_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1165,23 +1165,23 @@ define i64 @test_roundeven_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI17_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB17_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB17_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB17_6
+; RV32IZFINX-NEXT: beqz a3, .LBB17_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB17_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1369,23 +1369,23 @@ define i64 @test_rint_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI21_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB21_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB21_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB21_6
+; RV32IF-NEXT: beqz a3, .LBB21_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB21_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1427,23 +1427,23 @@ define i64 @test_rint_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI21_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB21_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB21_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB21_6
+; RV32IZFINX-NEXT: beqz a3, .LBB21_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB21_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/half-convert-strict.ll b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
index f03a020..677aa92 100644
--- a/llvm/test/CodeGen/RISCV/half-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
@@ -1745,13 +1745,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
;
; RV32IZDINXZHINX-LABEL: fcvt_h_d:
; RV32IZDINXZHINX: # %bb.0:
-; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
-; RV32IZDINXZHINX-NEXT: sw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: sw a1, 12(sp)
-; RV32IZDINXZHINX-NEXT: lw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: lw a1, 12(sp)
; RV32IZDINXZHINX-NEXT: fcvt.h.d a0, a0
-; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
; RV32IZDINXZHINX-NEXT: ret
;
; RV64IZDINXZHINX-LABEL: fcvt_h_d:
@@ -1807,13 +1801,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
;
; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_d:
; CHECK32-IZDINXZHINXMIN: # %bb.0:
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp)
; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.d a0, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
;
; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_d:
@@ -1878,13 +1866,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
;
; RV32IZDINXZHINX-LABEL: fcvt_d_h:
; RV32IZDINXZHINX: # %bb.0:
-; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: fcvt.d.h a0, a0
-; RV32IZDINXZHINX-NEXT: sw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: sw a1, 12(sp)
-; RV32IZDINXZHINX-NEXT: lw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: lw a1, 12(sp)
-; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
; RV32IZDINXZHINX-NEXT: ret
;
; RV64IZDINXZHINX-LABEL: fcvt_d_h:
@@ -1944,13 +1926,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
;
; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_d_h:
; CHECK32-IZDINXZHINXMIN: # %bb.0:
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.d.h a0, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
;
; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_d_h:
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index 28ac6e2..16c0962 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -2460,47 +2460,42 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
;
; RV32ID-ILP32-LABEL: fcvt_l_h_sat:
; RV32ID-ILP32: # %bb.0: # %start
-; RV32ID-ILP32-NEXT: addi sp, sp, -32
-; RV32ID-ILP32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32ID-ILP32-NEXT: addi sp, sp, -16
+; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ID-ILP32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: call __extendhfsf2
-; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI10_0)
-; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0
-; RV32ID-ILP32-NEXT: fsw fa4, 8(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: flt.s s0, fa5, fa4
-; RV32ID-ILP32-NEXT: neg s1, s0
; RV32ID-ILP32-NEXT: lui a1, 913408
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a1
-; RV32ID-ILP32-NEXT: fle.s s2, fa5, fa4
-; RV32ID-ILP32-NEXT: neg s3, s2
+; RV32ID-ILP32-NEXT: fsw fa4, 4(sp) # 4-byte Folded Spill
+; RV32ID-ILP32-NEXT: fle.s s0, fa5, fa4
; RV32ID-ILP32-NEXT: call __fixsfdi
-; RV32ID-ILP32-NEXT: and a0, s3, a0
-; RV32ID-ILP32-NEXT: or a0, s1, a0
-; RV32ID-ILP32-NEXT: flw fa5, 8(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: feq.s a2, fa5, fa5
-; RV32ID-ILP32-NEXT: neg a2, a2
; RV32ID-ILP32-NEXT: lui a4, 524288
-; RV32ID-ILP32-NEXT: lui a3, 524288
-; RV32ID-ILP32-NEXT: beqz s2, .LBB10_2
+; RV32ID-ILP32-NEXT: lui a2, 524288
+; RV32ID-ILP32-NEXT: beqz s0, .LBB10_2
; RV32ID-ILP32-NEXT: # %bb.1: # %start
-; RV32ID-ILP32-NEXT: mv a3, a1
+; RV32ID-ILP32-NEXT: mv a2, a1
; RV32ID-ILP32-NEXT: .LBB10_2: # %start
-; RV32ID-ILP32-NEXT: and a0, a2, a0
-; RV32ID-ILP32-NEXT: beqz s0, .LBB10_4
+; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI10_0)
+; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
+; RV32ID-ILP32-NEXT: flw fa4, 4(sp) # 4-byte Folded Reload
+; RV32ID-ILP32-NEXT: flt.s a3, fa5, fa4
+; RV32ID-ILP32-NEXT: fmv.s fa5, fa4
+; RV32ID-ILP32-NEXT: beqz a3, .LBB10_4
; RV32ID-ILP32-NEXT: # %bb.3:
-; RV32ID-ILP32-NEXT: addi a3, a4, -1
+; RV32ID-ILP32-NEXT: addi a2, a4, -1
; RV32ID-ILP32-NEXT: .LBB10_4: # %start
-; RV32ID-ILP32-NEXT: and a1, a2, a3
-; RV32ID-ILP32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: addi sp, sp, 32
+; RV32ID-ILP32-NEXT: feq.s a1, fa5, fa5
+; RV32ID-ILP32-NEXT: neg a4, a1
+; RV32ID-ILP32-NEXT: and a1, a4, a2
+; RV32ID-ILP32-NEXT: neg a2, a3
+; RV32ID-ILP32-NEXT: neg a3, s0
+; RV32ID-ILP32-NEXT: and a0, a3, a0
+; RV32ID-ILP32-NEXT: or a0, a2, a0
+; RV32ID-ILP32-NEXT: and a0, a4, a0
+; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ID-ILP32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32ID-ILP32-NEXT: addi sp, sp, 16
; RV32ID-ILP32-NEXT: ret
;
; RV64ID-LP64-LABEL: fcvt_l_h_sat:
@@ -5275,21 +5270,10 @@ define half @fcvt_h_d(double %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, 16
; RV64IZHINX-NEXT: ret
;
-; RV32IZDINXZHINX-LABEL: fcvt_h_d:
-; RV32IZDINXZHINX: # %bb.0:
-; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
-; RV32IZDINXZHINX-NEXT: sw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: sw a1, 12(sp)
-; RV32IZDINXZHINX-NEXT: lw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: lw a1, 12(sp)
-; RV32IZDINXZHINX-NEXT: fcvt.h.d a0, a0
-; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
-; RV32IZDINXZHINX-NEXT: ret
-;
-; RV64IZDINXZHINX-LABEL: fcvt_h_d:
-; RV64IZDINXZHINX: # %bb.0:
-; RV64IZDINXZHINX-NEXT: fcvt.h.d a0, a0
-; RV64IZDINXZHINX-NEXT: ret
+; CHECKIZDINXZHINX-LABEL: fcvt_h_d:
+; CHECKIZDINXZHINX: # %bb.0:
+; CHECKIZDINXZHINX-NEXT: fcvt.h.d a0, a0
+; CHECKIZDINXZHINX-NEXT: ret
;
; RV32I-LABEL: fcvt_h_d:
; RV32I: # %bb.0:
@@ -5405,13 +5389,7 @@ define half @fcvt_h_d(double %a) nounwind {
;
; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_d:
; CHECK32-IZDINXZHINXMIN: # %bb.0:
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp)
; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.d a0, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
;
; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_d:
@@ -5473,21 +5451,10 @@ define double @fcvt_d_h(half %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, 16
; RV64IZHINX-NEXT: ret
;
-; RV32IZDINXZHINX-LABEL: fcvt_d_h:
-; RV32IZDINXZHINX: # %bb.0:
-; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
-; RV32IZDINXZHINX-NEXT: fcvt.d.h a0, a0
-; RV32IZDINXZHINX-NEXT: sw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: sw a1, 12(sp)
-; RV32IZDINXZHINX-NEXT: lw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: lw a1, 12(sp)
-; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
-; RV32IZDINXZHINX-NEXT: ret
-;
-; RV64IZDINXZHINX-LABEL: fcvt_d_h:
-; RV64IZDINXZHINX: # %bb.0:
-; RV64IZDINXZHINX-NEXT: fcvt.d.h a0, a0
-; RV64IZDINXZHINX-NEXT: ret
+; CHECKIZDINXZHINX-LABEL: fcvt_d_h:
+; CHECKIZDINXZHINX: # %bb.0:
+; CHECKIZDINXZHINX-NEXT: fcvt.d.h a0, a0
+; CHECKIZDINXZHINX-NEXT: ret
;
; RV32I-LABEL: fcvt_d_h:
; RV32I: # %bb.0:
@@ -5607,13 +5574,7 @@ define double @fcvt_d_h(half %a) nounwind {
;
; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_d_h:
; CHECK32-IZDINXZHINXMIN: # %bb.0:
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.d.h a0, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
;
; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_d_h:
diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
index dd1115b..9c95210 100644
--- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
@@ -120,16 +120,16 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI1_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI1_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB1_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB1_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -137,11 +137,11 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB1_6
+; RV32IZFH-NEXT: beqz a3, .LBB1_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB1_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_floor_si64:
@@ -179,16 +179,16 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI1_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI1_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB1_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB1_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -196,11 +196,11 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB1_6
+; RV32IZHINX-NEXT: beqz a3, .LBB1_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB1_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_floor_si64:
@@ -251,16 +251,16 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI1_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB1_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB1_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -268,11 +268,11 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB1_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB1_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB1_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_floor_si64:
@@ -324,16 +324,16 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI1_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB1_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB1_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -341,11 +341,11 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB1_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB1_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB1_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_floor_si64:
@@ -836,16 +836,16 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI5_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI5_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB5_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB5_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -853,11 +853,11 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB5_6
+; RV32IZFH-NEXT: beqz a3, .LBB5_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB5_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_ceil_si64:
@@ -895,16 +895,16 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI5_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI5_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB5_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB5_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -912,11 +912,11 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB5_6
+; RV32IZHINX-NEXT: beqz a3, .LBB5_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB5_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_ceil_si64:
@@ -967,16 +967,16 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI5_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB5_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB5_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -984,11 +984,11 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB5_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB5_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB5_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_ceil_si64:
@@ -1040,16 +1040,16 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI5_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB5_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB5_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1057,11 +1057,11 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB5_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB5_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB5_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_ceil_si64:
@@ -1552,16 +1552,16 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI9_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI9_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB9_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB9_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1569,11 +1569,11 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB9_6
+; RV32IZFH-NEXT: beqz a3, .LBB9_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB9_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_trunc_si64:
@@ -1611,16 +1611,16 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI9_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI9_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB9_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB9_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1628,11 +1628,11 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB9_6
+; RV32IZHINX-NEXT: beqz a3, .LBB9_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB9_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_trunc_si64:
@@ -1683,16 +1683,16 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI9_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB9_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB9_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1700,11 +1700,11 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB9_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB9_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB9_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_trunc_si64:
@@ -1756,16 +1756,16 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI9_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB9_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB9_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1773,11 +1773,11 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB9_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB9_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB9_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_trunc_si64:
@@ -2268,16 +2268,16 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI13_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI13_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB13_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB13_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2285,11 +2285,11 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB13_6
+; RV32IZFH-NEXT: beqz a3, .LBB13_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB13_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_round_si64:
@@ -2327,16 +2327,16 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI13_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI13_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB13_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB13_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2344,11 +2344,11 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB13_6
+; RV32IZHINX-NEXT: beqz a3, .LBB13_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB13_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_round_si64:
@@ -2399,16 +2399,16 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI13_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB13_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB13_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2416,11 +2416,11 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB13_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB13_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB13_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_round_si64:
@@ -2472,16 +2472,16 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI13_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB13_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB13_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2489,11 +2489,11 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB13_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB13_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB13_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_round_si64:
@@ -2984,16 +2984,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI17_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI17_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB17_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB17_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3001,11 +3001,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB17_6
+; RV32IZFH-NEXT: beqz a3, .LBB17_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB17_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_roundeven_si64:
@@ -3043,16 +3043,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI17_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI17_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB17_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB17_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3060,11 +3060,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB17_6
+; RV32IZHINX-NEXT: beqz a3, .LBB17_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB17_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_roundeven_si64:
@@ -3115,16 +3115,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI17_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB17_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB17_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3132,11 +3132,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB17_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB17_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB17_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_roundeven_si64:
@@ -3188,16 +3188,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI17_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB17_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB17_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3205,11 +3205,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB17_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB17_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB17_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_roundeven_si64:
@@ -3700,16 +3700,16 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI21_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI21_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB21_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB21_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3717,11 +3717,11 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB21_6
+; RV32IZFH-NEXT: beqz a3, .LBB21_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB21_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_rint_si64:
@@ -3759,16 +3759,16 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI21_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI21_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB21_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB21_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3776,11 +3776,11 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB21_6
+; RV32IZHINX-NEXT: beqz a3, .LBB21_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB21_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_rint_si64:
@@ -3831,16 +3831,16 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI21_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB21_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB21_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3848,11 +3848,11 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB21_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB21_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB21_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_rint_si64:
@@ -3904,16 +3904,16 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI21_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB21_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB21_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3921,11 +3921,11 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB21_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB21_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB21_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_rint_si64:
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll b/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
index 71769a8..c480ba8 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
@@ -75,24 +75,10 @@ define double @constraint_f_double_abi_name(double %a) nounwind {
define double @constraint_gpr(double %x) {
; RV32F-LABEL: constraint_gpr:
; RV32F: # %bb.0:
-; RV32F-NEXT: addi sp, sp, -32
-; RV32F-NEXT: .cfi_def_cfa_offset 32
-; RV32F-NEXT: sw a0, 8(sp)
-; RV32F-NEXT: sw a1, 12(sp)
-; RV32F-NEXT: fld fa5, 8(sp)
-; RV32F-NEXT: fsd fa5, 24(sp)
-; RV32F-NEXT: lw a0, 24(sp)
-; RV32F-NEXT: lw a1, 28(sp)
+; RV32F-NEXT: .cfi_def_cfa_offset 0
; RV32F-NEXT: #APP
; RV32F-NEXT: mv a0, a0
; RV32F-NEXT: #NO_APP
-; RV32F-NEXT: sw a1, 20(sp)
-; RV32F-NEXT: sw a0, 16(sp)
-; RV32F-NEXT: fld fa5, 16(sp)
-; RV32F-NEXT: fsd fa5, 8(sp)
-; RV32F-NEXT: lw a0, 8(sp)
-; RV32F-NEXT: lw a1, 12(sp)
-; RV32F-NEXT: addi sp, sp, 32
; RV32F-NEXT: ret
;
; RV64F-LABEL: constraint_gpr:
diff --git a/llvm/test/CodeGen/RISCV/live-sp.mir b/llvm/test/CodeGen/RISCV/live-sp.mir
index 8dd307f..fa6297a 100644
--- a/llvm/test/CodeGen/RISCV/live-sp.mir
+++ b/llvm/test/CodeGen/RISCV/live-sp.mir
@@ -44,7 +44,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 4
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/RISCV/machine-combiner.ll b/llvm/test/CodeGen/RISCV/machine-combiner.ll
index cfdefec..ebf232c 100644
--- a/llvm/test/CodeGen/RISCV/machine-combiner.ll
+++ b/llvm/test/CodeGen/RISCV/machine-combiner.ll
@@ -740,9 +740,9 @@ define i8 @test_reassoc_minu_i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3) {
; CHECK-LABEL: test_reassoc_minu_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a3, a3, 255
-; CHECK-NEXT: andi a2, a2, 255
; CHECK-NEXT: andi a1, a1, 255
; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: andi a2, a2, 255
; CHECK-NEXT: minu a0, a0, a1
; CHECK-NEXT: minu a1, a2, a3
; CHECK-NEXT: minu a0, a0, a1
@@ -757,9 +757,9 @@ define i16 @test_reassoc_minu_i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3) {
; CHECK-LABEL: test_reassoc_minu_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: zext.h a3, a3
-; CHECK-NEXT: zext.h a2, a2
; CHECK-NEXT: zext.h a1, a1
; CHECK-NEXT: zext.h a0, a0
+; CHECK-NEXT: zext.h a2, a2
; CHECK-NEXT: minu a0, a0, a1
; CHECK-NEXT: minu a1, a2, a3
; CHECK-NEXT: minu a0, a0, a1
@@ -774,9 +774,9 @@ define i32 @test_reassoc_minu_i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: test_reassoc_minu_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.w a3, a3
-; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: sext.w a1, a1
; CHECK-NEXT: sext.w a0, a0
+; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: minu a0, a0, a1
; CHECK-NEXT: minu a1, a2, a3
; CHECK-NEXT: minu a0, a0, a1
@@ -804,9 +804,9 @@ define i8 @test_reassoc_min_i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3) {
; CHECK-LABEL: test_reassoc_min_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.b a3, a3
-; CHECK-NEXT: sext.b a2, a2
; CHECK-NEXT: sext.b a1, a1
; CHECK-NEXT: sext.b a0, a0
+; CHECK-NEXT: sext.b a2, a2
; CHECK-NEXT: min a0, a0, a1
; CHECK-NEXT: min a1, a2, a3
; CHECK-NEXT: min a0, a0, a1
@@ -821,9 +821,9 @@ define i16 @test_reassoc_min_i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3) {
; CHECK-LABEL: test_reassoc_min_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.h a3, a3
-; CHECK-NEXT: sext.h a2, a2
; CHECK-NEXT: sext.h a1, a1
; CHECK-NEXT: sext.h a0, a0
+; CHECK-NEXT: sext.h a2, a2
; CHECK-NEXT: min a0, a0, a1
; CHECK-NEXT: min a1, a2, a3
; CHECK-NEXT: min a0, a0, a1
@@ -838,9 +838,9 @@ define i32 @test_reassoc_min_i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: test_reassoc_min_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.w a3, a3
-; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: sext.w a1, a1
; CHECK-NEXT: sext.w a0, a0
+; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: min a0, a0, a1
; CHECK-NEXT: min a1, a2, a3
; CHECK-NEXT: min a0, a0, a1
@@ -868,9 +868,9 @@ define i8 @test_reassoc_maxu_i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3) {
; CHECK-LABEL: test_reassoc_maxu_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a3, a3, 255
-; CHECK-NEXT: andi a2, a2, 255
; CHECK-NEXT: andi a1, a1, 255
; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: andi a2, a2, 255
; CHECK-NEXT: maxu a0, a0, a1
; CHECK-NEXT: maxu a1, a2, a3
; CHECK-NEXT: maxu a0, a0, a1
@@ -885,9 +885,9 @@ define i16 @test_reassoc_maxu_i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3) {
; CHECK-LABEL: test_reassoc_maxu_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: zext.h a3, a3
-; CHECK-NEXT: zext.h a2, a2
; CHECK-NEXT: zext.h a1, a1
; CHECK-NEXT: zext.h a0, a0
+; CHECK-NEXT: zext.h a2, a2
; CHECK-NEXT: maxu a0, a0, a1
; CHECK-NEXT: maxu a1, a2, a3
; CHECK-NEXT: maxu a0, a0, a1
@@ -902,9 +902,9 @@ define i32 @test_reassoc_maxu_i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: test_reassoc_maxu_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.w a3, a3
-; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: sext.w a1, a1
; CHECK-NEXT: sext.w a0, a0
+; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: maxu a0, a0, a1
; CHECK-NEXT: maxu a1, a2, a3
; CHECK-NEXT: maxu a0, a0, a1
@@ -932,9 +932,9 @@ define i8 @test_reassoc_max_i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3) {
; CHECK-LABEL: test_reassoc_max_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.b a3, a3
-; CHECK-NEXT: sext.b a2, a2
; CHECK-NEXT: sext.b a1, a1
; CHECK-NEXT: sext.b a0, a0
+; CHECK-NEXT: sext.b a2, a2
; CHECK-NEXT: max a0, a0, a1
; CHECK-NEXT: max a1, a2, a3
; CHECK-NEXT: max a0, a0, a1
@@ -949,9 +949,9 @@ define i16 @test_reassoc_max_i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3) {
; CHECK-LABEL: test_reassoc_max_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.h a3, a3
-; CHECK-NEXT: sext.h a2, a2
; CHECK-NEXT: sext.h a1, a1
; CHECK-NEXT: sext.h a0, a0
+; CHECK-NEXT: sext.h a2, a2
; CHECK-NEXT: max a0, a0, a1
; CHECK-NEXT: max a1, a2, a3
; CHECK-NEXT: max a0, a0, a1
@@ -966,9 +966,9 @@ define i32 @test_reassoc_max_i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: test_reassoc_max_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.w a3, a3
-; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: sext.w a1, a1
; CHECK-NEXT: sext.w a0, a0
+; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: max a0, a0, a1
; CHECK-NEXT: max a1, a2, a3
; CHECK-NEXT: max a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/make-compressible-zbc.mir b/llvm/test/CodeGen/RISCV/make-compressible-zbc.mir
new file mode 100644
index 0000000..89a6ca7a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/make-compressible-zbc.mir
@@ -0,0 +1,585 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -o - %s -mtriple=riscv32 -mattr=+zcb -simplify-mir \
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=CHECK %s
+# RUN: llc -o - %s -mtriple=riscv64 -mattr=+zcb -simplify-mir \
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=CHECK %s
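+
+# The riscv-make-compressible pass copies a register that blocks compression
+# (the stored value, the common base pointer, or a base plus out-of-range
+# offset) into a register the compressed Zcb loads/stores (c.lbu, c.lh,
+# c.lhu, c.sb, c.sh) can encode, so repeated accesses use the short forms.
+# The *_no_opt_* functions use offset patterns the compressed encodings
+# cannot cover and must be left unchanged.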
+
+--- |
+ define void @store_common_value_i8(ptr %a, ptr %b, ptr %c) #0 {
+ entry:
+ store i8 0, ptr %a, align 1
+ store i8 0, ptr %b, align 1
+ store i8 0, ptr %c, align 1
+ ret void
+ }
+
+ define void @store_common_value_i16(ptr %a, ptr %b, ptr %c) #0 {
+ entry:
+ store i16 0, ptr %a, align 2
+ store i16 0, ptr %b, align 2
+ store i16 0, ptr %c, align 2
+ ret void
+ }
+
+ define void @store_common_ptr_i8(ptr %p) #0 {
+ entry:
+ store volatile i8 1, ptr %p, align 1
+ store volatile i8 3, ptr %p, align 1
+ store volatile i8 5, ptr %p, align 1
+ ret void
+ }
+
+ define void @store_common_ptr_i16(ptr %p) #0 {
+ entry:
+ store volatile i16 1, ptr %p, align 2
+ store volatile i16 3, ptr %p, align 2
+ store volatile i16 5, ptr %p, align 2
+ ret void
+ }
+
+ define void @load_common_ptr_i8(ptr %p) #0 {
+ entry:
+ %0 = load volatile i8, ptr %p, align 1
+ %a = sext i8 %0 to i32
+ %1 = load volatile i8, ptr %p, align 1
+ %2 = load volatile i8, ptr %p, align 1
+ ret void
+ }
+
+ define void @load_common_ptr_s16(ptr %p) #0 {
+ entry:
+ %0 = load volatile i16, ptr %p, align 2
+ %1 = load volatile i16, ptr %p, align 2
+ %2 = load volatile i16, ptr %p, align 2
+ ret void
+ }
+
+ define void @load_common_ptr_u16(ptr %p) #0 {
+ entry:
+ %0 = load volatile i16, ptr %p, align 2
+ %1 = load volatile i16, ptr %p, align 2
+ %2 = load volatile i16, ptr %p, align 2
+ ret void
+ }
+
+ define void @store_large_offset_i8(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i8, ptr %p, i8 100
+ store volatile i8 1, ptr %0, align 1
+ %1 = getelementptr inbounds i8, ptr %p, i8 101
+ store volatile i8 3, ptr %1, align 1
+ %2 = getelementptr inbounds i8, ptr %p, i8 102
+ store volatile i8 5, ptr %2, align 1
+ %3 = getelementptr inbounds i8, ptr %p, i8 103
+ store volatile i8 7, ptr %3, align 1
+ ret void
+ }
+
+ define void @store_large_offset_i16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ store volatile i16 1, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 100
+ store volatile i16 3, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 101
+    store volatile i16 5, ptr %2, align 2
+ %3 = getelementptr inbounds i16, ptr %p, i16 101
+ store volatile i16 7, ptr %3, align 2
+ ret void
+ }
+
+ define void @load_large_offset_i8(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i8, ptr %p, i8 100
+ %a = load volatile i8, ptr %0
+ %1 = getelementptr inbounds i8, ptr %p, i8 101
+ %b = load volatile i8, ptr %1
+ %2 = getelementptr inbounds i8, ptr %p, i8 102
+ %c = load volatile i8, ptr %2
+ %3 = getelementptr inbounds i8, ptr %p, i8 103
+ %d = load volatile i8, ptr %3
+ ret void
+ }
+
+ define void @load_large_offset_s16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ %a = load volatile i16, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 100
+ %b = load volatile i16, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 101
+ %c = load volatile i16, ptr %2, align 2
+ %3 = getelementptr inbounds i16, ptr %p, i16 101
+ %d = load volatile i16, ptr %3, align 2
+ ret void
+ }
+
+ define void @load_large_offset_u16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ %a = load volatile i16, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 100
+ %b = load volatile i16, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 101
+ %c = load volatile i16, ptr %2, align 2
+ %3 = getelementptr inbounds i16, ptr %p, i16 101
+ %d = load volatile i16, ptr %3, align 2
+ ret void
+ }
+
+  define void @store_large_offset_no_opt_i8(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i8, ptr %p, i8 100
+ store volatile i8 1, ptr %0, align 1
+ %1 = getelementptr inbounds i8, ptr %p, i8 101
+ store volatile i8 3, ptr %1, align 1
+ %2 = getelementptr inbounds i8, ptr %p, i8 104
+ store volatile i8 5, ptr %2, align 1
+ ret void
+ }
+
+ define void @store_large_offset_no_opt_i16(ptr %p) #0 {
+ entry:
+    %0 = getelementptr inbounds i16, ptr %p, i16 100
+    store volatile i16 1, ptr %0, align 2
+    %1 = getelementptr inbounds i16, ptr %p, i16 101
+    store volatile i16 3, ptr %1, align 2
+    %2 = getelementptr inbounds i16, ptr %p, i16 102
+    store volatile i16 5, ptr %2, align 2
+ ret void
+ }
+
+ define void @load_large_offset_no_opt_i8(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i8, ptr %p, i8 100
+ %a = load volatile i8, ptr %0
+ %1 = getelementptr inbounds i8, ptr %p, i8 101
+ %b = load volatile i8, ptr %1
+ %2 = getelementptr inbounds i8, ptr %p, i8 103
+ %c = load volatile i8, ptr %2
+ ret void
+ }
+
+ define void @load_large_offset_no_opt_s16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ %a = load volatile i16, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 101
+ %c = load volatile i16, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 102
+ %d = load volatile i16, ptr %2, align 2
+ ret void
+ }
+
+ define void @load_large_offset_no_opt_u16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ %a = load volatile i16, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 101
+ %c = load volatile i16, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 102
+ %d = load volatile i16, ptr %2, align 2
+ ret void
+ }
+ attributes #0 = { minsize }
+
+...
+---
+name: store_common_value_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11, $x12
+
+ ; CHECK-LABEL: name: store_common_value_i8
+ ; CHECK: liveins: $x10, $x11, $x12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x13 = ADDI $x0, 0
+ ; CHECK-NEXT: SB $x13, killed renamable $x10, 0 :: (store (s8) into %ir.a)
+ ; CHECK-NEXT: SB $x13, killed renamable $x11, 0 :: (store (s8) into %ir.b)
+ ; CHECK-NEXT: SB $x13, killed renamable $x12, 0 :: (store (s8) into %ir.c)
+ ; CHECK-NEXT: PseudoRET
+ SB $x0, killed renamable $x10, 0 :: (store (s8) into %ir.a)
+ SB $x0, killed renamable $x11, 0 :: (store (s8) into %ir.b)
+ SB $x0, killed renamable $x12, 0 :: (store (s8) into %ir.c)
+ PseudoRET
+
+...
+---
+name: store_common_value_i16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11, $x12
+
+ ; CHECK-LABEL: name: store_common_value_i16
+ ; CHECK: liveins: $x10, $x11, $x12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x13 = ADDI $x0, 0
+ ; CHECK-NEXT: SH $x13, killed renamable $x10, 0 :: (store (s16) into %ir.a)
+ ; CHECK-NEXT: SH $x13, killed renamable $x11, 0 :: (store (s16) into %ir.b)
+ ; CHECK-NEXT: SH $x13, killed renamable $x12, 0 :: (store (s16) into %ir.c)
+ ; CHECK-NEXT: PseudoRET
+ SH $x0, killed renamable $x10, 0 :: (store (s16) into %ir.a)
+ SH $x0, killed renamable $x11, 0 :: (store (s16) into %ir.b)
+ SH $x0, killed renamable $x12, 0 :: (store (s16) into %ir.c)
+ PseudoRET
+
+...
+---
+name: store_common_ptr_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: store_common_ptr_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 1
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: SB killed renamable $x10, $x11, 0 :: (volatile store (s8) into %ir.p)
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 3
+ ; CHECK-NEXT: SB killed renamable $x10, $x11, 0 :: (volatile store (s8) into %ir.p)
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 5
+ ; CHECK-NEXT: SB killed renamable $x10, killed $x11, 0 :: (volatile store (s8) into %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x10 = ADDI $x0, 1
+ SB killed renamable $x10, renamable $x16, 0 :: (volatile store (s8) into %ir.p)
+ renamable $x10 = ADDI $x0, 3
+ SB killed renamable $x10, renamable $x16, 0 :: (volatile store (s8) into %ir.p)
+ renamable $x10 = ADDI $x0, 5
+ SB killed renamable $x10, killed renamable $x16, 0 :: (volatile store (s8) into %ir.p)
+ PseudoRET
+
+...
+---
+name: store_common_ptr_i16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: store_common_ptr_i16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 1
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: SH killed renamable $x10, $x11, 0 :: (volatile store (s16) into %ir.p)
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 3
+ ; CHECK-NEXT: SH killed renamable $x10, $x11, 0 :: (volatile store (s16) into %ir.p)
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 5
+ ; CHECK-NEXT: SH killed renamable $x10, killed $x11, 0 :: (volatile store (s16) into %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x10 = ADDI $x0, 1
+ SH killed renamable $x10, renamable $x16, 0 :: (volatile store (s16) into %ir.p)
+ renamable $x10 = ADDI $x0, 3
+ SH killed renamable $x10, renamable $x16, 0 :: (volatile store (s16) into %ir.p)
+ renamable $x10 = ADDI $x0, 5
+ SH killed renamable $x10, killed renamable $x16, 0 :: (volatile store (s16) into %ir.p)
+ PseudoRET
+
+...
+---
+name: load_common_ptr_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_common_ptr_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 0 :: (volatile load (s8) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 0 :: (volatile load (s8) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LBU killed $x11, 0 :: (volatile load (s8) from %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LBU renamable $x16, 0 :: (volatile load (s8) from %ir.p)
+ dead $x10 = LBU renamable $x16, 0 :: (volatile load (s8) from %ir.p)
+ dead $x10 = LBU killed renamable $x16, 0 :: (volatile load (s8) from %ir.p)
+ PseudoRET
+
+...
+---
+name: load_common_ptr_s16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_common_ptr_s16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: dead $x10 = LH $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LH $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LH killed $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LH renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ dead $x10 = LH renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ dead $x10 = LH killed renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ PseudoRET
+
+...
+---
+name: load_common_ptr_u16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_common_ptr_u16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LHU killed $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LHU renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ dead $x10 = LHU renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ dead $x10 = LHU killed renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ PseudoRET
+
+...
+---
+name: store_large_offset_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: store_large_offset_i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
+ ; CHECK-NEXT: $x12 = ADDI $x10, 100
+ ; CHECK-NEXT: SB killed renamable $x11, $x12, 0 :: (volatile store (s8) into %ir.0)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
+ ; CHECK-NEXT: SB killed renamable $x11, $x12, 1 :: (volatile store (s8) into %ir.1)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
+ ; CHECK-NEXT: SB killed renamable $x11, $x12, 2 :: (volatile store (s8) into %ir.2)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 7
+ ; CHECK-NEXT: SB killed renamable $x11, killed $x12, 3 :: (volatile store (s8) into %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x11 = ADDI $x0, 1
+ SB killed renamable $x11, renamable $x10, 100 :: (volatile store (s8) into %ir.0)
+ renamable $x11 = ADDI $x0, 3
+ SB killed renamable $x11, renamable $x10, 101 :: (volatile store (s8) into %ir.1)
+ renamable $x11 = ADDI $x0, 5
+ SB killed renamable $x11, renamable $x10, 102 :: (volatile store (s8) into %ir.2)
+ renamable $x11 = ADDI $x0, 7
+ SB killed renamable $x11, killed renamable $x10, 103 :: (volatile store (s8) into %ir.3)
+ PseudoRET
+
+...
+---
+name: store_large_offset_i16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ ; CHECK-LABEL: name: store_large_offset_i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
+ ; CHECK-NEXT: $x12 = ADDI $x10, 200
+ ; CHECK-NEXT: SH killed renamable $x11, $x12, 0 :: (volatile store (s16) into %ir.0)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
+ ; CHECK-NEXT: SH killed renamable $x11, $x12, 0 :: (volatile store (s16) into %ir.1)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
+ ; CHECK-NEXT: SH killed renamable $x11, $x12, 2 :: (volatile store (s16) into %ir.2)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 7
+ ; CHECK-NEXT: SH killed renamable $x11, killed $x12, 2 :: (volatile store (s16) into %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x11 = ADDI $x0, 1
+ SH killed renamable $x11, renamable $x10, 200 :: (volatile store (s16) into %ir.0)
+ renamable $x11 = ADDI $x0, 3
+ SH killed renamable $x11, renamable $x10, 200 :: (volatile store (s16) into %ir.1)
+ renamable $x11 = ADDI $x0, 5
+ SH killed renamable $x11, renamable $x10, 202 :: (volatile store (s16) into %ir.2)
+ renamable $x11 = ADDI $x0, 7
+ SH killed renamable $x11, killed renamable $x10, 202 :: (volatile store (s16) into %ir.3)
+ PseudoRET
+
+...
+---
+name: load_large_offset_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 100
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 0 :: (volatile load (s8) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 1 :: (volatile load (s8) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 2 :: (volatile load (s8) from %ir.2)
+ ; CHECK-NEXT: dead $x10 = LBU killed $x11, 3 :: (volatile load (s8) from %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LBU renamable $x16, 100 :: (volatile load (s8) from %ir.0)
+ dead $x10 = LBU renamable $x16, 101 :: (volatile load (s8) from %ir.1)
+ dead $x10 = LBU renamable $x16, 102 :: (volatile load (s8) from %ir.2)
+ dead $x10 = LBU killed renamable $x16, 103 :: (volatile load (s8) from %ir.3)
+ PseudoRET
+
+...
+---
+name: load_large_offset_s16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_s16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 100
+ ; CHECK-NEXT: dead $x10 = LH $x11, 0 :: (volatile load (s16) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LH $x11, 0 :: (volatile load (s16) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LH $x11, 2 :: (volatile load (s16) from %ir.2)
+ ; CHECK-NEXT: dead $x10 = LH killed $x11, 2 :: (volatile load (s16) from %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LH renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+ dead $x10 = LH renamable $x16, 100 :: (volatile load (s16) from %ir.1)
+ dead $x10 = LH renamable $x16, 102 :: (volatile load (s16) from %ir.2)
+ dead $x10 = LH killed renamable $x16, 102 :: (volatile load (s16) from %ir.3)
+ PseudoRET
+
+...
+---
+name: load_large_offset_u16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_u16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 100
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 0 :: (volatile load (s16) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 0 :: (volatile load (s16) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 2 :: (volatile load (s16) from %ir.2)
+ ; CHECK-NEXT: dead $x10 = LHU killed $x11, 2 :: (volatile load (s16) from %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LHU renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+ dead $x10 = LHU renamable $x16, 100 :: (volatile load (s16) from %ir.1)
+ dead $x10 = LHU renamable $x16, 102 :: (volatile load (s16) from %ir.2)
+ dead $x10 = LHU killed renamable $x16, 102 :: (volatile load (s16) from %ir.3)
+ PseudoRET
+
+...
+---
+name: store_large_offset_no_opt_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: store_large_offset_no_opt_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
+ ; CHECK-NEXT: SB killed renamable $x11, renamable $x16, 100 :: (volatile store (s8) into %ir.0)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
+ ; CHECK-NEXT: SB killed renamable $x11, renamable $x16, 101 :: (volatile store (s8) into %ir.1)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
+ ; CHECK-NEXT: SB killed renamable $x11, renamable $x16, 104 :: (volatile store (s8) into %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x11 = ADDI $x0, 1
+ SB killed renamable $x11, renamable $x16, 100 :: (volatile store (s8) into %ir.0)
+ renamable $x11 = ADDI $x0, 3
+ SB killed renamable $x11, renamable $x16, 101 :: (volatile store (s8) into %ir.1)
+ renamable $x11 = ADDI $x0, 5
+ SB killed renamable $x11, renamable $x16, 104 :: (volatile store (s8) into %ir.2)
+ PseudoRET
+
+...
+---
+name: store_large_offset_no_opt_i16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: store_large_offset_no_opt_i16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
+ ; CHECK-NEXT: SH killed renamable $x11, renamable $x16, 200 :: (volatile store (s16) into %ir.0)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
+ ; CHECK-NEXT: SH killed renamable $x11, renamable $x16, 202 :: (volatile store (s16) into %ir.1)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
+ ; CHECK-NEXT: SH killed renamable $x11, renamable $x16, 204 :: (volatile store (s16) into %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x11 = ADDI $x0, 1
+ SH killed renamable $x11, renamable $x16, 200 :: (volatile store (s16) into %ir.0)
+ renamable $x11 = ADDI $x0, 3
+ SH killed renamable $x11, renamable $x16, 202 :: (volatile store (s16) into %ir.1)
+ renamable $x11 = ADDI $x0, 5
+ SH killed renamable $x11, renamable $x16, 204 :: (volatile store (s16) into %ir.2)
+ PseudoRET
+
+...
+---
+name: load_large_offset_no_opt_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_no_opt_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead $x10 = LBU renamable $x16, 100 :: (volatile load (s8) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LBU renamable $x16, 101 :: (volatile load (s8) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LBU killed renamable $x16, 104 :: (volatile load (s8) from %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LBU renamable $x16, 100 :: (volatile load (s8) from %ir.0)
+ dead $x10 = LBU renamable $x16, 101 :: (volatile load (s8) from %ir.1)
+ dead $x10 = LBU killed renamable $x16, 104 :: (volatile load (s8) from %ir.2)
+ PseudoRET
+
+...
+---
+name: load_large_offset_no_opt_s16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_no_opt_s16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: dead $x10 = LH renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+    ; CHECK-NEXT: dead $x10 = LH renamable $x16, 102 :: (volatile load (s16) from %ir.1)
+    ; CHECK-NEXT: dead $x10 = LH killed renamable $x16, 104 :: (volatile load (s16) from %ir.2)
+    ; CHECK-NEXT: PseudoRET
+    dead $x10 = LH renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+    dead $x10 = LH renamable $x16, 102 :: (volatile load (s16) from %ir.1)
+    dead $x10 = LH killed renamable $x16, 104 :: (volatile load (s16) from %ir.2)
+ PseudoRET
+
+...
+---
+name: load_large_offset_no_opt_u16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_no_opt_u16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: dead $x10 = LHU renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+    ; CHECK-NEXT: dead $x10 = LHU renamable $x16, 102 :: (volatile load (s16) from %ir.1)
+    ; CHECK-NEXT: dead $x10 = LHU killed renamable $x16, 104 :: (volatile load (s16) from %ir.2)
+    ; CHECK-NEXT: PseudoRET
+    dead $x10 = LHU renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+    dead $x10 = LHU renamable $x16, 102 :: (volatile load (s16) from %ir.1)
+    dead $x10 = LHU killed renamable $x16, 104 :: (volatile load (s16) from %ir.2)
+ PseudoRET
+
+...
diff --git a/llvm/test/CodeGen/RISCV/misched-postra-direction.mir b/llvm/test/CodeGen/RISCV/misched-postra-direction.mir
index 841d0e6..2cca042 100644
--- a/llvm/test/CodeGen/RISCV/misched-postra-direction.mir
+++ b/llvm/test/CodeGen/RISCV/misched-postra-direction.mir
@@ -1,5 +1,15 @@
-# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched -enable-post-misched -debug-only=machine-scheduler -misched-dump-schedule-trace -misched-postra-direction=topdown -o - %s 2>&1 | FileCheck --check-prefix=TOPDOWN %s
-# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched -enable-post-misched -debug-only=machine-scheduler -misched-dump-schedule-trace -misched-postra-direction=bottomup -o - %s 2>&1 | FileCheck --check-prefix=BOTTOMUP %s
+# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched \
+# RUN: -enable-post-misched -debug-only=machine-scheduler \
+# RUN: -misched-dump-schedule-trace -misched-postra-direction=topdown \
+# RUN: -o - %s 2>&1 | FileCheck --check-prefix=TOPDOWN %s
+# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched \
+# RUN: -enable-post-misched -debug-only=machine-scheduler \
+# RUN: -misched-dump-schedule-trace -misched-postra-direction=bottomup \
+# RUN: -o - %s 2>&1 | FileCheck --check-prefix=BOTTOMUP %s
+# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched \
+# RUN: -enable-post-misched -debug-only=machine-scheduler \
+# RUN: -misched-dump-schedule-trace -misched-postra-direction=bidirectional \
+# RUN: -o - %s 2>&1 | FileCheck --check-prefix=BIDIRECTIONAL %s
# REQUIRES: asserts
@@ -51,3 +61,9 @@ body: |
# BOTTOMUP-NEXT: SU(1): renamable $x13 = ADD renamable $x11, renamable $x10
# BOTTOMUP-NEXT: SU(0): renamable $x12 = MUL renamable $x11, renamable $x10
# BOTTOMUP-NEXT: SU(2): renamable $x14 = DIVW renamable $x12, renamable $x13
+
+# BIDIRECTIONAL: *** Final schedule for %bb.0 ***
+# BIDIRECTIONAL-NEXT: * Schedule table (Bidirectional): not implemented
+# BIDIRECTIONAL-NEXT: SU(1): renamable $x13 = ADD renamable $x11, renamable $x10
+# BIDIRECTIONAL-NEXT: SU(0): renamable $x12 = MUL renamable $x11, renamable $x10
+# BIDIRECTIONAL-NEXT: SU(2): renamable $x14 = DIVW renamable $x12, renamable $x13
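+
+# Note: the bidirectional post-RA scheduler has no schedule-table dump yet
+# (hence "not implemented" above), so only the final instruction order is
+# checked.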
diff --git a/llvm/test/CodeGen/RISCV/module-elf-flags.ll b/llvm/test/CodeGen/RISCV/module-elf-flags.ll
new file mode 100644
index 0000000..1b4bc9f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/module-elf-flags.ll
@@ -0,0 +1,13 @@
+; RUN: llc -mtriple=riscv32 -filetype=obj < %s | llvm-readelf -h - | FileCheck -check-prefixes=FLAGS %s
+
+; FLAGS: Flags: 0x11, RVC, TSO
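+; 0x11 = EF_RISCV_RVC (0x1) | EF_RISCV_TSO (0x10), derived from the C and
+; Ztso extensions named in the "riscv-isa" module flag below.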
+
+define i32 @addi(i32 %a) {
+ %1 = add i32 %a, 1
+ ret i32 %1
+}
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 6, !"riscv-isa", !1}
+!1 = !{!"rv64i2p1_c2p0_ztso0p1"}
diff --git a/llvm/test/CodeGen/RISCV/pr64645.ll b/llvm/test/CodeGen/RISCV/pr64645.ll
index 44dce5a..f6d4651 100644
--- a/llvm/test/CodeGen/RISCV/pr64645.ll
+++ b/llvm/test/CodeGen/RISCV/pr64645.ll
@@ -5,34 +5,8 @@
define <2 x double> @v2f64(<2 x double> %x, <2 x double> %y) nounwind {
; CHECK-LABEL: v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a4, 8(sp)
-; CHECK-NEXT: sw a5, 12(sp)
-; CHECK-NEXT: lw a4, 8(sp)
-; CHECK-NEXT: lw a5, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: lw a0, 8(sp)
-; CHECK-NEXT: lw a1, 12(sp)
-; CHECK-NEXT: sw a6, 8(sp)
-; CHECK-NEXT: sw a7, 12(sp)
-; CHECK-NEXT: lw a6, 8(sp)
-; CHECK-NEXT: lw a7, 12(sp)
-; CHECK-NEXT: sw a2, 8(sp)
-; CHECK-NEXT: sw a3, 12(sp)
-; CHECK-NEXT: lw a2, 8(sp)
-; CHECK-NEXT: lw a3, 12(sp)
; CHECK-NEXT: fadd.d a2, a2, a6
; CHECK-NEXT: fadd.d a0, a0, a4
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: lw a0, 8(sp)
-; CHECK-NEXT: lw a1, 12(sp)
-; CHECK-NEXT: sw a2, 8(sp)
-; CHECK-NEXT: sw a3, 12(sp)
-; CHECK-NEXT: lw a2, 8(sp)
-; CHECK-NEXT: lw a3, 12(sp)
-; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%1 = fadd <2 x double> %x, %y
ret <2 x double> %1
diff --git a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
index 3731b97..b45ab13 100644
--- a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
@@ -11,8 +11,6 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: beqz a0, .LBB0_2
; RV32I-NEXT: # %bb.1: # %cond.false
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
@@ -40,12 +38,11 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: li a0, 32
@@ -64,19 +61,16 @@ declare i64 @llvm.ctlz.i64(i64, i1)
define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-LABEL: ctlz_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB1_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -87,28 +81,26 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB1_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -119,35 +111,21 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB1_2
-; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a0, a0, 32
-; RV32I-NEXT: j .LBB1_3
-; RV32I-NEXT: .LBB1_2:
-; RV32I-NEXT: srli a0, s1, 24
-; RV32I-NEXT: .LBB1_3:
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32XTHEADBB-LABEL: ctlz_i64:
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index 36c1070..7e6c3f9 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -11,8 +11,6 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: beqz a0, .LBB0_2
; RV32I-NEXT: # %bb.1: # %cond.false
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
@@ -40,12 +38,11 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: li a0, 32
@@ -64,19 +61,16 @@ declare i64 @llvm.ctlz.i64(i64, i1)
define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-LABEL: ctlz_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB1_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -87,28 +81,26 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB1_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -119,35 +111,21 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB1_2
-; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a0, a0, 32
-; RV32I-NEXT: j .LBB1_3
-; RV32I-NEXT: .LBB1_2:
-; RV32I-NEXT: srli a0, s1, 24
-; RV32I-NEXT: .LBB1_3:
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctlz_i64:
@@ -275,8 +253,6 @@ declare i32 @llvm.ctpop.i32(i32)
define i32 @ctpop_i32(i32 %a) nounwind {
; RV32I-LABEL: ctpop_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: lui a2, 349525
; RV32I-NEXT: addi a2, a2, 1365
@@ -293,12 +269,11 @@ define i32 @ctpop_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctpop_i32:
@@ -390,58 +365,42 @@ declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
; RV32I-LABEL: ctpop_v2i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s3, a2, 1365
-; RV32I-NEXT: and a1, a1, s3
-; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s4, a1, 819
-; RV32I-NEXT: and a1, a0, s4
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: lui a3, 349525
+; RV32I-NEXT: addi a3, a3, 1365
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: sub a0, a0, a2
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a2, a2, 819
+; RV32I-NEXT: and a4, a0, a2
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s5, a1, -241
-; RV32I-NEXT: and a0, a0, s5
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s1, a1, 257
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s2, a0, 24
-; RV32I-NEXT: srli a0, s0, 1
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: sub s0, s0, a0
-; RV32I-NEXT: and a0, s0, s4
-; RV32I-NEXT: srli s0, s0, 2
-; RV32I-NEXT: and a1, s0, s4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s5
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli a1, a0, 24
-; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: add a0, a4, a0
+; RV32I-NEXT: srli a4, a0, 4
+; RV32I-NEXT: add a0, a0, a4
+; RV32I-NEXT: lui a4, 61681
+; RV32I-NEXT: addi a4, a4, -241
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: slli a5, a0, 8
+; RV32I-NEXT: add a0, a0, a5
+; RV32I-NEXT: slli a5, a0, 16
+; RV32I-NEXT: add a0, a0, a5
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: srli a5, a1, 1
+; RV32I-NEXT: and a3, a5, a3
+; RV32I-NEXT: sub a1, a1, a3
+; RV32I-NEXT: and a3, a1, a2
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: add a1, a3, a1
+; RV32I-NEXT: srli a2, a1, 4
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: and a1, a1, a4
+; RV32I-NEXT: slli a2, a1, 8
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: slli a2, a1, 16
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: srli a1, a1, 24
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctpop_v2i32:
@@ -558,59 +517,44 @@ declare i64 @llvm.ctpop.i64(i64)
define i64 @ctpop_i64(i64 %a) nounwind {
; RV32I-LABEL: ctpop_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s2, a2, 1365
-; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: sub a1, a1, a0
-; RV32I-NEXT: lui a0, 209715
-; RV32I-NEXT: addi s3, a0, 819
-; RV32I-NEXT: and a0, a1, s3
+; RV32I-NEXT: srli a2, a1, 1
+; RV32I-NEXT: lui a3, 349525
+; RV32I-NEXT: addi a3, a3, 1365
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a2, a2, 819
+; RV32I-NEXT: and a4, a1, a2
; RV32I-NEXT: srli a1, a1, 2
-; RV32I-NEXT: and a1, a1, s3
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s4, a1, -241
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s1, a1, 257
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s5, a0, 24
-; RV32I-NEXT: srli a0, s0, 1
-; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: sub s0, s0, a0
-; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: srli s0, s0, 2
-; RV32I-NEXT: and a1, s0, s3
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: add a1, a4, a1
+; RV32I-NEXT: srli a4, a1, 4
+; RV32I-NEXT: add a1, a1, a4
+; RV32I-NEXT: lui a4, 61681
+; RV32I-NEXT: addi a4, a4, -241
+; RV32I-NEXT: and a1, a1, a4
+; RV32I-NEXT: slli a5, a1, 8
+; RV32I-NEXT: add a1, a1, a5
+; RV32I-NEXT: slli a5, a1, 16
+; RV32I-NEXT: add a1, a1, a5
+; RV32I-NEXT: srli a1, a1, 24
+; RV32I-NEXT: srli a5, a0, 1
+; RV32I-NEXT: and a3, a5, a3
+; RV32I-NEXT: sub a0, a0, a3
+; RV32I-NEXT: and a3, a0, a2
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: add a0, a3, a0
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: add a0, a0, a2
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: add a0, a0, s5
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctpop_i64:
@@ -738,99 +682,82 @@ declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
; RV32I-LABEL: ctpop_v2i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -48
-; RV32I-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s7, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s8, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: lw a0, 4(a1)
-; RV32I-NEXT: lw s2, 8(a1)
-; RV32I-NEXT: lw s5, 12(a1)
-; RV32I-NEXT: lw s6, 0(a1)
-; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s3, a2, 1365
-; RV32I-NEXT: and a1, a1, s3
-; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s4, a1, 819
-; RV32I-NEXT: and a1, a0, s4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s7, a1, -241
-; RV32I-NEXT: and a0, a0, s7
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s1, a1, 257
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s8, a0, 24
-; RV32I-NEXT: srli a0, s6, 1
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: sub a0, s6, a0
-; RV32I-NEXT: and a1, a0, s4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s7
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: add s8, a0, s8
-; RV32I-NEXT: srli a0, s5, 1
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: sub a0, s5, a0
-; RV32I-NEXT: and a1, a0, s4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s7
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s5, a0, 24
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: sub a0, s2, a0
-; RV32I-NEXT: and a1, a0, s4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s7
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: add a0, a0, s5
-; RV32I-NEXT: sw zero, 12(s0)
-; RV32I-NEXT: sw zero, 4(s0)
-; RV32I-NEXT: sw a0, 8(s0)
-; RV32I-NEXT: sw s8, 0(s0)
-; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 32(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s7, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s8, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 48
+; RV32I-NEXT: lw a3, 4(a1)
+; RV32I-NEXT: lw a2, 8(a1)
+; RV32I-NEXT: lw a4, 12(a1)
+; RV32I-NEXT: lw a1, 0(a1)
+; RV32I-NEXT: srli a5, a3, 1
+; RV32I-NEXT: lui a6, 349525
+; RV32I-NEXT: addi a6, a6, 1365
+; RV32I-NEXT: and a5, a5, a6
+; RV32I-NEXT: sub a3, a3, a5
+; RV32I-NEXT: lui a5, 209715
+; RV32I-NEXT: addi a5, a5, 819
+; RV32I-NEXT: and a7, a3, a5
+; RV32I-NEXT: srli a3, a3, 2
+; RV32I-NEXT: and a3, a3, a5
+; RV32I-NEXT: add a3, a7, a3
+; RV32I-NEXT: srli a7, a3, 4
+; RV32I-NEXT: add a3, a3, a7
+; RV32I-NEXT: lui a7, 61681
+; RV32I-NEXT: addi a7, a7, -241
+; RV32I-NEXT: and a3, a3, a7
+; RV32I-NEXT: slli t0, a3, 8
+; RV32I-NEXT: add a3, a3, t0
+; RV32I-NEXT: slli t0, a3, 16
+; RV32I-NEXT: add a3, a3, t0
+; RV32I-NEXT: srli a3, a3, 24
+; RV32I-NEXT: srli t0, a1, 1
+; RV32I-NEXT: and t0, t0, a6
+; RV32I-NEXT: sub a1, a1, t0
+; RV32I-NEXT: and t0, a1, a5
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: and a1, a1, a5
+; RV32I-NEXT: add a1, t0, a1
+; RV32I-NEXT: srli t0, a1, 4
+; RV32I-NEXT: add a1, a1, t0
+; RV32I-NEXT: and a1, a1, a7
+; RV32I-NEXT: slli t0, a1, 8
+; RV32I-NEXT: add a1, a1, t0
+; RV32I-NEXT: slli t0, a1, 16
+; RV32I-NEXT: add a1, a1, t0
+; RV32I-NEXT: srli a1, a1, 24
+; RV32I-NEXT: add a1, a1, a3
+; RV32I-NEXT: srli a3, a4, 1
+; RV32I-NEXT: and a3, a3, a6
+; RV32I-NEXT: sub a4, a4, a3
+; RV32I-NEXT: and a3, a4, a5
+; RV32I-NEXT: srli a4, a4, 2
+; RV32I-NEXT: and a4, a4, a5
+; RV32I-NEXT: add a3, a3, a4
+; RV32I-NEXT: srli a4, a3, 4
+; RV32I-NEXT: add a3, a3, a4
+; RV32I-NEXT: and a3, a3, a7
+; RV32I-NEXT: slli a4, a3, 8
+; RV32I-NEXT: add a3, a3, a4
+; RV32I-NEXT: slli a4, a3, 16
+; RV32I-NEXT: add a3, a3, a4
+; RV32I-NEXT: srli a3, a3, 24
+; RV32I-NEXT: srli a4, a2, 1
+; RV32I-NEXT: and a4, a4, a6
+; RV32I-NEXT: sub a2, a2, a4
+; RV32I-NEXT: and a4, a2, a5
+; RV32I-NEXT: srli a2, a2, 2
+; RV32I-NEXT: and a2, a2, a5
+; RV32I-NEXT: add a2, a4, a2
+; RV32I-NEXT: srli a4, a2, 4
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: and a2, a2, a7
+; RV32I-NEXT: slli a4, a2, 8
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: slli a4, a2, 16
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: srli a2, a2, 24
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: sw zero, 12(a0)
+; RV32I-NEXT: sw zero, 4(a0)
+; RV32I-NEXT: sw a2, 8(a0)
+; RV32I-NEXT: sw a1, 0(a0)
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctpop_v2i64:
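The hunks above drop the __mulsi3 libcalls (and with them every ra/s-register spill and reload in the prologue and epilogue): multiplying the per-byte counts by 0x01010101 (materialized via lui 4112; addi 257) is replaced by an equivalent shift-and-add byte reduction. A minimal C sketch of the two finishing steps, assuming the usual SWAR stages have already left each byte of x holding that byte's bit count; the helper names are illustrative and this mirrors the generated code, not LLVM source:

    #include <stdint.h>

    /* Old form: one multiply sums all four bytes into the top byte. */
    static uint32_t byte_sum_mul(uint32_t x) {
        return (x * 0x01010101u) >> 24;
    }

    /* New form: the same byte sum via slli/add pairs (no libcall). */
    static uint32_t byte_sum_shift(uint32_t x) {
        x += x << 8;    /* each byte gains the byte below it */
        x += x << 16;   /* top byte now holds byte3+byte2+byte1+byte0 */
        return x >> 24; /* the popcount */
    }

The 64-bit variants later in the diff add one more slli-by-32/add step and shift right by 56 instead of 24.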
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
index 73bfc64..acd63f2 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
@@ -317,8 +317,6 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB5_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -354,14 +352,13 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: li a0, 64
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
index 7feef4d..b0e447b 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
@@ -307,8 +307,6 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB5_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -344,14 +342,13 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: li a0, 64
@@ -623,8 +620,6 @@ declare i64 @llvm.ctpop.i64(i64)
define i64 @ctpop_i64(i64 %a) nounwind {
; RV64I-LABEL: ctpop_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -647,14 +642,13 @@ define i64 @ctpop_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_i64:
diff --git a/llvm/test/CodeGen/RISCV/rv64-typepromotion.ll b/llvm/test/CodeGen/RISCV/rv64-typepromotion.ll
new file mode 100644
index 0000000..23eae33
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-typepromotion.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -mtriple=riscv64 -passes=typepromotion -S %s | FileCheck %s
+
+; Test that this does not crash
+define i16 @test(i8 %a, i32 %b) {
+; CHECK-LABEL: define i16 @test(
+; CHECK-SAME: i8 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[B]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[TMP1]] to i64
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[TMP2]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = and i32 [[TMP0]], 255
+; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
+; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], [[TMP2]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP6]] to i16
+; CHECK-NEXT: ret i16 [[TMP7]]
+;
+entry:
+ %0 = zext i8 %a to i32
+ %1 = trunc i32 %b to i16
+ %2 = icmp eq i16 %1, 0
+ %3 = trunc i32 %0 to i8
+ %4 = zext i8 %3 to i16
+ %5 = xor i16 %4, %1
+ ret i16 %5
+}
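The new test only pins down that the pass does not crash, but the CHECK lines also show what it does: the i8/i16 arithmetic is widened to the target's native width, the icmp (whose result is dead in this function) is rewritten on zero-extended i64 values, and the explicit trunc/zext chain around %a becomes an and-with-255 mask. A rough C analogue of the promoted form, with a hypothetical function name and assuming nothing beyond the CHECK lines above:

    #include <stdint.h>

    uint16_t test_promoted(uint8_t a, uint32_t b) {
        uint64_t wide_b = (uint16_t)b;         /* zext(trunc i32 -> i16) */
        uint64_t wide_a = (uint32_t)a & 255u;  /* the and/zext of %a */
        /* the icmp eq against 0 exists in the IR but its result is unused */
        return (uint16_t)(wide_a ^ wide_b);    /* xor in i64, trunc to i16 */
    }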
diff --git a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
index 1f62ea9f..6cdab88 100644
--- a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
@@ -11,8 +11,6 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB0_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -38,14 +36,13 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: li a0, 32
@@ -66,8 +63,6 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB1_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -93,14 +88,13 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: j .LBB1_3
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: li a0, 32
@@ -125,50 +119,45 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
; RV64I-LABEL: log2_ceil_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: addiw a0, a0, -1
-; RV64I-NEXT: li s0, 32
-; RV64I-NEXT: li a1, 32
-; RV64I-NEXT: beqz a0, .LBB2_2
+; RV64I-NEXT: addiw a1, a0, -1
+; RV64I-NEXT: li a0, 32
+; RV64I-NEXT: li a2, 32
+; RV64I-NEXT: beqz a1, .LBB2_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: srliw a2, a1, 1
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 2
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 4
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 8
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 16
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: not a1, a1
+; RV64I-NEXT: srli a2, a1, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a1, a0, 24
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: lui a2, 61681
+; RV64I-NEXT: addi a2, a2, -241
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 24
; RV64I-NEXT: .LBB2_2: # %cond.end
-; RV64I-NEXT: sub a0, s0, a1
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
;
; RV64XTHEADBB-LABEL: log2_ceil_i32:
@@ -189,48 +178,42 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-LABEL: findLastSet_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: srliw a0, a0, 1
-; RV64I-NEXT: or a0, s0, a0
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: srliw a1, a0, 1
+; RV64I-NEXT: or a1, a0, a1
+; RV64I-NEXT: srliw a2, a1, 2
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 4
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 8
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 16
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: not a1, a1
+; RV64I-NEXT: srli a2, a1, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: xori a0, a0, 31
-; RV64I-NEXT: snez a1, s0
-; RV64I-NEXT: addi a1, a1, -1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: lui a2, 61681
+; RV64I-NEXT: addi a2, a2, -241
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 24
+; RV64I-NEXT: xori a1, a1, 31
+; RV64I-NEXT: snez a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBB-LABEL: findLastSet_i32:
@@ -256,10 +239,6 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srliw a0, a0, 1
; RV64I-NEXT: beqz a0, .LBB4_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: .cfi_def_cfa_offset 16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -285,14 +264,13 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: li a0, 32
@@ -317,8 +295,6 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB5_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -354,14 +330,13 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: li a0, 64
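Both ctlz_i64 hunks above keep the same overall recipe — smear the highest set bit downward with the srli/or cascade, invert, then popcount — and only swap the multiply-based byte sum for the shift-and-add form. The identity the sequence relies on, as a C sketch (illustrative only; the tests guard the zero case with beqz separately):

    #include <stdint.h>

    /* clz for x != 0: after smearing, ~x has exactly the leading zeros set. */
    static unsigned clz64(uint64_t x) {
        x |= x >> 1;  x |= x >> 2;  x |= x >> 4;   /* smear MSB down */
        x |= x >> 8;  x |= x >> 16; x |= x >> 32;
        x = ~x;                                    /* only leading zeros remain */
        /* popcount(x): the same SWAR reduction as in the ctpop tests */
        x -= (x >> 1) & 0x5555555555555555ull;
        x = (x & 0x3333333333333333ull) + ((x >> 2) & 0x3333333333333333ull);
        x = (x + (x >> 4)) & 0x0F0F0F0F0F0F0F0Full;
        x += x << 8; x += x << 16; x += x << 32;   /* byte sum, no multiply */
        return (unsigned)(x >> 56);
    }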
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index f810f51..c81c6ae 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -209,6 +209,24 @@ define i64 @sh1adduw_2(i64 %0, i64 %1) {
ret i64 %5
}
+define i64 @sh1adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh1adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 31
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh1adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh1add.uw a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 1
+ %4 = and i64 %3, 8589934590
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
define signext i32 @sh2adduw(i32 signext %0, ptr %1) {
; RV64I-LABEL: sh2adduw:
; RV64I: # %bb.0:
@@ -247,6 +265,24 @@ define i64 @sh2adduw_2(i64 %0, i64 %1) {
ret i64 %5
}
+define i64 @sh2adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh2adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 30
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh2adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 2
+ %4 = and i64 %3, 17179869180
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
define i64 @sh3adduw(i32 signext %0, ptr %1) {
; RV64I-LABEL: sh3adduw:
; RV64I: # %bb.0:
@@ -285,6 +321,24 @@ define i64 @sh3adduw_2(i64 %0, i64 %1) {
ret i64 %5
}
+define i64 @sh3adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh3adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 29
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh3adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 3
+ %4 = and i64 %3, 34359738360
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
; Type legalization inserts a sext_inreg after the first add. That add will be
; selected as sh2add which does not sign extend. SimplifyDemandedBits is unable
; to remove the sext_inreg because it has multiple uses. The ashr will use the
@@ -335,6 +389,24 @@ define i64 @addmul6(i64 %a, i64 %b) {
ret i64 %d
}
+define i64 @disjointormul6(i64 %a, i64 %b) {
+; RV64I-LABEL: disjointormul6:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a2, 6
+; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: disjointormul6:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh1add a0, a0, a0
+; RV64ZBA-NEXT: sh1add a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %c = mul i64 %a, 6
+ %d = or disjoint i64 %c, %b
+ ret i64 %d
+}
+
define i64 @addmul10(i64 %a, i64 %b) {
; RV64I-LABEL: addmul10:
; RV64I: # %bb.0:
@@ -1099,6 +1171,23 @@ define i64 @add4104(i64 %a) {
ret i64 %c
}
+define i64 @add4104_2(i64 %a) {
+; RV64I-LABEL: add4104_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a1, 1
+; RV64I-NEXT: addiw a1, a1, 8
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: add4104_2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: li a1, 1026
+; RV64ZBA-NEXT: sh2add a0, a1, a0
+; RV64ZBA-NEXT: ret
+ %c = or disjoint i64 %a, 4104
+ ret i64 %c
+}
+
define i64 @add8208(i64 %a) {
; RV64I-LABEL: add8208:
; RV64I: # %bb.0:
@@ -1282,6 +1371,96 @@ define zeroext i32 @sext_ashr_zext_i8(i8 %a) nounwind {
ret i32 %1
}
+define i64 @sh6_sh3_add1(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add1:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add1:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: sh3add a1, a1, a2
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %shl1, %shl
+ %add2 = add nsw i64 %add, %x
+ ret i64 %add2
+}
+
+define i64 @sh6_sh3_add2(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add2:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add2:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: slli a1, a1, 6
+; RV64ZBA-NEXT: add a0, a1, a0
+; RV64ZBA-NEXT: sh3add a0, a2, a0
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %shl1, %x
+ %add2 = add nsw i64 %add, %shl
+ ret i64 %add2
+}
+
+define i64 @sh6_sh3_add3(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add3:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add3:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: sh3add a1, a1, a2
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %shl1, %shl
+ %add2 = add nsw i64 %x, %add
+ ret i64 %add2
+}
+
+define i64 @sh6_sh3_add4(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add4:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add4:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: slli a1, a1, 6
+; RV64ZBA-NEXT: sh3add a0, a2, a0
+; RV64ZBA-NEXT: add a0, a0, a1
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %x, %shl
+ %add2 = add nsw i64 %add, %shl1
+ ret i64 %add2
+}
+
; Make sure we use sext.h+slli+srli for Zba+Zbb.
; FIXME: The RV64I and Zba only cases can be done with only 3 shifts.
define zeroext i32 @sext_ashr_zext_i16(i16 %a) nounwind {
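The new Zba tests above all hinge on the `or disjoint` flag: when the operands are known to share no set bits, or behaves exactly like add, so it is allowed to match the shNadd/shNadd.uw shift-add patterns. The masks in the shNadduw_3 tests are 0xFFFFFFFF shifted left by N (8589934590 = 0xFFFFFFFF << 1, and so on), i.e. a zero-extended 32-bit word shifted by N. A hedged C sketch of the equivalence being selected, not of the selector itself:

    #include <stdint.h>
    #include <assert.h>

    uint64_t sh1adduw_3(uint64_t x, uint64_t y) {
        uint64_t shifted = (x << 1) & 0x1FFFFFFFEull; /* (zext.w x) << 1 */
        assert((shifted & y) == 0); /* the "disjoint" promise; poison in IR if broken */
        return shifted | y;         /* == shifted + y, hence sh1add.uw a0, a0, a1 */
    }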
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index 2269d8d..4d5ef5d 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -11,8 +11,6 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB0_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -38,14 +36,13 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: li a0, 32
@@ -64,8 +61,6 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB1_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -91,14 +86,13 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: j .LBB1_3
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: li a0, 32
@@ -121,50 +115,45 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
; RV64I-LABEL: log2_ceil_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: addiw a0, a0, -1
-; RV64I-NEXT: li s0, 32
-; RV64I-NEXT: li a1, 32
-; RV64I-NEXT: beqz a0, .LBB2_2
+; RV64I-NEXT: addiw a1, a0, -1
+; RV64I-NEXT: li a0, 32
+; RV64I-NEXT: li a2, 32
+; RV64I-NEXT: beqz a1, .LBB2_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: srliw a2, a1, 1
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 2
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 4
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 8
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 16
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: not a1, a1
+; RV64I-NEXT: srli a2, a1, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a1, a0, 24
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: lui a2, 61681
+; RV64I-NEXT: addi a2, a2, -241
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 24
; RV64I-NEXT: .LBB2_2: # %cond.end
-; RV64I-NEXT: sub a0, s0, a1
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: log2_ceil_i32:
@@ -183,48 +172,42 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-LABEL: findLastSet_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: srliw a0, a0, 1
-; RV64I-NEXT: or a0, s0, a0
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: srliw a1, a0, 1
+; RV64I-NEXT: or a1, a0, a1
+; RV64I-NEXT: srliw a2, a1, 2
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 4
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 8
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 16
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: not a1, a1
+; RV64I-NEXT: srli a2, a1, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: xori a0, a0, 31
-; RV64I-NEXT: snez a1, s0
-; RV64I-NEXT: addi a1, a1, -1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: lui a2, 61681
+; RV64I-NEXT: addi a2, a2, -241
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 24
+; RV64I-NEXT: xori a1, a1, 31
+; RV64I-NEXT: snez a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: findLastSet_i32:
@@ -248,10 +231,6 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srliw a0, a0, 1
; RV64I-NEXT: beqz a0, .LBB4_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: .cfi_def_cfa_offset 16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -277,14 +256,13 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: li a0, 32
@@ -307,8 +285,6 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB5_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -344,14 +320,13 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: li a0, 64
@@ -544,8 +519,6 @@ declare i32 @llvm.ctpop.i32(i32)
define signext i32 @ctpop_i32(i32 signext %a) nounwind {
; RV64I-LABEL: ctpop_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -560,14 +533,13 @@ define signext i32 @ctpop_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_i32:
@@ -657,8 +629,6 @@ define i1 @ctpop_i32_ne_one(i32 signext %a) nounwind {
define signext i32 @ctpop_i32_load(ptr %p) nounwind {
; RV64I-LABEL: ctpop_i32_load:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
@@ -674,14 +644,13 @@ define signext i32 @ctpop_i32_load(ptr %p) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_i32_load:
@@ -699,58 +668,42 @@ declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
; RV64I-LABEL: ctpop_v2i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -64
-; RV64I-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a1
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw s3, a2, 1365
-; RV64I-NEXT: and a1, a1, s3
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw s4, a1, 819
-; RV64I-NEXT: and a1, a0, s4
+; RV64I-NEXT: srli a2, a0, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a0, a0, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a4, a0, a2
; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, s4
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw s5, a1, -241
-; RV64I-NEXT: and a0, a0, s5
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw s1, a1, 257
-; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw s2, a0, 24
-; RV64I-NEXT: srli a0, s0, 1
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: sub s0, s0, a0
-; RV64I-NEXT: and a0, s0, s4
-; RV64I-NEXT: srli s0, s0, 2
-; RV64I-NEXT: and a1, s0, s4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: and a0, a0, s5
-; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a1, a0, 24
-; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 64
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a4, a0
+; RV64I-NEXT: srli a4, a0, 4
+; RV64I-NEXT: add a0, a0, a4
+; RV64I-NEXT: lui a4, 61681
+; RV64I-NEXT: addi a4, a4, -241
+; RV64I-NEXT: and a0, a0, a4
+; RV64I-NEXT: slli a5, a0, 8
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: slli a5, a0, 16
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: srliw a0, a0, 24
+; RV64I-NEXT: srli a5, a1, 1
+; RV64I-NEXT: and a3, a5, a3
+; RV64I-NEXT: sub a1, a1, a3
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: and a1, a1, a4
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 24
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_v2i32:
@@ -875,8 +828,6 @@ declare i64 @llvm.ctpop.i64(i64)
define i64 @ctpop_i64(i64 %a) nounwind {
; RV64I-LABEL: ctpop_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -899,14 +850,13 @@ define i64 @ctpop_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_i64:
@@ -998,66 +948,52 @@ declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
; RV64I-LABEL: ctpop_v2i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -64
-; RV64I-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a1
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
-; RV64I-NEXT: slli a3, a2, 32
-; RV64I-NEXT: add s3, a2, a3
-; RV64I-NEXT: and a1, a1, s3
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add s4, a1, a2
-; RV64I-NEXT: and a1, a0, s4
+; RV64I-NEXT: srli a2, a0, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: slli a4, a3, 32
+; RV64I-NEXT: add a3, a3, a4
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a0, a0, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: slli a4, a2, 32
+; RV64I-NEXT: add a2, a2, a4
+; RV64I-NEXT: and a4, a0, a2
; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, s4
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a4, a0
+; RV64I-NEXT: srli a4, a0, 4
+; RV64I-NEXT: add a0, a0, a4
+; RV64I-NEXT: lui a4, 61681
+; RV64I-NEXT: addiw a4, a4, -241
+; RV64I-NEXT: slli a5, a4, 32
+; RV64I-NEXT: add a4, a4, a5
+; RV64I-NEXT: and a0, a0, a4
+; RV64I-NEXT: slli a5, a0, 8
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: slli a5, a0, 16
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: slli a5, a0, 32
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: srli a0, a0, 56
+; RV64I-NEXT: srli a5, a1, 1
+; RV64I-NEXT: and a3, a5, a3
+; RV64I-NEXT: sub a1, a1, a3
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: and a1, a1, a4
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add s5, a1, a2
-; RV64I-NEXT: and a0, a0, s5
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw s1, a1, 257
-; RV64I-NEXT: slli a1, s1, 32
-; RV64I-NEXT: add s1, s1, a1
-; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srli s2, a0, 56
-; RV64I-NEXT: srli a0, s0, 1
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: sub s0, s0, a0
-; RV64I-NEXT: and a0, s0, s4
-; RV64I-NEXT: srli s0, s0, 2
-; RV64I-NEXT: and a1, s0, s4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: and a0, a0, s5
-; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srli a1, a0, 56
-; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 64
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srli a1, a1, 56
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_v2i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll
new file mode 100644
index 0000000..ddbfbd0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll
@@ -0,0 +1,343 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+;
+; SABD
+;
+
+define <vscale x 16 x i8> @sabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sabd_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmin.vv v12, v8, v10
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i16>
+ %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i16>
+ %sub = sub <vscale x 16 x i16> %a.sext, %b.sext
+ %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
+ %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>
+ ret <vscale x 16 x i8> %trunc
+}
+
+define <vscale x 16 x i8> @sabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
+; CHECK-LABEL: sabd_b_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmerge.vim v12, v10, -1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
+; CHECK-NEXT: vmin.vv v10, v12, v8
+; CHECK-NEXT: vmax.vv v8, v12, v8
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 16 x i1> %a to <vscale x 16 x i8>
+ %b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
+ %sub = sub <vscale x 16 x i8> %a.sext, %b.sext
+ %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
+ ret <vscale x 16 x i8> %abs
+}
+
+define <vscale x 8 x i16> @sabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sabd_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmin.vv v12, v8, v10
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 8 x i16> %a to <vscale x 8 x i32>
+ %b.sext = sext <vscale x 8 x i16> %b to <vscale x 8 x i32>
+ %sub = sub <vscale x 8 x i32> %a.sext, %b.sext
+ %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true)
+ %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %trunc
+}
+
+define <vscale x 8 x i16> @sabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: sabd_h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+ %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+ %sub = sub <vscale x 8 x i16> %a.sext, %b.sext
+ %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+ ret <vscale x 8 x i16> %abs
+}
+
+define <vscale x 4 x i32> @sabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sabd_s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vmin.vv v12, v8, v10
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 4 x i32> %a to <vscale x 4 x i64>
+ %b.sext = sext <vscale x 4 x i32> %b to <vscale x 4 x i64>
+ %sub = sub <vscale x 4 x i64> %a.sext, %b.sext
+ %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
+ %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %trunc
+}
+
+define <vscale x 4 x i32> @sabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: sabd_s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+ %b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+ %sub = sub <vscale x 4 x i32> %a.sext, %b.sext
+ %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+ ret <vscale x 4 x i32> %abs
+}
+
+; FIXME: Crashes legalization if enabled
+;; define <vscale x 2 x i64> @sabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+;; %a.sext = sext <vscale x 2 x i64> %a to <vscale x 2 x i128>
+;; %b.sext = sext <vscale x 2 x i64> %b to <vscale x 2 x i128>
+;; %sub = sub <vscale x 2 x i128> %a.sext, %b.sext
+;; %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true)
+;; %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64>
+;; ret <vscale x 2 x i64> %trunc
+;; }
+
+define <vscale x 2 x i64> @sabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: sabd_d_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+ %b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
+ %sub = sub <vscale x 2 x i64> %a.sext, %b.sext
+ %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
+ ret <vscale x 2 x i64> %abs
+}
+
+;
+; UABD
+;
+
+define <vscale x 16 x i8> @uabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uabd_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vminu.vv v12, v8, v10
+; CHECK-NEXT: vmaxu.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 16 x i8> %a to <vscale x 16 x i16>
+ %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i16>
+ %sub = sub <vscale x 16 x i16> %a.zext, %b.zext
+ %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
+ %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>
+ ret <vscale x 16 x i8> %trunc
+}
+
+define <vscale x 16 x i8> @uabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
+; CHECK-LABEL: uabd_b_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vminu.vv v10, v12, v8
+; CHECK-NEXT: vmaxu.vv v8, v12, v8
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 16 x i1> %a to <vscale x 16 x i8>
+ %b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
+ %sub = sub <vscale x 16 x i8> %a.zext, %b.zext
+ %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
+ ret <vscale x 16 x i8> %abs
+}
+
+define <vscale x 8 x i16> @uabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uabd_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vminu.vv v12, v8, v10
+; CHECK-NEXT: vmaxu.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i16> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i16> %b to <vscale x 8 x i32>
+ %sub = sub <vscale x 8 x i32> %a.zext, %b.zext
+ %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true)
+ %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %trunc
+}
+
+define <vscale x 8 x i16> @uabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: uabd_h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+ %sub = sub <vscale x 8 x i16> %a.zext, %b.zext
+ %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+ ret <vscale x 8 x i16> %abs
+}
+
+define <vscale x 4 x i32> @uabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uabd_s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vminu.vv v12, v8, v10
+; CHECK-NEXT: vmaxu.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
+ %b.zext = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
+ %sub = sub <vscale x 4 x i64> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
+ %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %trunc
+}
+
+define <vscale x 4 x i32> @uabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: uabd_s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+ %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+ %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+ ret <vscale x 4 x i32> %abs
+}
+
+; FIXME: Crashes legalization if enabled
+;; define <vscale x 2 x i64> @uabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+;; %a.zext = zext <vscale x 2 x i64> %a to <vscale x 2 x i128>
+;; %b.zext = zext <vscale x 2 x i64> %b to <vscale x 2 x i128>
+;; %sub = sub <vscale x 2 x i128> %a.zext, %b.zext
+;; %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true)
+;; %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64>
+;; ret <vscale x 2 x i64> %trunc
+;; }
+
+define <vscale x 2 x i64> @uabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: uabd_d_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+ %b.zext = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
+ %sub = sub <vscale x 2 x i64> %a.zext, %b.zext
+ %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
+ ret <vscale x 2 x i64> %abs
+}
+
+; Test the situation where isLegal(ISD::ABD, typeof(%a)) returns true but %a and
+; %b have differing types.
+define <vscale x 4 x i32> @uabd_non_matching_extension(<vscale x 4 x i32> %a, <vscale x 4 x i8> %b) {
+; CHECK-LABEL: uabd_non_matching_extension:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v12, v10
+; CHECK-NEXT: vminu.vv v10, v8, v12
+; CHECK-NEXT: vmaxu.vv v8, v8, v12
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
+ %b.zext = zext <vscale x 4 x i8> %b to <vscale x 4 x i64>
+ %sub = sub <vscale x 4 x i64> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
+ %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %trunc
+}
+
+; Test the situation where isLegal(ISD::ABD, typeof(%a.zext)) returns true but
+; %a and %b have differing types.
+define <vscale x 4 x i32> @uabd_non_matching_promoted_ops(<vscale x 4 x i8> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: uabd_non_matching_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vminu.vv v8, v10, v9
+; CHECK-NEXT: vmaxu.vv v9, v10, v9
+; CHECK-NEXT: vsub.vv v10, v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
+ %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+ %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+ ret <vscale x 4 x i32> %abs
+}
+
+; Test the situation where isLegal(ISD::ABD, typeof(%a)) returns true but %a
+; and %b are extended with differing signedness (zext vs. sext).
+define <vscale x 4 x i32> @uabd_non_matching_promotion(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; CHECK-LABEL: uabd_non_matching_promotion:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v8, v9
+; CHECK-NEXT: vwsub.wv v10, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vrsub.vi v8, v10, 0
+; CHECK-NEXT: vmax.vv v8, v10, v8
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
+ %b.zext = sext <vscale x 4 x i8> %b to <vscale x 4 x i32>
+ %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+ ret <vscale x 4 x i32> %abs
+}
+
+declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)
+
+declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
+declare <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16>, i1)
+
+declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
+declare <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32>, i1)
+
+declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
+declare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1)
+
+declare <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128>, i1)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir b/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
index 5255728..080a89e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
@@ -22,7 +22,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 16
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
functionContext: ''
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
index 1d025a2..1fe91c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
@@ -18,15 +18,15 @@ define void @test(ptr %addr) {
; CHECK-NEXT: add a2, a0, a1
; CHECK-NEXT: vl1re64.v v8, (a2)
; CHECK-NEXT: slli a2, a1, 1
-; CHECK-NEXT: add a3, a0, a2
-; CHECK-NEXT: vl1re64.v v9, (a3)
+; CHECK-NEXT: vl1re64.v v9, (a0)
+; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: vl1re64.v v10, (a0)
; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs1r.v v9, (a0)
; CHECK-NEXT: add a2, a0, a2
-; CHECK-NEXT: vs1r.v v9, (a2)
-; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vs1r.v v8, (a1)
-; CHECK-NEXT: vs1r.v v10, (a0)
+; CHECK-NEXT: vs1r.v v10, (a2)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vs1r.v v8, (a0)
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
index 64031f8..a9a680d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
@@ -16,13 +16,13 @@ define <vscale x 1 x double> @test(ptr %addr, i64 %vl) {
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; CHECK-NEXT: csrrs a2, vlenb, zero
-; CHECK-NEXT: add a3, a0, a2
-; CHECK-NEXT: vl1re64.v v8, (a3)
+; CHECK-NEXT: vl1re64.v v8, (a0)
+; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: vl1re64.v v9, (a0)
; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs1r.v v8, (a0)
; CHECK-NEXT: add a2, a0, a2
-; CHECK-NEXT: vs1r.v v8, (a2)
-; CHECK-NEXT: vs1r.v v9, (a0)
+; CHECK-NEXT: vs1r.v v9, (a2)
; CHECK-NEXT: vl1re64.v v8, (a2)
; CHECK-NEXT: vl1re64.v v9, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll b/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll
new file mode 100644
index 0000000..2d5258f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll
@@ -0,0 +1,154 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+
+; Check that we perform binary arithmetic in a narrower type where possible, via
+; combineBinOpOfZExt or otherwise.
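+;
+; A rough sketch of the expected transform, assuming both operands are
+; zero-extended from the same narrow type:
+;   op (zext i8 %x to i32), (zext i8 %y to i32)
+;     --> zext (op at a narrower width) to i32
+; For add/sub/mul this matches the vwaddu/vwsubu/vwmulu widening instructions
+; at e8; the divisions and remainders are done at e16; the bitwise ops can
+; stay at e8 entirely, leaving just one extend to e32.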
+
+define <vscale x 8 x i32> @add(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwaddu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %add = add <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %add
+}
+
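+; The narrowed subtract below can produce a negative difference, so note that
+; the final extend back to e32 is a sign extend (vsext.vf2) rather than a
+; zero extend.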
+define <vscale x 8 x i32> @sub(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: sub:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwsubu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %sub = sub <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %sub
+}
+
+define <vscale x 8 x i32> @mul(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: mul:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %mul = mul <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %mul
+}
+
+define <vscale x 8 x i32> @sdiv(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: sdiv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vdivu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %sdiv = sdiv <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %sdiv
+}
+
+define <vscale x 8 x i32> @udiv(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: udiv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vdivu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %udiv = udiv <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %udiv
+}
+
+define <vscale x 8 x i32> @srem(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: srem:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vremu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %srem = srem <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %srem
+}
+
+define <vscale x 8 x i32> @urem(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: urem:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vremu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %urem = urem <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %urem
+}
+
+define <vscale x 8 x i32> @and(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: and:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vand.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %and = and <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %and
+}
+
+define <vscale x 8 x i32> @or(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vor.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %or = or <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %or
+}
+
+define <vscale x 8 x i32> @xor(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: xor:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vxor.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %xor = xor <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %xor
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll b/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll
new file mode 100644
index 0000000..84936d8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m -mattr=+v -O2 < %s \
+; RUN: | FileCheck --check-prefix=SPILL-O2 %s
+
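+; Under the default calling convention every vector register is caller-saved,
+; so only the returned value in v8 needs to survive the clobbering inline asm.
+; With riscv_vector_cc, v1-v7 and v24-v31 are callee-saved as well, so the
+; callee must spill and reload them (the vs1r/vs2r/vs4r/vs8r sequences below).
+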
+define <vscale x 1 x i32> @test_vector_std(<vscale x 1 x i32> %va) nounwind {
+; SPILL-O2-LABEL: test_vector_std:
+; SPILL-O2: # %bb.0: # %entry
+; SPILL-O2-NEXT: addi sp, sp, -16
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 1
+; SPILL-O2-NEXT: sub sp, sp, a0
+; SPILL-O2-NEXT: addi a0, sp, 16
+; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: #APP
+; SPILL-O2-NEXT: #NO_APP
+; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 1
+; SPILL-O2-NEXT: add sp, sp, a0
+; SPILL-O2-NEXT: addi sp, sp, 16
+; SPILL-O2-NEXT: ret
+entry:
+ call void asm sideeffect "",
+ "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+
+ ret <vscale x 1 x i32> %va
+}
+
+define riscv_vector_cc <vscale x 1 x i32> @test_vector_callee(<vscale x 1 x i32> %va) nounwind {
+; SPILL-O2-LABEL: test_vector_callee:
+; SPILL-O2: # %bb.0: # %entry
+; SPILL-O2-NEXT: addi sp, sp, -16
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 4
+; SPILL-O2-NEXT: sub sp, sp, a0
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 4
+; SPILL-O2-NEXT: sub a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs1r.v v1, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: li a1, 13
+; SPILL-O2-NEXT: mul a0, a0, a1
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs2r.v v2, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 3
+; SPILL-O2-NEXT: add a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs4r.v v4, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: addi a0, sp, 16
+; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: #APP
+; SPILL-O2-NEXT: #NO_APP
+; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 4
+; SPILL-O2-NEXT: sub a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl1r.v v1, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: li a1, 13
+; SPILL-O2-NEXT: mul a0, a0, a1
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl2r.v v2, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 3
+; SPILL-O2-NEXT: add a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 4
+; SPILL-O2-NEXT: add sp, sp, a0
+; SPILL-O2-NEXT: addi sp, sp, 16
+; SPILL-O2-NEXT: ret
+entry:
+ call void asm sideeffect "",
+ "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+
+ ret <vscale x 1 x i32> %va
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index 78385a8..90edb99 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -86,3 +86,166 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
%a = call <vscale x 32 x i32> @callee_scalable_vector_split_indirect(<vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> %x)
ret <vscale x 32 x i32> %a
}
+
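+; Swapping the two tuple fields requires moving them between the v8m2 and
+; v10m2 register groups after the call, hence the vmv2r sequence through v12.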
+define {<vscale x 4 x i32>, <vscale x 4 x i32>} @caller_tuple_return() {
+; RV32-LABEL: caller_tuple_return:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: call callee_tuple_return
+; RV32-NEXT: vmv2r.v v12, v8
+; RV32-NEXT: vmv2r.v v8, v10
+; RV32-NEXT: vmv2r.v v10, v12
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: caller_tuple_return:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: call callee_tuple_return
+; RV64-NEXT: vmv2r.v v12, v8
+; RV64-NEXT: vmv2r.v v8, v10
+; RV64-NEXT: vmv2r.v v10, v12
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+ %a = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
+ %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 0
+ %c = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 1
+ %d = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} poison, <vscale x 4 x i32> %c, 0
+ %e = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %d, <vscale x 4 x i32> %b, 1
+ ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %e
+}
+
+declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
+
+define void @caller_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %x) {
+; RV32-LABEL: caller_tuple_argument:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: vmv2r.v v12, v8
+; RV32-NEXT: vmv2r.v v8, v10
+; RV32-NEXT: vmv2r.v v10, v12
+; RV32-NEXT: call callee_tuple_argument
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: caller_tuple_argument:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: vmv2r.v v12, v8
+; RV64-NEXT: vmv2r.v v8, v10
+; RV64-NEXT: vmv2r.v v10, v12
+; RV64-NEXT: call callee_tuple_argument
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+ %a = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 0
+ %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 1
+ %c = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} poison, <vscale x 4 x i32> %b, 0
+ %d = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %c, <vscale x 4 x i32> %a, 1
+ call void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %d)
+ ret void
+}
+
+declare void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>})
+
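+; The cases below check how scalable vector arguments are assigned to vector
+; registers: each argument is allocated starting at v8, aligned to its LMUL,
+; and values that no longer fit in registers are passed indirectly by
+; reference.
+;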
+; %0 -> v8
+; %1 -> v9
+define <vscale x 1 x i64> @case1(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1) {
+; CHECK-LABEL: case1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %a = add <vscale x 1 x i64> %0, %1
+ ret <vscale x 1 x i64> %a
+}
+
+; %0 -> v8
+; %1 -> v10-v11
+; %2 -> v9
+define <vscale x 1 x i64> @case2_1(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case2_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %a = add <vscale x 1 x i64> %0, %2
+ ret <vscale x 1 x i64> %a
+}
+define <vscale x 2 x i64> @case2_2(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case2_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v10
+; CHECK-NEXT: ret
+ %a = add <vscale x 2 x i64> %1, %1
+ ret <vscale x 2 x i64> %a
+}
+
+; %0 -> v8
+; %1 -> {v10-v11, v12-v13}
+; %2 -> v9
+define <vscale x 1 x i64> @case3_1(<vscale x 1 x i64> %0, {<vscale x 2 x i64>, <vscale x 2 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case3_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %add = add <vscale x 1 x i64> %0, %2
+ ret <vscale x 1 x i64> %add
+}
+define <vscale x 2 x i64> @case3_2(<vscale x 1 x i64> %0, {<vscale x 2 x i64>, <vscale x 2 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case3_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: ret
+ %a = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %1, 0
+ %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %1, 1
+ %add = add <vscale x 2 x i64> %a, %b
+ ret <vscale x 2 x i64> %add
+}
+
+; %0 -> v8
+; %1 -> {by-ref, by-ref}
+; %2 -> v9
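+; (%1 exceeds the available vector argument registers, so it is passed
+; indirectly: a0 holds a pointer to the two fields, vlenb * 8 bytes apart.)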
+define <vscale x 8 x i64> @case4_1(<vscale x 1 x i64> %0, {<vscale x 8 x i64>, <vscale x 8 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case4_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: vl8re64.v v8, (a1)
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vadd.vv v8, v16, v8
+; CHECK-NEXT: ret
+ %a = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i64> } %1, 0
+ %b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i64> } %1, 1
+ %add = add <vscale x 8 x i64> %a, %b
+ ret <vscale x 8 x i64> %add
+}
+define <vscale x 1 x i64> @case4_2(<vscale x 1 x i64> %0, {<vscale x 8 x i64>, <vscale x 8 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case4_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %add = add <vscale x 1 x i64> %0, %2
+ ret <vscale x 1 x i64> %add
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
new file mode 100644
index 0000000..673008d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
@@ -0,0 +1,871 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+v,+d,+m,+zbb %s -o - | FileCheck %s --check-prefix=RV64
+; RUN: llc -verify-machineinstrs -mtriple=riscv32 -mattr=+v,+d,+m,+zbb %s -o - | FileCheck %s --check-prefix=RV32
+
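+; Every case is expected to lower to the same basic pattern: vcompress.vm
+; packs the active elements to the front of the register group, vcpop.m
+; counts the set mask bits, and a unit-stride store of that many elements
+; writes only the compressed data.
+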
+; Compress + store for i8 type
+
+define void @test_compresstore_v1i8(ptr %p, <1 x i1> %mask, <1 x i8> %data) {
+; RV64-LABEL: test_compresstore_v1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV64-NEXT: vse8.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV32-NEXT: vse8.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v1i8(<1 x i8> %data, ptr align 1 %p, <1 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v2i8(ptr %p, <2 x i1> %mask, <2 x i8> %data) {
+; RV64-LABEL: test_compresstore_v2i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV64-NEXT: vse8.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v2i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV32-NEXT: vse8.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v2i8(<2 x i8> %data, ptr align 1 %p, <2 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v4i8(ptr %p, <4 x i1> %mask, <4 x i8> %data) {
+; RV64-LABEL: test_compresstore_v4i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; RV64-NEXT: vse8.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v4i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; RV32-NEXT: vse8.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v4i8(<4 x i8> %data, ptr align 1 %p, <4 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v8i8(ptr %p, <8 x i1> %mask, <8 x i8> %data) {
+; RV64-LABEL: test_compresstore_v8i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; RV64-NEXT: vse8.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v8i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; RV32-NEXT: vse8.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v8i8(<8 x i8> %data, ptr align 1 %p, <8 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v16i8(ptr %p, <16 x i1> %mask, <16 x i8> %data) {
+; RV64-LABEL: test_compresstore_v16i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vse8.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v16i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vse8.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v16i8(<16 x i8> %data, ptr align 1 %p, <16 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v32i8(ptr %p, <32 x i1> %mask, <32 x i8> %data) {
+; RV64-LABEL: test_compresstore_v32i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 32
+; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT: vcompress.vm v10, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT: vse8.v v10, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v32i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT: vcompress.vm v10, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT: vse8.v v10, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v32i8(<32 x i8> %data, ptr align 1 %p, <32 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v64i8(ptr %p, <64 x i1> %mask, <64 x i8> %data) {
+; RV64-LABEL: test_compresstore_v64i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 64
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT: vcompress.vm v12, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT: vse8.v v12, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v64i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 64
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT: vcompress.vm v12, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT: vse8.v v12, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v64i8(<64 x i8> %data, ptr align 1 %p, <64 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v128i8(ptr %p, <128 x i1> %mask, <128 x i8> %data) {
+; RV64-LABEL: test_compresstore_v128i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 128
+; RV64-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT: vcompress.vm v16, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT: vse8.v v16, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v128i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 128
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT: vcompress.vm v16, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT: vse8.v v16, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v128i8(<128 x i8> %data, ptr align 1 %p, <128 x i1> %mask)
+ ret void
+}
+
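+; 256 elements need two m8 register groups, so the compress + store is done
+; in two halves; the popcount (cpop) of the first 128 mask bits gives the
+; byte offset of the second store.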
+define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data) {
+; RV64-LABEL: test_compresstore_v256i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v7, v8
+; RV64-NEXT: li a2, 128
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vle8.v v24, (a1)
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v0, 1
+; RV64-NEXT: vmv.x.s a1, v9
+; RV64-NEXT: vmv.x.s a3, v0
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vcompress.vm v8, v16, v0
+; RV64-NEXT: vcpop.m a4, v0
+; RV64-NEXT: vsetvli zero, a4, e8, m8, ta, ma
+; RV64-NEXT: vse8.v v8, (a0)
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vcompress.vm v8, v24, v7
+; RV64-NEXT: vcpop.m a2, v7
+; RV64-NEXT: cpop a3, a3
+; RV64-NEXT: cpop a1, a1
+; RV64-NEXT: add a0, a0, a3
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vse8.v v8, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v256i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vmv1r.v v7, v8
+; RV32-NEXT: li a2, 128
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT: vle8.v v24, (a1)
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v0, 1
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsrl.vx v10, v9, a1
+; RV32-NEXT: vmv.x.s a3, v10
+; RV32-NEXT: vsrl.vx v10, v0, a1
+; RV32-NEXT: vmv.x.s a1, v10
+; RV32-NEXT: vmv.x.s a4, v9
+; RV32-NEXT: vmv.x.s a5, v0
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT: vcompress.vm v8, v16, v0
+; RV32-NEXT: vcpop.m a6, v0
+; RV32-NEXT: vsetvli zero, a6, e8, m8, ta, ma
+; RV32-NEXT: vse8.v v8, (a0)
+; RV32-NEXT: cpop a1, a1
+; RV32-NEXT: cpop a5, a5
+; RV32-NEXT: add a1, a5, a1
+; RV32-NEXT: cpop a3, a3
+; RV32-NEXT: cpop a4, a4
+; RV32-NEXT: add a3, a4, a3
+; RV32-NEXT: add a1, a1, a3
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT: vcompress.vm v8, v24, v7
+; RV32-NEXT: vcpop.m a1, v7
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT: vse8.v v8, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v256i8(<256 x i8> %data, ptr align 1 %p, <256 x i1> %mask)
+ ret void
+}
+
+; Compress + store for i16 type
+
+define void @test_compresstore_v1i16(ptr %p, <1 x i1> %mask, <1 x i16> %data) {
+; RV64-LABEL: test_compresstore_v1i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT: vse16.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v1i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT: vse16.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v1i16(<1 x i16> %data, ptr align 2 %p, <1 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v2i16(ptr %p, <2 x i1> %mask, <2 x i16> %data) {
+; RV64-LABEL: test_compresstore_v2i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT: vse16.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v2i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT: vse16.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v2i16(<2 x i16> %data, ptr align 2 %p, <2 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v4i16(ptr %p, <4 x i1> %mask, <4 x i16> %data) {
+; RV64-LABEL: test_compresstore_v4i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; RV64-NEXT: vse16.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v4i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; RV32-NEXT: vse16.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v4i16(<4 x i16> %data, ptr align 2 %p, <4 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v8i16(ptr %p, <8 x i1> %mask, <8 x i16> %data) {
+; RV64-LABEL: test_compresstore_v8i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; RV64-NEXT: vse16.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v8i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; RV32-NEXT: vse16.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v8i16(<8 x i16> %data, ptr align 2 %p, <8 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v16i16(ptr %p, <16 x i1> %mask, <16 x i16> %data) {
+; RV64-LABEL: test_compresstore_v16i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vcompress.vm v10, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vse16.v v10, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v16i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vcompress.vm v10, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vse16.v v10, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v16i16(<16 x i16> %data, ptr align 2 %p, <16 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v32i16(ptr %p, <32 x i1> %mask, <32 x i16> %data) {
+; RV64-LABEL: test_compresstore_v32i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 32
+; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RV64-NEXT: vcompress.vm v12, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RV64-NEXT: vse16.v v12, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v32i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RV32-NEXT: vcompress.vm v12, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RV32-NEXT: vse16.v v12, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v32i16(<32 x i16> %data, ptr align 2 %p, <32 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v64i16(ptr %p, <64 x i1> %mask, <64 x i16> %data) {
+; RV64-LABEL: test_compresstore_v64i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 64
+; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT: vcompress.vm v16, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT: vse16.v v16, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v64i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 64
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT: vcompress.vm v16, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT: vse16.v v16, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v64i16(<64 x i16> %data, ptr align 2 %p, <64 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v128i16(ptr %p, <128 x i1> %mask, <128 x i16> %data) {
+; RV64-LABEL: test_compresstore_v128i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 64
+; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT: vcompress.vm v24, v8, v0
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; RV64-NEXT: vse16.v v24, (a0)
+; RV64-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v0, 8
+; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT: vcompress.vm v24, v16, v8
+; RV64-NEXT: vcpop.m a2, v8
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmv.x.s a1, v0
+; RV64-NEXT: cpop a1, a1
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; RV64-NEXT: vse16.v v24, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v128i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 64
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT: vcompress.vm v24, v8, v0
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; RV32-NEXT: vse16.v v24, (a0)
+; RV32-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v0, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT: vcompress.vm v8, v16, v24
+; RV32-NEXT: vcpop.m a1, v24
+; RV32-NEXT: li a2, 32
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vsrl.vx v16, v0, a2
+; RV32-NEXT: vmv.x.s a2, v16
+; RV32-NEXT: cpop a2, a2
+; RV32-NEXT: vmv.x.s a3, v0
+; RV32-NEXT: cpop a3, a3
+; RV32-NEXT: add a2, a3, a2
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: add a0, a0, a2
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT: vse16.v v8, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v128i16(<128 x i16> %data, ptr align 2 %p, <128 x i1> %mask)
+ ret void
+}
+
+; Compress + store for i32 type
+
+define void @test_compresstore_v1i32(ptr %p, <1 x i1> %mask, <1 x i32> %data) {
+; RV64-LABEL: test_compresstore_v1i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT: vse32.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v1i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT: vse32.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v1i32(<1 x i32> %data, ptr align 4 %p, <1 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v2i32(ptr %p, <2 x i1> %mask, <2 x i32> %data) {
+; RV64-LABEL: test_compresstore_v2i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT: vse32.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v2i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT: vse32.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v2i32(<2 x i32> %data, ptr align 4 %p, <2 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v4i32(ptr %p, <4 x i1> %mask, <4 x i32> %data) {
+; RV64-LABEL: test_compresstore_v4i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; RV64-NEXT: vse32.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v4i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; RV32-NEXT: vse32.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v4i32(<4 x i32> %data, ptr align 4 %p, <4 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v8i32(ptr %p, <8 x i1> %mask, <8 x i32> %data) {
+; RV64-LABEL: test_compresstore_v8i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64-NEXT: vcompress.vm v10, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; RV64-NEXT: vse32.v v10, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v8i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vcompress.vm v10, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; RV32-NEXT: vse32.v v10, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v8i32(<8 x i32> %data, ptr align 4 %p, <8 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v16i32(ptr %p, <16 x i1> %mask, <16 x i32> %data) {
+; RV64-LABEL: test_compresstore_v16i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT: vcompress.vm v12, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vse32.v v12, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v16i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vcompress.vm v12, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV32-NEXT: vse32.v v12, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v16i32(<16 x i32> %data, ptr align 4 %p, <16 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v32i32(ptr %p, <32 x i1> %mask, <32 x i32> %data) {
+; RV64-LABEL: test_compresstore_v32i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 32
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vcompress.vm v16, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vse32.v v16, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v32i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vcompress.vm v16, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vse32.v v16, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v32i32(<32 x i32> %data, ptr align 4 %p, <32 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v64i32(ptr %p, <64 x i1> %mask, <64 x i32> %data) {
+; RV64-LABEL: test_compresstore_v64i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 32
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vcompress.vm v24, v8, v0
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV64-NEXT: vse32.v v24, (a0)
+; RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v0, 4
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vcompress.vm v24, v16, v8
+; RV64-NEXT: vcpop.m a1, v8
+; RV64-NEXT: vmv.x.s a2, v0
+; RV64-NEXT: cpopw a2, a2
+; RV64-NEXT: slli a2, a2, 2
+; RV64-NEXT: add a0, a0, a2
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vse32.v v24, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v64i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vcompress.vm v24, v8, v0
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vse32.v v24, (a0)
+; RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v0, 4
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vcompress.vm v24, v16, v8
+; RV32-NEXT: vcpop.m a1, v8
+; RV32-NEXT: vmv.x.s a2, v0
+; RV32-NEXT: cpop a2, a2
+; RV32-NEXT: slli a2, a2, 2
+; RV32-NEXT: add a0, a0, a2
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vse32.v v24, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v64i32(<64 x i32> %data, ptr align 4 %p, <64 x i1> %mask)
+ ret void
+}
+
+; Compress + store for i64 type
+
+define void @test_compresstore_v1i64(ptr %p, <1 x i1> %mask, <1 x i64> %data) {
+; RV64-LABEL: test_compresstore_v1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vse64.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vse64.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v1i64(<1 x i64> %data, ptr align 8 %p, <1 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v2i64(ptr %p, <2 x i1> %mask, <2 x i64> %data) {
+; RV64-LABEL: test_compresstore_v2i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vse64.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v2i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vse64.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v2i64(<2 x i64> %data, ptr align 8 %p, <2 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v4i64(ptr %p, <4 x i1> %mask, <4 x i64> %data) {
+; RV64-LABEL: test_compresstore_v4i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vcompress.vm v10, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vse64.v v10, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v4i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vcompress.vm v10, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV32-NEXT: vse64.v v10, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v4i64(<4 x i64> %data, ptr align 8 %p, <4 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v8i64(ptr %p, <8 x i1> %mask, <8 x i64> %data) {
+; RV64-LABEL: test_compresstore_v8i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT: vcompress.vm v12, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vse64.v v12, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v8i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT: vcompress.vm v12, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV32-NEXT: vse64.v v12, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v8i64(<8 x i64> %data, ptr align 8 %p, <8 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v16i64(ptr %p, <16 x i1> %mask, <16 x i64> %data) {
+; RV64-LABEL: test_compresstore_v16i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vcompress.vm v16, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vse64.v v16, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v16i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vcompress.vm v16, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vse64.v v16, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v16i64(<16 x i64> %data, ptr align 8 %p, <16 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v32i64(ptr %p, <32 x i1> %mask, <32 x i64> %data) {
+; RV64-LABEL: test_compresstore_v32i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vcompress.vm v24, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vse64.v v24, (a0)
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vcompress.vm v8, v16, v24
+; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV64-NEXT: vmv.x.s a1, v0
+; RV64-NEXT: zext.h a1, a1
+; RV64-NEXT: cpopw a1, a1
+; RV64-NEXT: slli a1, a1, 3
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: vcpop.m a1, v24
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v32i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vcompress.vm v24, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vse64.v v24, (a0)
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vcompress.vm v8, v16, v24
+; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV32-NEXT: vmv.x.s a1, v0
+; RV32-NEXT: zext.h a1, a1
+; RV32-NEXT: cpop a1, a1
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: vcpop.m a1, v24
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v32i64(<32 x i64> %data, ptr align 8 %p, <32 x i1> %mask)
+ ret void
+}
+
+declare void @llvm.masked.compressstore.v1i8(<1 x i8>, ptr, <1 x i1>)
+declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>)
+declare void @llvm.masked.compressstore.v4i8(<4 x i8>, ptr, <4 x i1>)
+declare void @llvm.masked.compressstore.v8i8(<8 x i8>, ptr, <8 x i1>)
+declare void @llvm.masked.compressstore.v16i8(<16 x i8>, ptr, <16 x i1>)
+declare void @llvm.masked.compressstore.v32i8(<32 x i8>, ptr, <32 x i1>)
+declare void @llvm.masked.compressstore.v64i8(<64 x i8>, ptr, <64 x i1>)
+declare void @llvm.masked.compressstore.v128i8(<128 x i8>, ptr, <128 x i1>)
+declare void @llvm.masked.compressstore.v256i8(<256 x i8>, ptr, <256 x i1>)
+
+declare void @llvm.masked.compressstore.v1i16(<1 x i16>, ptr, <1 x i1>)
+declare void @llvm.masked.compressstore.v2i16(<2 x i16>, ptr, <2 x i1>)
+declare void @llvm.masked.compressstore.v4i16(<4 x i16>, ptr, <4 x i1>)
+declare void @llvm.masked.compressstore.v8i16(<8 x i16>, ptr, <8 x i1>)
+declare void @llvm.masked.compressstore.v16i16(<16 x i16>, ptr, <16 x i1>)
+declare void @llvm.masked.compressstore.v32i16(<32 x i16>, ptr, <32 x i1>)
+declare void @llvm.masked.compressstore.v64i16(<64 x i16>, ptr, <64 x i1>)
+declare void @llvm.masked.compressstore.v128i16(<128 x i16>, ptr, <128 x i1>)
+
+declare void @llvm.masked.compressstore.v1i32(<1 x i32>, ptr, <1 x i1>)
+declare void @llvm.masked.compressstore.v2i32(<2 x i32>, ptr, <2 x i1>)
+declare void @llvm.masked.compressstore.v4i32(<4 x i32>, ptr, <4 x i1>)
+declare void @llvm.masked.compressstore.v8i32(<8 x i32>, ptr, <8 x i1>)
+declare void @llvm.masked.compressstore.v16i32(<16 x i32>, ptr, <16 x i1>)
+declare void @llvm.masked.compressstore.v32i32(<32 x i32>, ptr, <32 x i1>)
+declare void @llvm.masked.compressstore.v64i32(<64 x i32>, ptr, <64 x i1>)
+
+declare void @llvm.masked.compressstore.v1i64(<1 x i64>, ptr, <1 x i1>)
+declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>)
+declare void @llvm.masked.compressstore.v4i64(<4 x i64>, ptr, <4 x i1>)
+declare void @llvm.masked.compressstore.v8i64(<8 x i64>, ptr, <8 x i1>)
+declare void @llvm.masked.compressstore.v16i64(<16 x i64>, ptr, <16 x i1>)
+declare void @llvm.masked.compressstore.v32i64(<32 x i64>, ptr, <32 x i1>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
new file mode 100644
index 0000000..bd1209a17b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
@@ -0,0 +1,727 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+;
+; SABD
+;
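+;
+; An absolute difference written as trunc(abs(sub(sext %a, sext %b))) is
+; expected to fold to the three-instruction min/max form at the source
+; element width: vmin.vv + vmax.vv + vsub.vv.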
+
+define <8 x i8> @sabd_8b_as_16b(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: sabd_8b_as_16b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <8 x i8> %a to <8 x i16>
+ %b.sext = sext <8 x i8> %b to <8 x i16>
+ %sub = sub <8 x i16> %a.sext, %b.sext
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ %trunc = trunc <8 x i16> %abs to <8 x i8>
+ ret <8 x i8> %trunc
+}
+
+define <8 x i8> @sabd_8b_as_32b(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: sabd_8b_as_32b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <8 x i8> %a to <8 x i32>
+ %b.sext = sext <8 x i8> %b to <8 x i32>
+ %sub = sub <8 x i32> %a.sext, %b.sext
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+ %trunc = trunc <8 x i32> %abs to <8 x i8>
+ ret <8 x i8> %trunc
+}
+
+define <16 x i8> @sabd_16b(<16 x i8> %a, <16 x i8> %b) {
+;
+; CHECK-LABEL: sabd_16b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <16 x i8> %a to <16 x i16>
+ %b.sext = sext <16 x i8> %b to <16 x i16>
+ %sub = sub <16 x i16> %a.sext, %b.sext
+ %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+ %trunc = trunc <16 x i16> %abs to <16 x i8>
+ ret <16 x i8> %trunc
+}
+
+define <4 x i16> @sabd_4h(<4 x i16> %a, <4 x i16> %b) {
+;
+; CHECK-LABEL: sabd_4h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <4 x i16> %a to <4 x i32>
+ %b.sext = sext <4 x i16> %b to <4 x i32>
+ %sub = sub <4 x i32> %a.sext, %b.sext
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ %trunc = trunc <4 x i32> %abs to <4 x i16>
+ ret <4 x i16> %trunc
+}
+
+define <4 x i16> @sabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
+;
+; CHECK-LABEL: sabd_4h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <4 x i8> %a to <4 x i16>
+ %b.sext = sext <4 x i8> %b to <4 x i16>
+ %sub = sub <4 x i16> %a.sext, %b.sext
+ %abs = call <4 x i16> @llvm.abs.v4i16(<4 x i16> %sub, i1 true)
+ ret <4 x i16> %abs
+}
+
+define <8 x i16> @sabd_8h(<8 x i16> %a, <8 x i16> %b) {
+;
+; CHECK-LABEL: sabd_8h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <8 x i16> %a to <8 x i32>
+ %b.sext = sext <8 x i16> %b to <8 x i32>
+ %sub = sub <8 x i32> %a.sext, %b.sext
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+ %trunc = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %trunc
+}
+
+define <8 x i16> @sabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: sabd_8h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <8 x i8> %a to <8 x i16>
+ %b.sext = sext <8 x i8> %b to <8 x i16>
+ %sub = sub <8 x i16> %a.sext, %b.sext
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ ret <8 x i16> %abs
+}
+
+define <2 x i32> @sabd_2s(<2 x i32> %a, <2 x i32> %b) {
+;
+; CHECK-LABEL: sabd_2s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <2 x i32> %a to <2 x i64>
+ %b.sext = sext <2 x i32> %b to <2 x i64>
+ %sub = sub <2 x i64> %a.sext, %b.sext
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ %trunc = trunc <2 x i64> %abs to <2 x i32>
+ ret <2 x i32> %trunc
+}
+
+define <2 x i32> @sabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
+;
+; CHECK-LABEL: sabd_2s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <2 x i16> %a to <2 x i32>
+ %b.sext = sext <2 x i16> %b to <2 x i32>
+ %sub = sub <2 x i32> %a.sext, %b.sext
+ %abs = call <2 x i32> @llvm.abs.v2i32(<2 x i32> %sub, i1 true)
+ ret <2 x i32> %abs
+}
+
+define <4 x i32> @sabd_4s(<4 x i32> %a, <4 x i32> %b) {
+;
+; CHECK-LABEL: sabd_4s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <4 x i32> %a to <4 x i64>
+ %b.sext = sext <4 x i32> %b to <4 x i64>
+ %sub = sub <4 x i64> %a.sext, %b.sext
+ %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+ %trunc = trunc <4 x i64> %abs to <4 x i32>
+ ret <4 x i32> %trunc
+}
+
+define <4 x i32> @sabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
+;
+; CHECK-LABEL: sabd_4s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <4 x i16> %a to <4 x i32>
+ %b.sext = sext <4 x i16> %b to <4 x i32>
+ %sub = sub <4 x i32> %a.sext, %b.sext
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ ret <4 x i32> %abs
+}
+
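+; Widening all the way to i128 should not defeat the abd match.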
+define <2 x i64> @sabd_2d(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: sabd_2d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <2 x i64> %a to <2 x i128>
+ %b.sext = sext <2 x i64> %b to <2 x i128>
+ %sub = sub <2 x i128> %a.sext, %b.sext
+ %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
+ %trunc = trunc <2 x i128> %abs to <2 x i64>
+ ret <2 x i64> %trunc
+}
+
+define <2 x i64> @sabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
+;
+; CHECK-LABEL: sabd_2d_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <2 x i32> %a to <2 x i64>
+ %b.sext = sext <2 x i32> %b to <2 x i64>
+ %sub = sub <2 x i64> %a.sext, %b.sext
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ ret <2 x i64> %abs
+}
+
+;
+; UABD
+;
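+; RVV has no dedicated absolute-difference instruction, so an unsigned abd
+; is expanded to vmaxu(a,b) - vminu(a,b); the signed tests above use
+; vmax/vmin in the same way.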
+
+define <8 x i8> @uabd_8b(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: uabd_8b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <8 x i8> %a to <8 x i16>
+ %b.zext = zext <8 x i8> %b to <8 x i16>
+ %sub = sub <8 x i16> %a.zext, %b.zext
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ %trunc = trunc <8 x i16> %abs to <8 x i8>
+ ret <8 x i8> %trunc
+}
+
+define <16 x i8> @uabd_16b(<16 x i8> %a, <16 x i8> %b) {
+;
+; CHECK-LABEL: uabd_16b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <16 x i8> %a to <16 x i16>
+ %b.zext = zext <16 x i8> %b to <16 x i16>
+ %sub = sub <16 x i16> %a.zext, %b.zext
+ %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+ %trunc = trunc <16 x i16> %abs to <16 x i8>
+ ret <16 x i8> %trunc
+}
+
+define <4 x i16> @uabd_4h(<4 x i16> %a, <4 x i16> %b) {
+;
+; CHECK-LABEL: uabd_4h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <4 x i16> %a to <4 x i32>
+ %b.zext = zext <4 x i16> %b to <4 x i32>
+ %sub = sub <4 x i32> %a.zext, %b.zext
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ %trunc = trunc <4 x i32> %abs to <4 x i16>
+ ret <4 x i16> %trunc
+}
+
+define <4 x i16> @uabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
+;
+; CHECK-LABEL: uabd_4h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <4 x i8> %a to <4 x i16>
+ %b.zext = zext <4 x i8> %b to <4 x i16>
+ %sub = sub <4 x i16> %a.zext, %b.zext
+ %abs = call <4 x i16> @llvm.abs.v4i16(<4 x i16> %sub, i1 true)
+ ret <4 x i16> %abs
+}
+
+define <8 x i16> @uabd_8h(<8 x i16> %a, <8 x i16> %b) {
+;
+; CHECK-LABEL: uabd_8h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <8 x i16> %a to <8 x i32>
+ %b.zext = zext <8 x i16> %b to <8 x i32>
+ %sub = sub <8 x i32> %a.zext, %b.zext
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+ %trunc = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %trunc
+}
+
+define <8 x i16> @uabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: uabd_8h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <8 x i8> %a to <8 x i16>
+ %b.zext = zext <8 x i8> %b to <8 x i16>
+ %sub = sub <8 x i16> %a.zext, %b.zext
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ ret <8 x i16> %abs
+}
+
+define <2 x i32> @uabd_2s(<2 x i32> %a, <2 x i32> %b) {
+;
+; CHECK-LABEL: uabd_2s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <2 x i32> %a to <2 x i64>
+ %b.zext = zext <2 x i32> %b to <2 x i64>
+ %sub = sub <2 x i64> %a.zext, %b.zext
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ %trunc = trunc <2 x i64> %abs to <2 x i32>
+ ret <2 x i32> %trunc
+}
+
+define <2 x i32> @uabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
+;
+; CHECK-LABEL: uabd_2s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <2 x i16> %a to <2 x i32>
+ %b.zext = zext <2 x i16> %b to <2 x i32>
+ %sub = sub <2 x i32> %a.zext, %b.zext
+ %abs = call <2 x i32> @llvm.abs.v2i32(<2 x i32> %sub, i1 true)
+ ret <2 x i32> %abs
+}
+
+define <4 x i32> @uabd_4s(<4 x i32> %a, <4 x i32> %b) {
+;
+; CHECK-LABEL: uabd_4s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <4 x i32> %a to <4 x i64>
+ %b.zext = zext <4 x i32> %b to <4 x i64>
+ %sub = sub <4 x i64> %a.zext, %b.zext
+ %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+ %trunc = trunc <4 x i64> %abs to <4 x i32>
+ ret <4 x i32> %trunc
+}
+
+define <4 x i32> @uabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
+;
+; CHECK-LABEL: uabd_4s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <4 x i16> %a to <4 x i32>
+ %b.zext = zext <4 x i16> %b to <4 x i32>
+ %sub = sub <4 x i32> %a.zext, %b.zext
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ ret <4 x i32> %abs
+}
+
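+; Same i128-widened check as sabd_2d, for the unsigned form.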
+define <2 x i64> @uabd_2d(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: uabd_2d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <2 x i64> %a to <2 x i128>
+ %b.zext = zext <2 x i64> %b to <2 x i128>
+ %sub = sub <2 x i128> %a.zext, %b.zext
+ %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
+ %trunc = trunc <2 x i128> %abs to <2 x i64>
+ ret <2 x i64> %trunc
+}
+
+define <2 x i64> @uabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
+;
+; CHECK-LABEL: uabd_2d_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <2 x i32> %a to <2 x i64>
+ %b.zext = zext <2 x i32> %b to <2 x i64>
+ %sub = sub <2 x i64> %a.zext, %b.zext
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ ret <2 x i64> %abs
+}
+
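+; With a nuw subtract the min/max abd form is not used: the difference is
+; computed directly and the (signed) abs is expanded generically as
+; max(x, 0 - x) via vrsub.vi + vmax.vv.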
+define <16 x i8> @uabd_v16i8_nuw(<16 x i8> %a, <16 x i8> %b) {
+;
+; CHECK-LABEL: uabd_v16i8_nuw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: vrsub.vi v9, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %sub = sub nuw <16 x i8> %a, %b
+ %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
+ ret <16 x i8> %abs
+}
+
+define <8 x i16> @uabd_v8i16_nuw(<8 x i16> %a, <8 x i16> %b) {
+;
+; CHECK-LABEL: uabd_v8i16_nuw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: vrsub.vi v9, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %sub = sub nuw <8 x i16> %a, %b
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ ret <8 x i16> %abs
+}
+
+define <4 x i32> @uabd_v4i32_nuw(<4 x i32> %a, <4 x i32> %b) {
+;
+; CHECK-LABEL: uabd_v4i32_nuw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: vrsub.vi v9, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %sub = sub nuw <4 x i32> %a, %b
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ ret <4 x i32> %abs
+}
+
+define <2 x i64> @uabd_v2i64_nuw(<2 x i64> %a, <2 x i64> %b) {
+;
+; CHECK-LABEL: uabd_v2i64_nuw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: vrsub.vi v9, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %sub = sub nuw <2 x i64> %a, %b
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ ret <2 x i64> %abs
+}
+
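+; When the subtract is nsw, abs(sub nsw a, b) is equivalent to the signed abd
+; of a and b, so it should lower to the same vmin/vmax/vsub sequence as above.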
+define <16 x i8> @sabd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) {
+;
+; CHECK-LABEL: sabd_v16i8_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %sub = sub nsw <16 x i8> %a, %b
+ %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
+ ret <16 x i8> %abs
+}
+
+define <8 x i16> @sabd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) {
+;
+; CHECK-LABEL: sabd_v8i16_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %sub = sub nsw <8 x i16> %a, %b
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ ret <8 x i16> %abs
+}
+
+define <4 x i32> @sabd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) {
+;
+; CHECK-LABEL: sabd_v4i32_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %sub = sub nsw <4 x i32> %a, %b
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ ret <4 x i32> %abs
+}
+
+define <2 x i64> @sabd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) {
+;
+; CHECK-LABEL: sabd_v2i64_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %sub = sub nsw <2 x i64> %a, %b
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ ret <2 x i64> %abs
+}
+
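+; smax(a,b) - smin(a,b) is another spelling of the signed abd; check that it
+; lowers to the same sequence.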
+define <16 x i8> @smaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
+;
+; CHECK-LABEL: smaxmin_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <16 x i8> @llvm.smax.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %b = tail call <16 x i8> @llvm.smin.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %sub = sub <16 x i8> %a, %b
+ ret <16 x i8> %sub
+}
+
+define <8 x i16> @smaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
+;
+; CHECK-LABEL: smaxmin_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <8 x i16> @llvm.smax.v8i16(<8 x i16> %0, <8 x i16> %1)
+ %b = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> %0, <8 x i16> %1)
+ %sub = sub <8 x i16> %a, %b
+ ret <8 x i16> %sub
+}
+
+define <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
+;
+; CHECK-LABEL: smaxmin_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %0, <4 x i32> %1)
+ %b = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %0, <4 x i32> %1)
+ %sub = sub <4 x i32> %a, %b
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
+;
+; CHECK-LABEL: smaxmin_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1)
+ %b = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %0, <2 x i64> %1)
+ %sub = sub <2 x i64> %a, %b
+ ret <2 x i64> %sub
+}
+
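+; Unsigned counterpart of the tests above: umax(a,b) - umin(a,b) lowers via
+; vminu/vmaxu.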
+define <16 x i8> @umaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
+;
+; CHECK-LABEL: umaxmin_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %sub = sub <16 x i8> %a, %b
+ ret <16 x i8> %sub
+}
+
+define <8 x i16> @umaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
+;
+; CHECK-LABEL: umaxmin_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <8 x i16> @llvm.umax.v8i16(<8 x i16> %0, <8 x i16> %1)
+ %b = tail call <8 x i16> @llvm.umin.v8i16(<8 x i16> %0, <8 x i16> %1)
+ %sub = sub <8 x i16> %a, %b
+ ret <8 x i16> %sub
+}
+
+define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
+;
+; CHECK-LABEL: umaxmin_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <4 x i32> @llvm.umax.v4i32(<4 x i32> %0, <4 x i32> %1)
+ %b = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %0, <4 x i32> %1)
+ %sub = sub <4 x i32> %a, %b
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
+;
+; CHECK-LABEL: umaxmin_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <2 x i64> @llvm.umax.v2i64(<2 x i64> %0, <2 x i64> %1)
+ %b = tail call <2 x i64> @llvm.umin.v2i64(<2 x i64> %0, <2 x i64> %1)
+ %sub = sub <2 x i64> %a, %b
+ ret <2 x i64> %sub
+}
+
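+; As above, but with the umin operands commuted; the match should be
+; insensitive to operand order.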
+define <16 x i8> @umaxmin_v16i8_com1(<16 x i8> %0, <16 x i8> %1) {
+;
+; CHECK-LABEL: umaxmin_v16i8_com1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %1, <16 x i8> %0)
+ %sub = sub <16 x i8> %a, %b
+ ret <16 x i8> %sub
+}
+
+declare <8 x i8> @llvm.abs.v8i8(<8 x i8>, i1)
+declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1)
+
+declare <4 x i16> @llvm.abs.v4i16(<4 x i16>, i1)
+declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1)
+declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
+
+declare <2 x i32> @llvm.abs.v2i32(<2 x i32>, i1)
+declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)
+declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
+
+declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)
+declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
+
+declare <2 x i128> @llvm.abs.v2i128(<2 x i128>, i1)
+
+declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
+declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
+declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>)
+declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
index b7afee7..5252eb7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
@@ -416,8 +416,8 @@ define double @bitcast_v1i64_f64(<1 x i64> %a) {
; RV32ELEN32: # %bb.0:
; RV32ELEN32-NEXT: addi sp, sp, -16
; RV32ELEN32-NEXT: .cfi_def_cfa_offset 16
-; RV32ELEN32-NEXT: sw a1, 12(sp)
; RV32ELEN32-NEXT: sw a0, 8(sp)
+; RV32ELEN32-NEXT: sw a1, 12(sp)
; RV32ELEN32-NEXT: fld fa0, 8(sp)
; RV32ELEN32-NEXT: addi sp, sp, 16
; RV32ELEN32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
index 52c5292..36fbdd8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
@@ -6,24 +6,20 @@ declare void @llvm.masked.compressstore.v1f16(<1 x half>, ptr, <1 x i1>)
define void @compressstore_v1f16(ptr %base, <1 x half> %v, <1 x i1> %mask) {
; RV32-LABEL: compressstore_v1f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT: vfirst.m a1, v0
-; RV32-NEXT: bnez a1, .LBB0_2
-; RV32-NEXT: # %bb.1: # %cond.store
; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; RV32-NEXT: vse16.v v8, (a0)
-; RV32-NEXT: .LBB0_2: # %else
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT: vse16.v v9, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v1f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT: vfirst.m a1, v0
-; RV64-NEXT: bnez a1, .LBB0_2
-; RV64-NEXT: # %bb.1: # %cond.store
; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; RV64-NEXT: vse16.v v8, (a0)
-; RV64-NEXT: .LBB0_2: # %else
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT: vse16.v v9, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v1f16(<1 x half> %v, ptr align 2 %base, <1 x i1> %mask)
ret void
@@ -33,48 +29,20 @@ declare void @llvm.masked.compressstore.v2f16(<2 x half>, ptr, <2 x i1>)
define void @compressstore_v2f16(ptr %base, <2 x half> %v, <2 x i1> %mask) {
; RV32-LABEL: compressstore_v2f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB1_3
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: bnez a1, .LBB1_4
-; RV32-NEXT: .LBB1_2: # %else2
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB1_3: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; RV32-NEXT: vse16.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: beqz a1, .LBB1_2
-; RV32-NEXT: .LBB1_4: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vse16.v v8, (a0)
+; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT: vse16.v v9, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v2f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB1_3
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: bnez a1, .LBB1_4
-; RV64-NEXT: .LBB1_2: # %else2
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB1_3: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; RV64-NEXT: vse16.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: beqz a1, .LBB1_2
-; RV64-NEXT: .LBB1_4: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vse16.v v8, (a0)
+; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT: vse16.v v9, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v2f16(<2 x half> %v, ptr align 2 %base, <2 x i1> %mask)
ret void
@@ -84,88 +52,20 @@ declare void @llvm.masked.compressstore.v4f16(<4 x half>, ptr, <4 x i1>)
define void @compressstore_v4f16(ptr %base, <4 x half> %v, <4 x i1> %mask) {
; RV32-LABEL: compressstore_v4f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB2_5
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB2_6
-; RV32-NEXT: .LBB2_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB2_7
-; RV32-NEXT: .LBB2_3: # %else5
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: bnez a1, .LBB2_8
-; RV32-NEXT: .LBB2_4: # %else8
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB2_5: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT: vse16.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB2_2
-; RV32-NEXT: .LBB2_6: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 1
-; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB2_3
-; RV32-NEXT: .LBB2_7: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 2
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: beqz a1, .LBB2_4
-; RV32-NEXT: .LBB2_8: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vse16.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v4f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB2_5
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB2_6
-; RV64-NEXT: .LBB2_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB2_7
-; RV64-NEXT: .LBB2_3: # %else5
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: bnez a1, .LBB2_8
-; RV64-NEXT: .LBB2_4: # %else8
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB2_5: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-NEXT: vse16.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB2_2
-; RV64-NEXT: .LBB2_6: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 1
-; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB2_3
-; RV64-NEXT: .LBB2_7: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 2
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: beqz a1, .LBB2_4
-; RV64-NEXT: .LBB2_8: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vse16.v v8, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v4f16(<4 x half> %v, ptr align 2 %base, <4 x i1> %mask)
ret void
@@ -175,168 +75,20 @@ declare void @llvm.masked.compressstore.v8f16(<8 x half>, ptr, <8 x i1>)
define void @compressstore_v8f16(ptr %base, <8 x half> %v, <8 x i1> %mask) {
; RV32-LABEL: compressstore_v8f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB3_9
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB3_10
-; RV32-NEXT: .LBB3_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB3_11
-; RV32-NEXT: .LBB3_3: # %else5
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: bnez a2, .LBB3_12
-; RV32-NEXT: .LBB3_4: # %else8
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: bnez a2, .LBB3_13
-; RV32-NEXT: .LBB3_5: # %else11
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: bnez a2, .LBB3_14
-; RV32-NEXT: .LBB3_6: # %else14
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: bnez a2, .LBB3_15
-; RV32-NEXT: .LBB3_7: # %else17
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: bnez a1, .LBB3_16
-; RV32-NEXT: .LBB3_8: # %else20
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB3_9: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vse16.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB3_2
-; RV32-NEXT: .LBB3_10: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 1
-; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB3_3
-; RV32-NEXT: .LBB3_11: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 2
-; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: beqz a2, .LBB3_4
-; RV32-NEXT: .LBB3_12: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 3
-; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: beqz a2, .LBB3_5
-; RV32-NEXT: .LBB3_13: # %cond.store10
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 4
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: beqz a2, .LBB3_6
-; RV32-NEXT: .LBB3_14: # %cond.store13
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 5
-; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: beqz a2, .LBB3_7
-; RV32-NEXT: .LBB3_15: # %cond.store16
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 6
-; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: beqz a1, .LBB3_8
-; RV32-NEXT: .LBB3_16: # %cond.store19
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vse16.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v8f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB3_9
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB3_10
-; RV64-NEXT: .LBB3_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB3_11
-; RV64-NEXT: .LBB3_3: # %else5
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: bnez a2, .LBB3_12
-; RV64-NEXT: .LBB3_4: # %else8
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: bnez a2, .LBB3_13
-; RV64-NEXT: .LBB3_5: # %else11
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: bnez a2, .LBB3_14
-; RV64-NEXT: .LBB3_6: # %else14
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: bnez a2, .LBB3_15
-; RV64-NEXT: .LBB3_7: # %else17
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: bnez a1, .LBB3_16
-; RV64-NEXT: .LBB3_8: # %else20
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB3_9: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vse16.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB3_2
-; RV64-NEXT: .LBB3_10: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 1
-; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB3_3
-; RV64-NEXT: .LBB3_11: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 2
-; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: beqz a2, .LBB3_4
-; RV64-NEXT: .LBB3_12: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 3
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: beqz a2, .LBB3_5
-; RV64-NEXT: .LBB3_13: # %cond.store10
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 4
-; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: beqz a2, .LBB3_6
-; RV64-NEXT: .LBB3_14: # %cond.store13
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 5
-; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: beqz a2, .LBB3_7
-; RV64-NEXT: .LBB3_15: # %cond.store16
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 6
-; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: beqz a1, .LBB3_8
-; RV64-NEXT: .LBB3_16: # %cond.store19
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vse16.v v8, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v8f16(<8 x half> %v, ptr align 2 %base, <8 x i1> %mask)
ret void
@@ -346,24 +98,20 @@ declare void @llvm.masked.compressstore.v1f32(<1 x float>, ptr, <1 x i1>)
define void @compressstore_v1f32(ptr %base, <1 x float> %v, <1 x i1> %mask) {
; RV32-LABEL: compressstore_v1f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT: vfirst.m a1, v0
-; RV32-NEXT: bnez a1, .LBB4_2
-; RV32-NEXT: # %bb.1: # %cond.store
; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV32-NEXT: vse32.v v8, (a0)
-; RV32-NEXT: .LBB4_2: # %else
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT: vse32.v v9, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v1f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT: vfirst.m a1, v0
-; RV64-NEXT: bnez a1, .LBB4_2
-; RV64-NEXT: # %bb.1: # %cond.store
; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT: vse32.v v8, (a0)
-; RV64-NEXT: .LBB4_2: # %else
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT: vse32.v v9, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v1f32(<1 x float> %v, ptr align 4 %base, <1 x i1> %mask)
ret void
@@ -373,48 +121,20 @@ declare void @llvm.masked.compressstore.v2f32(<2 x float>, ptr, <2 x i1>)
define void @compressstore_v2f32(ptr %base, <2 x float> %v, <2 x i1> %mask) {
; RV32-LABEL: compressstore_v2f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB5_3
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: bnez a1, .LBB5_4
-; RV32-NEXT: .LBB5_2: # %else2
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB5_3: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV32-NEXT: vse32.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: beqz a1, .LBB5_2
-; RV32-NEXT: .LBB5_4: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vse32.v v8, (a0)
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT: vse32.v v9, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v2f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB5_3
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: bnez a1, .LBB5_4
-; RV64-NEXT: .LBB5_2: # %else2
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB5_3: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT: vse32.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: beqz a1, .LBB5_2
-; RV64-NEXT: .LBB5_4: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vse32.v v8, (a0)
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT: vse32.v v9, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v2f32(<2 x float> %v, ptr align 4 %base, <2 x i1> %mask)
ret void
@@ -424,88 +144,20 @@ declare void @llvm.masked.compressstore.v4f32(<4 x float>, ptr, <4 x i1>)
define void @compressstore_v4f32(ptr %base, <4 x float> %v, <4 x i1> %mask) {
; RV32-LABEL: compressstore_v4f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB6_5
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB6_6
-; RV32-NEXT: .LBB6_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB6_7
-; RV32-NEXT: .LBB6_3: # %else5
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: bnez a1, .LBB6_8
-; RV32-NEXT: .LBB6_4: # %else8
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB6_5: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vse32.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB6_2
-; RV32-NEXT: .LBB6_6: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 1
-; RV32-NEXT: vse32.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB6_3
-; RV32-NEXT: .LBB6_7: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 2
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; RV32-NEXT: vse32.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: beqz a1, .LBB6_4
-; RV32-NEXT: .LBB6_8: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vse32.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v4f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB6_5
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB6_6
-; RV64-NEXT: .LBB6_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB6_7
-; RV64-NEXT: .LBB6_3: # %else5
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: bnez a1, .LBB6_8
-; RV64-NEXT: .LBB6_4: # %else8
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB6_5: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vse32.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB6_2
-; RV64-NEXT: .LBB6_6: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 1
-; RV64-NEXT: vse32.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB6_3
-; RV64-NEXT: .LBB6_7: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 2
+; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; RV64-NEXT: vse32.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: beqz a1, .LBB6_4
-; RV64-NEXT: .LBB6_8: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vse32.v v8, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v4f32(<4 x float> %v, ptr align 4 %base, <4 x i1> %mask)
ret void
@@ -515,176 +167,20 @@ declare void @llvm.masked.compressstore.v8f32(<8 x float>, ptr, <8 x i1>)
define void @compressstore_v8f32(ptr %base, <8 x float> %v, <8 x i1> %mask) {
; RV32-LABEL: compressstore_v8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB7_9
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB7_10
-; RV32-NEXT: .LBB7_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB7_11
-; RV32-NEXT: .LBB7_3: # %else5
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: bnez a2, .LBB7_12
-; RV32-NEXT: .LBB7_4: # %else8
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: bnez a2, .LBB7_13
-; RV32-NEXT: .LBB7_5: # %else11
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: bnez a2, .LBB7_14
-; RV32-NEXT: .LBB7_6: # %else14
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: bnez a2, .LBB7_15
-; RV32-NEXT: .LBB7_7: # %else17
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: bnez a1, .LBB7_16
-; RV32-NEXT: .LBB7_8: # %else20
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB7_9: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vse32.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB7_2
-; RV32-NEXT: .LBB7_10: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 1
-; RV32-NEXT: vse32.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB7_3
-; RV32-NEXT: .LBB7_11: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 2
-; RV32-NEXT: vse32.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: beqz a2, .LBB7_4
-; RV32-NEXT: .LBB7_12: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 3
-; RV32-NEXT: vse32.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: beqz a2, .LBB7_5
-; RV32-NEXT: .LBB7_13: # %cond.store10
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 4
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vse32.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: beqz a2, .LBB7_6
-; RV32-NEXT: .LBB7_14: # %cond.store13
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 5
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vse32.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: beqz a2, .LBB7_7
-; RV32-NEXT: .LBB7_15: # %cond.store16
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 6
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vcompress.vm v10, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vse32.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: beqz a1, .LBB7_8
-; RV32-NEXT: .LBB7_16: # %cond.store19
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vse32.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB7_9
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB7_10
-; RV64-NEXT: .LBB7_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB7_11
-; RV64-NEXT: .LBB7_3: # %else5
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: bnez a2, .LBB7_12
-; RV64-NEXT: .LBB7_4: # %else8
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: bnez a2, .LBB7_13
-; RV64-NEXT: .LBB7_5: # %else11
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: bnez a2, .LBB7_14
-; RV64-NEXT: .LBB7_6: # %else14
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: bnez a2, .LBB7_15
-; RV64-NEXT: .LBB7_7: # %else17
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: bnez a1, .LBB7_16
-; RV64-NEXT: .LBB7_8: # %else20
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB7_9: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vse32.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB7_2
-; RV64-NEXT: .LBB7_10: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 1
-; RV64-NEXT: vse32.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB7_3
-; RV64-NEXT: .LBB7_11: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 2
-; RV64-NEXT: vse32.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: beqz a2, .LBB7_4
-; RV64-NEXT: .LBB7_12: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 3
-; RV64-NEXT: vse32.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: beqz a2, .LBB7_5
-; RV64-NEXT: .LBB7_13: # %cond.store10
-; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 4
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vse32.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: beqz a2, .LBB7_6
-; RV64-NEXT: .LBB7_14: # %cond.store13
-; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 5
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vse32.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: beqz a2, .LBB7_7
-; RV64-NEXT: .LBB7_15: # %cond.store16
-; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 6
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64-NEXT: vcompress.vm v10, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vse32.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: beqz a1, .LBB7_8
-; RV64-NEXT: .LBB7_16: # %cond.store19
-; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vse32.v v8, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v8f32(<8 x float> %v, ptr align 4 %base, <8 x i1> %mask)
ret void
@@ -694,24 +190,20 @@ declare void @llvm.masked.compressstore.v1f64(<1 x double>, ptr, <1 x i1>)
define void @compressstore_v1f64(ptr %base, <1 x double> %v, <1 x i1> %mask) {
; RV32-LABEL: compressstore_v1f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT: vfirst.m a1, v0
-; RV32-NEXT: bnez a1, .LBB8_2
-; RV32-NEXT: # %bb.1: # %cond.store
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v8, (a0)
-; RV32-NEXT: .LBB8_2: # %else
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vse64.v v9, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v1f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT: vfirst.m a1, v0
-; RV64-NEXT: bnez a1, .LBB8_2
-; RV64-NEXT: # %bb.1: # %cond.store
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: .LBB8_2: # %else
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vse64.v v9, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v1f64(<1 x double> %v, ptr align 8 %base, <1 x i1> %mask)
ret void
@@ -721,48 +213,20 @@ declare void @llvm.masked.compressstore.v2f64(<2 x double>, ptr, <2 x i1>)
define void @compressstore_v2f64(ptr %base, <2 x double> %v, <2 x i1> %mask) {
; RV32-LABEL: compressstore_v2f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB9_3
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: bnez a1, .LBB9_4
-; RV32-NEXT: .LBB9_2: # %else2
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB9_3: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: beqz a1, .LBB9_2
-; RV32-NEXT: .LBB9_4: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vse64.v v9, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v2f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB9_3
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: bnez a1, .LBB9_4
-; RV64-NEXT: .LBB9_2: # %else2
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB9_3: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: beqz a1, .LBB9_2
-; RV64-NEXT: .LBB9_4: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vse64.v v9, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v2f64(<2 x double> %v, ptr align 8 %base, <2 x i1> %mask)
ret void
@@ -772,92 +236,20 @@ declare void @llvm.masked.compressstore.v4f64(<4 x double>, ptr, <4 x i1>)
define void @compressstore_v4f64(ptr %base, <4 x double> %v, <4 x i1> %mask) {
; RV32-LABEL: compressstore_v4f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB10_5
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB10_6
-; RV32-NEXT: .LBB10_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB10_7
-; RV32-NEXT: .LBB10_3: # %else5
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: bnez a1, .LBB10_8
-; RV32-NEXT: .LBB10_4: # %else8
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB10_5: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB10_2
-; RV32-NEXT: .LBB10_6: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 1
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vcompress.vm v10, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV32-NEXT: vse64.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB10_3
-; RV32-NEXT: .LBB10_7: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 2
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: beqz a1, .LBB10_4
-; RV32-NEXT: .LBB10_8: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v4f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB10_5
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB10_6
-; RV64-NEXT: .LBB10_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB10_7
-; RV64-NEXT: .LBB10_3: # %else5
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: bnez a1, .LBB10_8
-; RV64-NEXT: .LBB10_4: # %else8
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB10_5: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB10_2
-; RV64-NEXT: .LBB10_6: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 1
-; RV64-NEXT: vse64.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB10_3
-; RV64-NEXT: .LBB10_7: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 2
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vcompress.vm v10, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vse64.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: beqz a1, .LBB10_4
-; RV64-NEXT: .LBB10_8: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v4f64(<4 x double> %v, ptr align 8 %base, <4 x i1> %mask)
ret void
@@ -867,213 +259,21 @@ declare void @llvm.masked.compressstore.v8f64(<8 x double>, ptr, <8 x i1>)
define void @compressstore_v8f64(ptr %base, <8 x double> %v, <8 x i1> %mask) {
; RV32-LABEL: compressstore_v8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB11_11
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB11_12
-; RV32-NEXT: .LBB11_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB11_13
-; RV32-NEXT: .LBB11_3: # %else5
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: beqz a2, .LBB11_5
-; RV32-NEXT: .LBB11_4: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 3
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v12, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: .LBB11_5: # %else8
-; RV32-NEXT: addi sp, sp, -320
-; RV32-NEXT: .cfi_def_cfa_offset 320
-; RV32-NEXT: sw ra, 316(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 312(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: addi s0, sp, 320
-; RV32-NEXT: .cfi_def_cfa s0, 0
-; RV32-NEXT: andi sp, sp, -64
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: bnez a2, .LBB11_14
-; RV32-NEXT: # %bb.6: # %else11
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: bnez a2, .LBB11_15
-; RV32-NEXT: .LBB11_7: # %else14
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: bnez a2, .LBB11_16
-; RV32-NEXT: .LBB11_8: # %else17
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: beqz a1, .LBB11_10
-; RV32-NEXT: .LBB11_9: # %cond.store19
-; RV32-NEXT: mv a1, sp
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vse64.v v8, (a1)
-; RV32-NEXT: fld fa5, 56(sp)
-; RV32-NEXT: fsd fa5, 0(a0)
-; RV32-NEXT: .LBB11_10: # %else20
-; RV32-NEXT: addi sp, s0, -320
-; RV32-NEXT: lw ra, 316(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 312(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 320
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB11_11: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB11_2
-; RV32-NEXT: .LBB11_12: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 1
+; RV32-NEXT: vcompress.vm v12, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vse64.v v12, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB11_3
-; RV32-NEXT: .LBB11_13: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 2
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v12, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: bnez a2, .LBB11_4
-; RV32-NEXT: j .LBB11_5
-; RV32-NEXT: .LBB11_14: # %cond.store10
-; RV32-NEXT: addi a2, sp, 192
-; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vse64.v v8, (a2)
-; RV32-NEXT: fld fa5, 224(sp)
-; RV32-NEXT: fsd fa5, 0(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: beqz a2, .LBB11_7
-; RV32-NEXT: .LBB11_15: # %cond.store13
-; RV32-NEXT: addi a2, sp, 128
-; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vse64.v v8, (a2)
-; RV32-NEXT: fld fa5, 168(sp)
-; RV32-NEXT: fsd fa5, 0(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: beqz a2, .LBB11_8
-; RV32-NEXT: .LBB11_16: # %cond.store16
-; RV32-NEXT: addi a2, sp, 64
-; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vse64.v v8, (a2)
-; RV32-NEXT: fld fa5, 112(sp)
-; RV32-NEXT: fsd fa5, 0(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: bnez a1, .LBB11_9
-; RV32-NEXT: j .LBB11_10
+; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB11_11
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB11_12
-; RV64-NEXT: .LBB11_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB11_13
-; RV64-NEXT: .LBB11_3: # %else5
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: beqz a2, .LBB11_5
-; RV64-NEXT: .LBB11_4: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v12, v8, 3
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v12, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: .LBB11_5: # %else8
-; RV64-NEXT: addi sp, sp, -320
-; RV64-NEXT: .cfi_def_cfa_offset 320
-; RV64-NEXT: sd ra, 312(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 304(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: addi s0, sp, 320
-; RV64-NEXT: .cfi_def_cfa s0, 0
-; RV64-NEXT: andi sp, sp, -64
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: bnez a2, .LBB11_14
-; RV64-NEXT: # %bb.6: # %else11
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: bnez a2, .LBB11_15
-; RV64-NEXT: .LBB11_7: # %else14
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: bnez a2, .LBB11_16
-; RV64-NEXT: .LBB11_8: # %else17
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: beqz a1, .LBB11_10
-; RV64-NEXT: .LBB11_9: # %cond.store19
-; RV64-NEXT: mv a1, sp
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a1)
-; RV64-NEXT: fld fa5, 56(sp)
-; RV64-NEXT: fsd fa5, 0(a0)
-; RV64-NEXT: .LBB11_10: # %else20
-; RV64-NEXT: addi sp, s0, -320
-; RV64-NEXT: ld ra, 312(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 304(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 320
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB11_11: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB11_2
-; RV64-NEXT: .LBB11_12: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v12, v8, 1
+; RV64-NEXT: vcompress.vm v12, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vse64.v v12, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB11_3
-; RV64-NEXT: .LBB11_13: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v12, v8, 2
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v12, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: bnez a2, .LBB11_4
-; RV64-NEXT: j .LBB11_5
-; RV64-NEXT: .LBB11_14: # %cond.store10
-; RV64-NEXT: addi a2, sp, 192
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a2)
-; RV64-NEXT: fld fa5, 224(sp)
-; RV64-NEXT: fsd fa5, 0(a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: beqz a2, .LBB11_7
-; RV64-NEXT: .LBB11_15: # %cond.store13
-; RV64-NEXT: addi a2, sp, 128
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a2)
-; RV64-NEXT: fld fa5, 168(sp)
-; RV64-NEXT: fsd fa5, 0(a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: beqz a2, .LBB11_8
-; RV64-NEXT: .LBB11_16: # %cond.store16
-; RV64-NEXT: addi a2, sp, 64
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a2)
-; RV64-NEXT: fld fa5, 112(sp)
-; RV64-NEXT: fsd fa5, 0(a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: bnez a1, .LBB11_9
-; RV64-NEXT: j .LBB11_10
+; RV64-NEXT: ret
call void @llvm.masked.compressstore.v8f64(<8 x double> %v, ptr align 8 %base, <8 x i1> %mask)
ret void
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
index eb0096d..a388ba9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
@@ -6,13 +6,11 @@ declare void @llvm.masked.compressstore.v1i8(<1 x i8>, ptr, <1 x i1>)
define void @compressstore_v1i8(ptr %base, <1 x i8> %v, <1 x i1> %mask) {
; CHECK-LABEL: compressstore_v1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vfirst.m a1, v0
-; CHECK-NEXT: bnez a1, .LBB0_2
-; CHECK-NEXT: # %bb.1: # %cond.store
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vse8.v v8, (a0)
-; CHECK-NEXT: .LBB0_2: # %else
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vse8.v v9, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v1i8(<1 x i8> %v, ptr %base, <1 x i1> %mask)
ret void
@@ -22,25 +20,11 @@ declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>)
define void @compressstore_v2i8(ptr %base, <2 x i8> %v, <2 x i1> %mask) {
; CHECK-LABEL: compressstore_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB1_3
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: bnez a1, .LBB1_4
-; CHECK-NEXT: .LBB1_2: # %else2
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB1_3: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vse8.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: beqz a1, .LBB1_2
-; CHECK-NEXT: .LBB1_4: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vse8.v v9, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v2i8(<2 x i8> %v, ptr %base, <2 x i1> %mask)
ret void
@@ -50,45 +34,11 @@ declare void @llvm.masked.compressstore.v4i8(<4 x i8>, ptr, <4 x i1>)
define void @compressstore_v4i8(ptr %base, <4 x i8> %v, <4 x i1> %mask) {
; CHECK-LABEL: compressstore_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB2_5
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB2_6
-; CHECK-NEXT: .LBB2_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB2_7
-; CHECK-NEXT: .LBB2_3: # %else5
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: bnez a1, .LBB2_8
-; CHECK-NEXT: .LBB2_4: # %else8
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB2_5: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; CHECK-NEXT: vse8.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB2_2
-; CHECK-NEXT: .LBB2_6: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB2_3
-; CHECK-NEXT: .LBB2_7: # %cond.store4
-; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
-; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: beqz a1, .LBB2_4
-; CHECK-NEXT: .LBB2_8: # %cond.store7
-; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v4i8(<4 x i8> %v, ptr %base, <4 x i1> %mask)
ret void
@@ -98,85 +48,11 @@ declare void @llvm.masked.compressstore.v8i8(<8 x i8>, ptr, <8 x i1>)
define void @compressstore_v8i8(ptr %base, <8 x i8> %v, <8 x i1> %mask) {
; CHECK-LABEL: compressstore_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB3_9
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB3_10
-; CHECK-NEXT: .LBB3_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB3_11
-; CHECK-NEXT: .LBB3_3: # %else5
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: bnez a2, .LBB3_12
-; CHECK-NEXT: .LBB3_4: # %else8
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: bnez a2, .LBB3_13
-; CHECK-NEXT: .LBB3_5: # %else11
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: bnez a2, .LBB3_14
-; CHECK-NEXT: .LBB3_6: # %else14
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: bnez a2, .LBB3_15
-; CHECK-NEXT: .LBB3_7: # %else17
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: bnez a1, .LBB3_16
-; CHECK-NEXT: .LBB3_8: # %else20
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB3_9: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vse8.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB3_2
-; CHECK-NEXT: .LBB3_10: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB3_3
-; CHECK-NEXT: .LBB3_11: # %cond.store4
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
-; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: beqz a2, .LBB3_4
-; CHECK-NEXT: .LBB3_12: # %cond.store7
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 3
-; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: beqz a2, .LBB3_5
-; CHECK-NEXT: .LBB3_13: # %cond.store10
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 4
-; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: beqz a2, .LBB3_6
-; CHECK-NEXT: .LBB3_14: # %cond.store13
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 5
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: beqz a2, .LBB3_7
-; CHECK-NEXT: .LBB3_15: # %cond.store16
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 6
-; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: beqz a1, .LBB3_8
-; CHECK-NEXT: .LBB3_16: # %cond.store19
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 7
-; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v8i8(<8 x i8> %v, ptr %base, <8 x i1> %mask)
ret void
@@ -186,13 +62,11 @@ declare void @llvm.masked.compressstore.v1i16(<1 x i16>, ptr, <1 x i1>)
define void @compressstore_v1i16(ptr %base, <1 x i16> %v, <1 x i1> %mask) {
; CHECK-LABEL: compressstore_v1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vfirst.m a1, v0
-; CHECK-NEXT: bnez a1, .LBB4_2
-; CHECK-NEXT: # %bb.1: # %cond.store
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vse16.v v8, (a0)
-; CHECK-NEXT: .LBB4_2: # %else
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vse16.v v9, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v1i16(<1 x i16> %v, ptr align 2 %base, <1 x i1> %mask)
ret void
@@ -202,25 +76,11 @@ declare void @llvm.masked.compressstore.v2i16(<2 x i16>, ptr, <2 x i1>)
define void @compressstore_v2i16(ptr %base, <2 x i16> %v, <2 x i1> %mask) {
; CHECK-LABEL: compressstore_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB5_3
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: bnez a1, .LBB5_4
-; CHECK-NEXT: .LBB5_2: # %else2
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB5_3: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vse16.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: beqz a1, .LBB5_2
-; CHECK-NEXT: .LBB5_4: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vse16.v v9, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v2i16(<2 x i16> %v, ptr align 2 %base, <2 x i1> %mask)
ret void
@@ -230,45 +90,11 @@ declare void @llvm.masked.compressstore.v4i16(<4 x i16>, ptr, <4 x i1>)
define void @compressstore_v4i16(ptr %base, <4 x i16> %v, <4 x i1> %mask) {
; CHECK-LABEL: compressstore_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB6_5
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB6_6
-; CHECK-NEXT: .LBB6_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB6_7
-; CHECK-NEXT: .LBB6_3: # %else5
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: bnez a1, .LBB6_8
-; CHECK-NEXT: .LBB6_4: # %else8
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB6_5: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; CHECK-NEXT: vse16.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB6_2
-; CHECK-NEXT: .LBB6_6: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB6_3
-; CHECK-NEXT: .LBB6_7: # %cond.store4
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: beqz a1, .LBB6_4
-; CHECK-NEXT: .LBB6_8: # %cond.store7
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v4i16(<4 x i16> %v, ptr align 2 %base, <4 x i1> %mask)
ret void
@@ -278,85 +104,11 @@ declare void @llvm.masked.compressstore.v8i16(<8 x i16>, ptr, <8 x i1>)
define void @compressstore_v8i16(ptr %base, <8 x i16> %v, <8 x i1> %mask) {
; CHECK-LABEL: compressstore_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB7_9
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB7_10
-; CHECK-NEXT: .LBB7_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB7_11
-; CHECK-NEXT: .LBB7_3: # %else5
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: bnez a2, .LBB7_12
-; CHECK-NEXT: .LBB7_4: # %else8
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: bnez a2, .LBB7_13
-; CHECK-NEXT: .LBB7_5: # %else11
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: bnez a2, .LBB7_14
-; CHECK-NEXT: .LBB7_6: # %else14
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: bnez a2, .LBB7_15
-; CHECK-NEXT: .LBB7_7: # %else17
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: bnez a1, .LBB7_16
-; CHECK-NEXT: .LBB7_8: # %else20
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB7_9: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vse16.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB7_2
-; CHECK-NEXT: .LBB7_10: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB7_3
-; CHECK-NEXT: .LBB7_11: # %cond.store4
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
-; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: beqz a2, .LBB7_4
-; CHECK-NEXT: .LBB7_12: # %cond.store7
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 3
-; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: beqz a2, .LBB7_5
-; CHECK-NEXT: .LBB7_13: # %cond.store10
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 4
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: beqz a2, .LBB7_6
-; CHECK-NEXT: .LBB7_14: # %cond.store13
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 5
-; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: beqz a2, .LBB7_7
-; CHECK-NEXT: .LBB7_15: # %cond.store16
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 6
-; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: beqz a1, .LBB7_8
-; CHECK-NEXT: .LBB7_16: # %cond.store19
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 7
-; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v8i16(<8 x i16> %v, ptr align 2 %base, <8 x i1> %mask)
ret void
@@ -366,13 +118,11 @@ declare void @llvm.masked.compressstore.v1i32(<1 x i32>, ptr, <1 x i1>)
define void @compressstore_v1i32(ptr %base, <1 x i32> %v, <1 x i1> %mask) {
; CHECK-LABEL: compressstore_v1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vfirst.m a1, v0
-; CHECK-NEXT: bnez a1, .LBB8_2
-; CHECK-NEXT: # %bb.1: # %cond.store
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
-; CHECK-NEXT: .LBB8_2: # %else
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vse32.v v9, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v1i32(<1 x i32> %v, ptr align 4 %base, <1 x i1> %mask)
ret void
@@ -382,25 +132,11 @@ declare void @llvm.masked.compressstore.v2i32(<2 x i32>, ptr, <2 x i1>)
define void @compressstore_v2i32(ptr %base, <2 x i32> %v, <2 x i1> %mask) {
; CHECK-LABEL: compressstore_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB9_3
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: bnez a1, .LBB9_4
-; CHECK-NEXT: .LBB9_2: # %else2
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB9_3: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: beqz a1, .LBB9_2
-; CHECK-NEXT: .LBB9_4: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vse32.v v9, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v2i32(<2 x i32> %v, ptr align 4 %base, <2 x i1> %mask)
ret void
@@ -410,45 +146,11 @@ declare void @llvm.masked.compressstore.v4i32(<4 x i32>, ptr, <4 x i1>)
define void @compressstore_v4i32(ptr %base, <4 x i32> %v, <4 x i1> %mask) {
; CHECK-LABEL: compressstore_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB10_5
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB10_6
-; CHECK-NEXT: .LBB10_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB10_7
-; CHECK-NEXT: .LBB10_3: # %else5
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: bnez a1, .LBB10_8
-; CHECK-NEXT: .LBB10_4: # %else8
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB10_5: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB10_2
-; CHECK-NEXT: .LBB10_6: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vse32.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB10_3
-; CHECK-NEXT: .LBB10_7: # %cond.store4
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: beqz a1, .LBB10_4
-; CHECK-NEXT: .LBB10_8: # %cond.store7
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v4i32(<4 x i32> %v, ptr align 4 %base, <4 x i1> %mask)
ret void
@@ -458,89 +160,11 @@ declare void @llvm.masked.compressstore.v8i32(<8 x i32>, ptr, <8 x i1>)
define void @compressstore_v8i32(ptr %base, <8 x i32> %v, <8 x i1> %mask) {
; CHECK-LABEL: compressstore_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB11_9
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB11_10
-; CHECK-NEXT: .LBB11_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB11_11
-; CHECK-NEXT: .LBB11_3: # %else5
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: bnez a2, .LBB11_12
-; CHECK-NEXT: .LBB11_4: # %else8
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: bnez a2, .LBB11_13
-; CHECK-NEXT: .LBB11_5: # %else11
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: bnez a2, .LBB11_14
-; CHECK-NEXT: .LBB11_6: # %else14
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: bnez a2, .LBB11_15
-; CHECK-NEXT: .LBB11_7: # %else17
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: bnez a1, .LBB11_16
-; CHECK-NEXT: .LBB11_8: # %else20
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB11_9: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB11_2
-; CHECK-NEXT: .LBB11_10: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
-; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB11_3
-; CHECK-NEXT: .LBB11_11: # %cond.store4
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 2
-; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: beqz a2, .LBB11_4
-; CHECK-NEXT: .LBB11_12: # %cond.store7
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 3
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vcompress.vm v10, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: beqz a2, .LBB11_5
-; CHECK-NEXT: .LBB11_13: # %cond.store10
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 4
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: beqz a2, .LBB11_6
-; CHECK-NEXT: .LBB11_14: # %cond.store13
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 5
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: beqz a2, .LBB11_7
-; CHECK-NEXT: .LBB11_15: # %cond.store16
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 6
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: beqz a1, .LBB11_8
-; CHECK-NEXT: .LBB11_16: # %cond.store19
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 7
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v8i32(<8 x i32> %v, ptr align 4 %base, <8 x i1> %mask)
ret void
@@ -548,439 +172,59 @@ define void @compressstore_v8i32(ptr %base, <8 x i32> %v, <8 x i1> %mask) {
declare void @llvm.masked.compressstore.v1i64(<1 x i64>, ptr, <1 x i1>)
define void @compressstore_v1i64(ptr %base, <1 x i64> %v, <1 x i1> %mask) {
-; RV32-LABEL: compressstore_v1i64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT: vfirst.m a1, v0
-; RV32-NEXT: bnez a1, .LBB12_2
-; RV32-NEXT: # %bb.1: # %cond.store
-; RV32-NEXT: li a1, 32
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vsrl.vx v9, v8, a1
-; RV32-NEXT: vmv.x.s a1, v9
-; RV32-NEXT: vmv.x.s a2, v8
-; RV32-NEXT: sw a2, 0(a0)
-; RV32-NEXT: sw a1, 4(a0)
-; RV32-NEXT: .LBB12_2: # %else
-; RV32-NEXT: ret
-;
-; RV64-LABEL: compressstore_v1i64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT: vfirst.m a1, v0
-; RV64-NEXT: bnez a1, .LBB12_2
-; RV64-NEXT: # %bb.1: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: .LBB12_2: # %else
-; RV64-NEXT: ret
+; CHECK-LABEL: compressstore_v1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vse64.v v9, (a0)
+; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v1i64(<1 x i64> %v, ptr align 8 %base, <1 x i1> %mask)
ret void
}
declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>)
define void @compressstore_v2i64(ptr %base, <2 x i64> %v, <2 x i1> %mask) {
-; RV32-LABEL: compressstore_v2i64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB13_3
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: bnez a1, .LBB13_4
-; RV32-NEXT: .LBB13_2: # %else2
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB13_3: # %cond.store
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vsrl.vx v9, v8, a2
-; RV32-NEXT: vmv.x.s a2, v9
-; RV32-NEXT: vmv.x.s a3, v8
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: beqz a1, .LBB13_2
-; RV32-NEXT: .LBB13_4: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: li a1, 32
-; RV32-NEXT: vsrl.vx v9, v8, a1
-; RV32-NEXT: vmv.x.s a1, v9
-; RV32-NEXT: vmv.x.s a2, v8
-; RV32-NEXT: sw a2, 0(a0)
-; RV32-NEXT: sw a1, 4(a0)
-; RV32-NEXT: ret
-;
-; RV64-LABEL: compressstore_v2i64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB13_3
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: bnez a1, .LBB13_4
-; RV64-NEXT: .LBB13_2: # %else2
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB13_3: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: beqz a1, .LBB13_2
-; RV64-NEXT: .LBB13_4: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: ret
+; CHECK-LABEL: compressstore_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vse64.v v9, (a0)
+; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v2i64(<2 x i64> %v, ptr align 8 %base, <2 x i1> %mask)
ret void
}
declare void @llvm.masked.compressstore.v4i64(<4 x i64>, ptr, <4 x i1>)
define void @compressstore_v4i64(ptr %base, <4 x i64> %v, <4 x i1> %mask) {
-; RV32-LABEL: compressstore_v4i64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB14_5
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB14_6
-; RV32-NEXT: .LBB14_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB14_7
-; RV32-NEXT: .LBB14_3: # %else5
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: bnez a1, .LBB14_8
-; RV32-NEXT: .LBB14_4: # %else8
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB14_5: # %cond.store
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vsrl.vx v10, v8, a2
-; RV32-NEXT: vmv.x.s a2, v10
-; RV32-NEXT: vmv.x.s a3, v8
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB14_2
-; RV32-NEXT: .LBB14_6: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 1
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v12, v10, a2
-; RV32-NEXT: vmv.x.s a2, v12
-; RV32-NEXT: vmv.x.s a3, v10
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB14_3
-; RV32-NEXT: .LBB14_7: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 2
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v12, v10, a2
-; RV32-NEXT: vmv.x.s a2, v12
-; RV32-NEXT: vmv.x.s a3, v10
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: beqz a1, .LBB14_4
-; RV32-NEXT: .LBB14_8: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: li a1, 32
-; RV32-NEXT: vsrl.vx v10, v8, a1
-; RV32-NEXT: vmv.x.s a1, v10
-; RV32-NEXT: vmv.x.s a2, v8
-; RV32-NEXT: sw a2, 0(a0)
-; RV32-NEXT: sw a1, 4(a0)
-; RV32-NEXT: ret
-;
-; RV64-LABEL: compressstore_v4i64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB14_5
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB14_6
-; RV64-NEXT: .LBB14_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB14_7
-; RV64-NEXT: .LBB14_3: # %else5
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: bnez a1, .LBB14_8
-; RV64-NEXT: .LBB14_4: # %else8
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB14_5: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB14_2
-; RV64-NEXT: .LBB14_6: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 1
-; RV64-NEXT: vse64.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB14_3
-; RV64-NEXT: .LBB14_7: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 2
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: beqz a1, .LBB14_4
-; RV64-NEXT: .LBB14_8: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: ret
+; CHECK-LABEL: compressstore_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vcompress.vm v10, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vse64.v v10, (a0)
+; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v4i64(<4 x i64> %v, ptr align 8 %base, <4 x i1> %mask)
ret void
}
declare void @llvm.masked.compressstore.v8i64(<8 x i64>, ptr, <8 x i1>)
define void @compressstore_v8i64(ptr %base, <8 x i64> %v, <8 x i1> %mask) {
-; RV32-LABEL: compressstore_v8i64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB15_9
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB15_10
-; RV32-NEXT: .LBB15_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB15_11
-; RV32-NEXT: .LBB15_3: # %else5
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: bnez a2, .LBB15_12
-; RV32-NEXT: .LBB15_4: # %else8
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: bnez a2, .LBB15_13
-; RV32-NEXT: .LBB15_5: # %else11
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: bnez a2, .LBB15_14
-; RV32-NEXT: .LBB15_6: # %else14
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: bnez a2, .LBB15_15
-; RV32-NEXT: .LBB15_7: # %else17
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: bnez a1, .LBB15_16
-; RV32-NEXT: .LBB15_8: # %else20
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB15_9: # %cond.store
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vsrl.vx v12, v8, a2
-; RV32-NEXT: vmv.x.s a2, v12
-; RV32-NEXT: vmv.x.s a3, v8
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB15_2
-; RV32-NEXT: .LBB15_10: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 1
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v12, a2
-; RV32-NEXT: vmv.x.s a2, v16
-; RV32-NEXT: vmv.x.s a3, v12
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB15_3
-; RV32-NEXT: .LBB15_11: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 2
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v12, a2
-; RV32-NEXT: vmv.x.s a2, v16
-; RV32-NEXT: vmv.x.s a3, v12
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: beqz a2, .LBB15_4
-; RV32-NEXT: .LBB15_12: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 3
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v12, a2
-; RV32-NEXT: vmv.x.s a2, v16
-; RV32-NEXT: vmv.x.s a3, v12
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: beqz a2, .LBB15_5
-; RV32-NEXT: .LBB15_13: # %cond.store10
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 4
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v12, a2
-; RV32-NEXT: vmv.x.s a2, v16
-; RV32-NEXT: vmv.x.s a3, v12
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: beqz a2, .LBB15_6
-; RV32-NEXT: .LBB15_14: # %cond.store13
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 5
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v12, a2
-; RV32-NEXT: vmv.x.s a2, v16
-; RV32-NEXT: vmv.x.s a3, v12
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: beqz a2, .LBB15_7
-; RV32-NEXT: .LBB15_15: # %cond.store16
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 6
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v12, a2
-; RV32-NEXT: vmv.x.s a2, v16
-; RV32-NEXT: vmv.x.s a3, v12
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: beqz a1, .LBB15_8
-; RV32-NEXT: .LBB15_16: # %cond.store19
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: li a1, 32
-; RV32-NEXT: vsrl.vx v12, v8, a1
-; RV32-NEXT: vmv.x.s a1, v12
-; RV32-NEXT: vmv.x.s a2, v8
-; RV32-NEXT: sw a2, 0(a0)
-; RV32-NEXT: sw a1, 4(a0)
-; RV32-NEXT: ret
-;
-; RV64-LABEL: compressstore_v8i64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB15_11
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB15_12
-; RV64-NEXT: .LBB15_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB15_13
-; RV64-NEXT: .LBB15_3: # %else5
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: beqz a2, .LBB15_5
-; RV64-NEXT: .LBB15_4: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v12, v8, 3
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v12, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: .LBB15_5: # %else8
-; RV64-NEXT: addi sp, sp, -320
-; RV64-NEXT: .cfi_def_cfa_offset 320
-; RV64-NEXT: sd ra, 312(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 304(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: addi s0, sp, 320
-; RV64-NEXT: .cfi_def_cfa s0, 0
-; RV64-NEXT: andi sp, sp, -64
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: bnez a2, .LBB15_14
-; RV64-NEXT: # %bb.6: # %else11
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: bnez a2, .LBB15_15
-; RV64-NEXT: .LBB15_7: # %else14
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: bnez a2, .LBB15_16
-; RV64-NEXT: .LBB15_8: # %else17
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: beqz a1, .LBB15_10
-; RV64-NEXT: .LBB15_9: # %cond.store19
-; RV64-NEXT: mv a1, sp
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a1)
-; RV64-NEXT: ld a1, 56(sp)
-; RV64-NEXT: sd a1, 0(a0)
-; RV64-NEXT: .LBB15_10: # %else20
-; RV64-NEXT: addi sp, s0, -320
-; RV64-NEXT: ld ra, 312(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 304(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 320
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB15_11: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB15_2
-; RV64-NEXT: .LBB15_12: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v12, v8, 1
-; RV64-NEXT: vse64.v v12, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB15_3
-; RV64-NEXT: .LBB15_13: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v12, v8, 2
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v12, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: bnez a2, .LBB15_4
-; RV64-NEXT: j .LBB15_5
-; RV64-NEXT: .LBB15_14: # %cond.store10
-; RV64-NEXT: addi a2, sp, 192
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a2)
-; RV64-NEXT: ld a2, 224(sp)
-; RV64-NEXT: sd a2, 0(a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: beqz a2, .LBB15_7
-; RV64-NEXT: .LBB15_15: # %cond.store13
-; RV64-NEXT: addi a2, sp, 128
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a2)
-; RV64-NEXT: ld a2, 168(sp)
-; RV64-NEXT: sd a2, 0(a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: beqz a2, .LBB15_8
-; RV64-NEXT: .LBB15_16: # %cond.store16
-; RV64-NEXT: addi a2, sp, 64
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a2)
-; RV64-NEXT: ld a2, 112(sp)
-; RV64-NEXT: sd a2, 0(a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: bnez a1, .LBB15_9
-; RV64-NEXT: j .LBB15_10
+; CHECK-LABEL: compressstore_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vcompress.vm v12, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vse64.v v12, (a0)
+; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v8i64(<8 x i64> %v, ptr align 8 %base, <8 x i1> %mask)
ret void
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index 68740ee..7dcfb24 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -1599,15 +1599,16 @@ define float @vreduce_fminimum_v2f32(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v9, v8, v8
+; CHECK-NEXT: vcpop.m a0, v9
+; CHECK-NEXT: beqz a0, .LBB99_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB99_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %x
@@ -1619,15 +1620,8 @@ define float @vreduce_fminimum_v2f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v2f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %x
@@ -1641,24 +1635,16 @@ define float @vreduce_fminimum_v4f32(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v9, v8, v8
+; CHECK-NEXT: vcpop.m a0, v9
+; CHECK-NEXT: beqz a0, .LBB101_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB101_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x float>, ptr %x
@@ -1670,24 +1656,8 @@ define float @vreduce_fminimum_v4f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v4f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x float>, ptr %x
@@ -1701,33 +1671,16 @@ define float @vreduce_fminimum_v8f32(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v10, v8, v8
+; CHECK-NEXT: vcpop.m a0, v10
+; CHECK-NEXT: beqz a0, .LBB103_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB103_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x float>, ptr %x
@@ -1739,33 +1692,8 @@ define float @vreduce_fminimum_v8f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x float>, ptr %x
@@ -1779,42 +1707,16 @@ define float @vreduce_fminimum_v16f32(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vle32.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v12, v8, v8
+; CHECK-NEXT: vcpop.m a0, v12
+; CHECK-NEXT: beqz a0, .LBB105_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB105_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x float>, ptr %x
@@ -1826,42 +1728,8 @@ define float @vreduce_fminimum_v16f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v16f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vle32.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x float>, ptr %x
@@ -1876,51 +1744,16 @@ define float @vreduce_fminimum_v32f32(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vle32.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB107_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB107_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <32 x float>, ptr %x
@@ -1933,51 +1766,8 @@ define float @vreduce_fminimum_v32f32_nonans(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vle32.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <32 x float>, ptr %x
@@ -2009,52 +1799,18 @@ define float @vreduce_fminimum_v64f32(ptr %x) {
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB109_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: j .LBB109_3
+; CHECK-NEXT: .LBB109_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB109_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
@@ -2073,51 +1829,8 @@ define float @vreduce_fminimum_v64f32_nonans(ptr %x) {
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle32.v v16, (a0)
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <64 x float>, ptr %x
@@ -2208,52 +1921,18 @@ define float @vreduce_fminimum_v128f32(ptr %x) {
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB111_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: j .LBB111_3
+; CHECK-NEXT: .LBB111_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB111_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: mv a1, a0
@@ -2281,51 +1960,8 @@ define float @vreduce_fminimum_v128f32_nonans(ptr %x) {
; CHECK-NEXT: vle32.v v0, (a1)
; CHECK-NEXT: vfmin.vv v16, v24, v16
; CHECK-NEXT: vfmin.vv v8, v8, v0
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <128 x float>, ptr %x
@@ -2339,15 +1975,16 @@ define double @vreduce_fminimum_v2f64(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v9, v8, v8
+; CHECK-NEXT: vcpop.m a0, v9
+; CHECK-NEXT: beqz a0, .LBB113_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI113_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI113_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB113_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x double>, ptr %x
@@ -2359,15 +1996,8 @@ define double @vreduce_fminimum_v2f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v2f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x double>, ptr %x
@@ -2381,24 +2011,16 @@ define double @vreduce_fminimum_v4f64(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vle64.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v10, v8, v8
+; CHECK-NEXT: vcpop.m a0, v10
+; CHECK-NEXT: beqz a0, .LBB115_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI115_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI115_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB115_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x double>, ptr %x
@@ -2410,24 +2032,8 @@ define double @vreduce_fminimum_v4f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v4f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vle64.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x double>, ptr %x
@@ -2441,33 +2047,16 @@ define double @vreduce_fminimum_v8f64(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vle64.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v12, v8, v8
+; CHECK-NEXT: vcpop.m a0, v12
+; CHECK-NEXT: beqz a0, .LBB117_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI117_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI117_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB117_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x double>, ptr %x
@@ -2479,33 +2068,8 @@ define double @vreduce_fminimum_v8f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vle64.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x double>, ptr %x
@@ -2519,42 +2083,16 @@ define double @vreduce_fminimum_v16f64(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v16f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB119_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI119_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI119_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB119_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x double>, ptr %x
@@ -2566,42 +2104,8 @@ define double @vreduce_fminimum_v16f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v16f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x double>, ptr %x
@@ -2632,43 +2136,18 @@ define double @vreduce_fminimum_v32f64(ptr %x) {
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB121_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI121_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI121_0)(a0)
+; CHECK-NEXT: j .LBB121_3
+; CHECK-NEXT: .LBB121_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB121_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
@@ -2686,42 +2165,8 @@ define double @vreduce_fminimum_v32f64_nonans(ptr %x) {
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle64.v v16, (a0)
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <32 x double>, ptr %x
@@ -2811,43 +2256,18 @@ define double @vreduce_fminimum_v64f64(ptr %x) {
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB123_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI123_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI123_0)(a0)
+; CHECK-NEXT: j .LBB123_3
+; CHECK-NEXT: .LBB123_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB123_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: mv a1, a0
@@ -2874,42 +2294,8 @@ define double @vreduce_fminimum_v64f64_nonans(ptr %x) {
; CHECK-NEXT: vle64.v v0, (a1)
; CHECK-NEXT: vfmin.vv v16, v24, v16
; CHECK-NEXT: vfmin.vv v8, v8, v0
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <64 x double>, ptr %x
@@ -2923,15 +2309,16 @@ define float @vreduce_fmaximum_v2f32(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v9, v8, v8
+; CHECK-NEXT: vcpop.m a0, v9
+; CHECK-NEXT: beqz a0, .LBB125_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB125_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %x
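The fmaximum hunks from here on mirror the fminimum lowering sketched above: the same vmfne.vv/vcpop.m NaN guard branching to a canonical-NaN return, with vfredmax.vs substituted for vfredmin.vs on the NaN-free path, and the _nonans variants again collapsing to a single vfredmax.vs.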
@@ -2943,15 +2330,8 @@ define float @vreduce_fmaximum_v2f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v2f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %x
@@ -2965,24 +2345,16 @@ define float @vreduce_fmaximum_v4f32(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v9, v8, v8
+; CHECK-NEXT: vcpop.m a0, v9
+; CHECK-NEXT: beqz a0, .LBB127_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB127_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x float>, ptr %x
@@ -2994,24 +2366,8 @@ define float @vreduce_fmaximum_v4f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v4f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x float>, ptr %x
@@ -3025,33 +2381,16 @@ define float @vreduce_fmaximum_v8f32(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v10, v8, v8
+; CHECK-NEXT: vcpop.m a0, v10
+; CHECK-NEXT: beqz a0, .LBB129_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB129_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x float>, ptr %x
@@ -3063,33 +2402,8 @@ define float @vreduce_fmaximum_v8f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x float>, ptr %x
@@ -3103,42 +2417,16 @@ define float @vreduce_fmaximum_v16f32(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vle32.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v12, v8, v8
+; CHECK-NEXT: vcpop.m a0, v12
+; CHECK-NEXT: beqz a0, .LBB131_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB131_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x float>, ptr %x
@@ -3150,42 +2438,8 @@ define float @vreduce_fmaximum_v16f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v16f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vle32.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x float>, ptr %x
@@ -3200,51 +2454,16 @@ define float @vreduce_fmaximum_v32f32(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vle32.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB133_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB133_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <32 x float>, ptr %x
@@ -3257,51 +2476,8 @@ define float @vreduce_fmaximum_v32f32_nonans(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vle32.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <32 x float>, ptr %x
@@ -3333,52 +2509,18 @@ define float @vreduce_fmaximum_v64f32(ptr %x) {
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB135_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: j .LBB135_3
+; CHECK-NEXT: .LBB135_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB135_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
@@ -3397,51 +2539,8 @@ define float @vreduce_fmaximum_v64f32_nonans(ptr %x) {
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle32.v v16, (a0)
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <64 x float>, ptr %x
@@ -3532,52 +2631,18 @@ define float @vreduce_fmaximum_v128f32(ptr %x) {
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB137_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: j .LBB137_3
+; CHECK-NEXT: .LBB137_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB137_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: mv a1, a0
@@ -3605,51 +2670,8 @@ define float @vreduce_fmaximum_v128f32_nonans(ptr %x) {
; CHECK-NEXT: vle32.v v0, (a1)
; CHECK-NEXT: vfmax.vv v16, v24, v16
; CHECK-NEXT: vfmax.vv v8, v8, v0
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <128 x float>, ptr %x
@@ -3663,15 +2685,16 @@ define double @vreduce_fmaximum_v2f64(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v9, v8, v8
+; CHECK-NEXT: vcpop.m a0, v9
+; CHECK-NEXT: beqz a0, .LBB139_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI139_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI139_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB139_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x double>, ptr %x
@@ -3683,15 +2706,8 @@ define double @vreduce_fmaximum_v2f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v2f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x double>, ptr %x
@@ -3705,24 +2721,16 @@ define double @vreduce_fmaximum_v4f64(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vle64.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v10, v8, v8
+; CHECK-NEXT: vcpop.m a0, v10
+; CHECK-NEXT: beqz a0, .LBB141_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI141_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI141_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB141_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x double>, ptr %x
@@ -3734,24 +2742,8 @@ define double @vreduce_fmaximum_v4f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v4f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vle64.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x double>, ptr %x
@@ -3765,33 +2757,16 @@ define double @vreduce_fmaximum_v8f64(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vle64.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v12, v8, v8
+; CHECK-NEXT: vcpop.m a0, v12
+; CHECK-NEXT: beqz a0, .LBB143_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI143_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI143_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB143_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x double>, ptr %x
@@ -3803,33 +2778,8 @@ define double @vreduce_fmaximum_v8f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vle64.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x double>, ptr %x
@@ -3843,42 +2793,16 @@ define double @vreduce_fmaximum_v16f64(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v16f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB145_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI145_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI145_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB145_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x double>, ptr %x
@@ -3890,42 +2814,8 @@ define double @vreduce_fmaximum_v16f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v16f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x double>, ptr %x
@@ -3956,43 +2846,18 @@ define double @vreduce_fmaximum_v32f64(ptr %x) {
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB147_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI147_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI147_0)(a0)
+; CHECK-NEXT: j .LBB147_3
+; CHECK-NEXT: .LBB147_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB147_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
@@ -4010,42 +2875,8 @@ define double @vreduce_fmaximum_v32f64_nonans(ptr %x) {
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle64.v v16, (a0)
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <32 x double>, ptr %x
@@ -4135,43 +2966,18 @@ define double @vreduce_fmaximum_v64f64(ptr %x) {
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB149_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI149_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI149_0)(a0)
+; CHECK-NEXT: j .LBB149_3
+; CHECK-NEXT: .LBB149_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB149_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: mv a1, a0
@@ -4198,42 +3004,8 @@ define double @vreduce_fmaximum_v64f64_nonans(ptr %x) {
; CHECK-NEXT: vle64.v v0, (a1)
; CHECK-NEXT: vfmax.vv v16, v24, v16
; CHECK-NEXT: vfmax.vv v8, v8, v0
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <64 x double>, ptr %x
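; Editorial aside, not part of the diff: the new lowering above implements the
; following reduction semantics. vmfne.vv of a vector against itself is true
; exactly for NaN lanes, vcpop.m counts those lanes, and the beqz skips to a
; plain vfredmax when none were found; otherwise a quiet NaN constant is
; loaded from the constant pool. A minimal IR sketch of the same logic,
; assuming a canonical quiet NaN result:
define double @fmaximum_reduce_ref(<4 x double> %v) {
  %nan = fcmp uno <4 x double> %v, %v
  %any.nan = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> %nan)
  %max = call double @llvm.vector.reduce.fmax.v4f64(<4 x double> %v)
  %r = select i1 %any.nan, double 0x7FF8000000000000, double %max
  ret double %r
}
declare i1 @llvm.vector.reduce.or.v4i1(<4 x i1>)
declare double @llvm.vector.reduce.fmax.v4f64(<4 x double>)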
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
new file mode 100644
index 0000000..a4ab67f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
+
+define signext i16 @sad_4x8_as_i16(<4 x i8> %a, <4 x i8> %b) {
+; CHECK-LABEL: sad_4x8_as_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.s.x v9, zero
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vwredsumu.vs v8, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %1 = zext <4 x i8> %a to <4 x i16>
+ %3 = zext <4 x i8> %b to <4 x i16>
+ %4 = sub nsw <4 x i16> %1, %3
+ %5 = tail call <4 x i16> @llvm.abs.v4i16(<4 x i16> %4, i1 true)
+ %6 = tail call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %5)
+ ret i16 %6
+}
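+
+; Editorial sketch, not part of the autogenerated checks: per element the
+; pattern above computes |a - b| on zero-extended inputs, which the backend
+; lowers as maxu(a, b) - minu(a, b) before the widening reduction. Scalar
+; form of the per-element step:
+define i16 @sad_elem_ref(i8 %a, i8 %b) {
+  %xa = zext i8 %a to i16
+  %xb = zext i8 %b to i16
+  %d = sub nsw i16 %xa, %xb
+  %ad = call i16 @llvm.abs.i16(i16 %d, i1 true)
+  ret i16 %ad
+}
+declare i16 @llvm.abs.i16(i16, i1)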
+
+define signext i32 @sad_4x8_as_i32(<4 x i8> %a, <4 x i8> %b) {
+; CHECK-LABEL: sad_4x8_as_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v9, v8
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: vredsum.vs v8, v9, v8
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %1 = zext <4 x i8> %a to <4 x i32>
+ %3 = zext <4 x i8> %b to <4 x i32>
+ %4 = sub nsw <4 x i32> %1, %3
+ %5 = tail call <4 x i32> @llvm.abs.v4i32(<4 x i32> %4, i1 true)
+ %6 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %5)
+ ret i32 %6
+}
+
+define signext i16 @sad_16x8_as_i16(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: sad_16x8_as_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.s.x v9, zero
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwredsumu.vs v8, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %1 = zext <16 x i8> %a to <16 x i16>
+ %3 = zext <16 x i8> %b to <16 x i16>
+ %4 = sub nsw <16 x i16> %1, %3
+ %5 = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %4, i1 true)
+ %6 = tail call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %5)
+ ret i16 %6
+}
+
+define signext i32 @sad_16x8_as_i32(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: sad_16x8_as_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v12, v8
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: vredsum.vs v8, v12, v8
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %1 = zext <16 x i8> %a to <16 x i32>
+ %3 = zext <16 x i8> %b to <16 x i32>
+ %4 = sub nsw <16 x i32> %1, %3
+ %5 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %4, i1 true)
+ %6 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
+ ret i32 %6
+}
+
+define signext i32 @sad_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stridea, i32 signext %strideb) {
+; CHECK-LABEL: sad_2block_16xi8_as_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v9, (a1)
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a1, a1, a3
+; CHECK-NEXT: vle8.v v10, (a0)
+; CHECK-NEXT: vle8.v v11, (a1)
+; CHECK-NEXT: vminu.vv v12, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vminu.vv v9, v10, v11
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a1, a1, a3
+; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: vle8.v v13, (a1)
+; CHECK-NEXT: vmaxu.vv v10, v10, v11
+; CHECK-NEXT: vsub.vv v9, v10, v9
+; CHECK-NEXT: vwaddu.vv v10, v9, v8
+; CHECK-NEXT: vminu.vv v8, v12, v13
+; CHECK-NEXT: vmaxu.vv v9, v12, v13
+; CHECK-NEXT: vsub.vv v8, v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a1, a1, a3
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vle8.v v12, (a1)
+; CHECK-NEXT: vzext.vf2 v14, v8
+; CHECK-NEXT: vwaddu.vv v16, v14, v10
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v8, v9, v12
+; CHECK-NEXT: vmaxu.vv v9, v9, v12
+; CHECK-NEXT: vsub.vv v8, v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vwaddu.wv v16, v16, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: vredsum.vs v8, v16, v8
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %idx.ext8 = sext i32 %strideb to i64
+ %idx.ext = sext i32 %stridea to i64
+ %0 = load <16 x i8>, ptr %a, align 1
+ %1 = zext <16 x i8> %0 to <16 x i32>
+ %2 = load <16 x i8>, ptr %b, align 1
+ %3 = zext <16 x i8> %2 to <16 x i32>
+ %4 = sub nsw <16 x i32> %1, %3
+ %5 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %4, i1 true)
+ %6 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
+ %add.ptr = getelementptr inbounds i8, ptr %a, i64 %idx.ext
+ %add.ptr9 = getelementptr inbounds i8, ptr %b, i64 %idx.ext8
+ %7 = load <16 x i8>, ptr %add.ptr, align 1
+ %8 = zext <16 x i8> %7 to <16 x i32>
+ %9 = load <16 x i8>, ptr %add.ptr9, align 1
+ %10 = zext <16 x i8> %9 to <16 x i32>
+ %11 = sub nsw <16 x i32> %8, %10
+ %12 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %11, i1 true)
+ %13 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %12)
+ %op.rdx.1 = add i32 %13, %6
+ %add.ptr.1 = getelementptr inbounds i8, ptr %add.ptr, i64 %idx.ext
+ %add.ptr9.1 = getelementptr inbounds i8, ptr %add.ptr9, i64 %idx.ext8
+ %14 = load <16 x i8>, ptr %add.ptr.1, align 1
+ %15 = zext <16 x i8> %14 to <16 x i32>
+ %16 = load <16 x i8>, ptr %add.ptr9.1, align 1
+ %17 = zext <16 x i8> %16 to <16 x i32>
+ %18 = sub nsw <16 x i32> %15, %17
+ %19 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %18, i1 true)
+ %20 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %19)
+ %op.rdx.2 = add i32 %20, %op.rdx.1
+ %add.ptr.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 %idx.ext
+ %add.ptr9.2 = getelementptr inbounds i8, ptr %add.ptr9.1, i64 %idx.ext8
+ %21 = load <16 x i8>, ptr %add.ptr.2, align 1
+ %22 = zext <16 x i8> %21 to <16 x i32>
+ %23 = load <16 x i8>, ptr %add.ptr9.2, align 1
+ %24 = zext <16 x i8> %23 to <16 x i32>
+ %25 = sub nsw <16 x i32> %22, %24
+ %26 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %25, i1 true)
+ %27 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %26)
+ %op.rdx.3 = add i32 %27, %op.rdx.2
+ ret i32 %op.rdx.3
+}
+
+declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+declare <4 x i16> @llvm.abs.v4i16(<4 x i16>, i1)
+declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>)
+
+declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1)
+declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
+declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
+declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
index 8474f95..98e6b8f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
@@ -5,59 +5,6 @@
; RUN: llc < %s -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-max=128 -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLS %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLS %s
-define <8 x i16> @concat_2xv4i16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK-LABEL: concat_2xv4i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 4
-; CHECK-NEXT: ret
- %ab = shufflevector <4 x i16> %a, <4 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i16> %ab
-}
-
-define <8 x i16> @concat_4xv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
-; CHECK-LABEL: concat_4xv2i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v11, 2
-; CHECK-NEXT: vslideup.vi v8, v9, 2
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 4
-; CHECK-NEXT: ret
- %ab = shufflevector <2 x i16> %a, <2 x i16> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %cd = shufflevector <2 x i16> %c, <2 x i16> %d, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %abcd = shufflevector <4 x i16> %ab, <4 x i16> %cd, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i16> %abcd
-}
-
-define <8 x i16> @concat_8xv1i16(<1 x i16> %a, <1 x i16> %b, <1 x i16> %c, <1 x i16> %d, <1 x i16> %e, <1 x i16> %f, <1 x i16> %g, <1 x i16> %h) {
-; CHECK-LABEL: concat_8xv1i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v12, v13, 1
-; CHECK-NEXT: vsetivli zero, 3, e16, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v12, v14, 2
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v12, v15, 3
-; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 1
-; CHECK-NEXT: vsetivli zero, 3, e16, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 2
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v11, 3
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v12, 4
-; CHECK-NEXT: ret
- %ab = shufflevector <1 x i16> %a, <1 x i16> %b, <2 x i32> <i32 0, i32 1>
- %cd = shufflevector <1 x i16> %c, <1 x i16> %d, <2 x i32> <i32 0, i32 1>
- %abcd = shufflevector <2 x i16> %ab, <2 x i16> %cd, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %ef = shufflevector <1 x i16> %e, <1 x i16> %f, <2 x i32> <i32 0, i32 1>
- %gh = shufflevector <1 x i16> %g, <1 x i16> %h, <2 x i32> <i32 0, i32 1>
- %efgh = shufflevector <2 x i16> %ef, <2 x i16> %gh, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %abcdefgh = shufflevector <4 x i16> %abcd, <4 x i16> %efgh, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i16> %abcdefgh
-}
-
define <8 x i32> @concat_2xv4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: concat_2xv4i32:
; CHECK: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
index 37902aa..657d523 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
@@ -24,15 +24,18 @@ define void @widen_2xv4i16(ptr %x, ptr %z) {
define void @widen_3xv4i16(ptr %x, ptr %z) {
; CHECK-LABEL: widen_3xv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, a0, 16
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vle16.v v8, (a2)
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: addi a2, a0, 8
+; CHECK-NEXT: vle16.v v9, (a2)
+; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 8
+; CHECK-NEXT: vslideup.vi v8, v10, 8
; CHECK-NEXT: vsetivli zero, 12, e16, m2, ta, ma
-; CHECK-NEXT: vse16.v v10, (a1)
+; CHECK-NEXT: vse16.v v8, (a1)
; CHECK-NEXT: ret
%a = load <4 x i16>, ptr %x
%b.gep = getelementptr i8, ptr %x, i64 8
@@ -181,14 +184,20 @@ define void @strided_constant_0(ptr %x, ptr %z) {
define void @strided_constant_mismatch_4xv4i16(ptr %x, ptr %z) {
; CHECK-LABEL: strided_constant_mismatch_4xv4i16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: addi a2, a0, 6
-; CHECK-NEXT: li a3, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v8, (a0), a3
-; CHECK-NEXT: vlse64.v v10, (a2), a3
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 2
-; CHECK-NEXT: vse64.v v8, (a1)
+; CHECK-NEXT: vle16.v v10, (a2)
+; CHECK-NEXT: addi a2, a0, 2
+; CHECK-NEXT: addi a0, a0, 8
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v11, (a2)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v10, v9, 4
+; CHECK-NEXT: vslideup.vi v8, v11, 4
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v10, 8
+; CHECK-NEXT: vse16.v v8, (a1)
; CHECK-NEXT: ret
%a = load <4 x i16>, ptr %x
%b.gep = getelementptr i8, ptr %x, i64 2
@@ -244,38 +253,56 @@ define void @strided_runtime_4xv4i16(ptr %x, ptr %z, i64 %s) {
define void @strided_runtime_mismatch_4xv4i16(ptr %x, ptr %z, i64 %s, i64 %t) {
; RV32-LABEL: strided_runtime_mismatch_4xv4i16:
; RV32: # %bb.0:
-; RV32-NEXT: add a3, a0, a2
-; RV32-NEXT: add a3, a3, a4
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v8, (a0), a2
-; RV32-NEXT: vlse64.v v10, (a3), a2
-; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vslideup.vi v8, v10, 2
-; RV32-NEXT: vse64.v v8, (a1)
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: add a0, a0, a2
+; RV32-NEXT: add a4, a0, a4
+; RV32-NEXT: vle16.v v10, (a4)
+; RV32-NEXT: add a2, a4, a2
+; RV32-NEXT: vle16.v v9, (a2)
+; RV32-NEXT: vle16.v v11, (a0)
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslideup.vi v10, v9, 4
+; RV32-NEXT: vslideup.vi v8, v11, 4
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslideup.vi v8, v10, 8
+; RV32-NEXT: vse16.v v8, (a1)
; RV32-NEXT: ret
;
; RV64-LABEL: strided_runtime_mismatch_4xv4i16:
; RV64: # %bb.0:
-; RV64-NEXT: add a4, a0, a2
-; RV64-NEXT: add a3, a4, a3
-; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT: vlse64.v v8, (a0), a2
-; RV64-NEXT: vlse64.v v10, (a3), a2
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vslideup.vi v8, v10, 2
-; RV64-NEXT: vse64.v v8, (a1)
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: add a0, a0, a2
+; RV64-NEXT: add a3, a0, a3
+; RV64-NEXT: vle16.v v10, (a3)
+; RV64-NEXT: add a2, a3, a2
+; RV64-NEXT: vle16.v v9, (a2)
+; RV64-NEXT: vle16.v v11, (a0)
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslideup.vi v10, v9, 4
+; RV64-NEXT: vslideup.vi v8, v11, 4
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslideup.vi v8, v10, 8
+; RV64-NEXT: vse16.v v8, (a1)
; RV64-NEXT: ret
;
; ZVE64F-LABEL: strided_runtime_mismatch_4xv4i16:
; ZVE64F: # %bb.0:
-; ZVE64F-NEXT: add a4, a0, a2
-; ZVE64F-NEXT: add a3, a4, a3
-; ZVE64F-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; ZVE64F-NEXT: vlse64.v v8, (a0), a2
-; ZVE64F-NEXT: vlse64.v v10, (a3), a2
-; ZVE64F-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; ZVE64F-NEXT: vslideup.vi v8, v10, 2
-; ZVE64F-NEXT: vse64.v v8, (a1)
+; ZVE64F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVE64F-NEXT: vle16.v v8, (a0)
+; ZVE64F-NEXT: add a0, a0, a2
+; ZVE64F-NEXT: add a3, a0, a3
+; ZVE64F-NEXT: vle16.v v10, (a3)
+; ZVE64F-NEXT: add a2, a3, a2
+; ZVE64F-NEXT: vle16.v v9, (a2)
+; ZVE64F-NEXT: vle16.v v11, (a0)
+; ZVE64F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVE64F-NEXT: vslideup.vi v10, v9, 4
+; ZVE64F-NEXT: vslideup.vi v8, v11, 4
+; ZVE64F-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVE64F-NEXT: vslideup.vi v8, v10, 8
+; ZVE64F-NEXT: vse16.v v8, (a1)
; ZVE64F-NEXT: ret
%a = load <4 x i16>, ptr %x
%b.gep = getelementptr i8, ptr %x, i64 %s
@@ -534,3 +561,28 @@ define void @reverse_strided_runtime_4xv2f32(ptr %x, ptr %z, i64 %s) {
store <8 x float> %e.2, ptr %z
ret void
}
+
+; The middle end sometimes produces this pattern of shuffles, where the
+; intermediate shuffles are widened to the full result vector size and padded
+; with poison elements.
+define <16 x i8> @widen_4xv4i8_immediate_expand(ptr %p, i64 %s) {
+; CHECK-LABEL: widen_4xv4i8_immediate_expand:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vlse32.v v8, (a0), a1
+; CHECK-NEXT: ret
+ %a = load <4 x i8>, ptr %p
+ %b.ptr = getelementptr i8, ptr %p, i64 %s
+ %b = load <4 x i8>, ptr %b.ptr
+ %c.ptr = getelementptr i8, ptr %b.ptr, i64 %s
+ %c = load <4 x i8>, ptr %c.ptr
+ %d.ptr = getelementptr i8, ptr %c.ptr, i64 %s
+ %d = load <4 x i8>, ptr %d.ptr
+
+ %ab = shufflevector <4 x i8> %a, <4 x i8> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %cx = shufflevector <4 x i8> %c, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %dx = shufflevector <4 x i8> %d, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %abcx = shufflevector <16 x i8> %ab, <16 x i8> %cx, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
+ %abcd = shufflevector <16 x i8> %abcx, <16 x i8> %dx, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+ ret <16 x i8> %abcd
+}
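+
+; For contrast, an editorial sketch of the same concatenation written with
+; exact-width intermediate shuffles; the test above deliberately uses the
+; poison-padded full-width form that the middle end emits instead:
+define <16 x i8> @concat_4xv4i8_direct_sketch(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
+  %ab = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %cd = shufflevector <4 x i8> %c, <4 x i8> %d, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %abcd = shufflevector <8 x i8> %ab, <8 x i8> %cd, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %abcd
+}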
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
index 57a72c6..bc0bf5d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
@@ -385,12 +385,12 @@ define <32 x i64> @vwaddu_v32i64(ptr %x, ptr %y) nounwind {
define <2 x i32> @vwaddu_v2i32_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwaddu_v2i32_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
@@ -912,12 +912,12 @@ define <4 x i64> @crash(<4 x i16> %x, <4 x i16> %y) {
define <2 x i32> @vwaddu_v2i32_of_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwaddu_v2i32_of_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
@@ -930,12 +930,12 @@ define <2 x i32> @vwaddu_v2i32_of_v2i8(ptr %x, ptr %y) {
define <2 x i64> @vwaddu_v2i64_of_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwaddu_v2i64_of_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v11, v9
-; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
@@ -948,12 +948,12 @@ define <2 x i64> @vwaddu_v2i64_of_v2i8(ptr %x, ptr %y) {
define <2 x i64> @vwaddu_v2i64_of_v2i16(ptr %x, ptr %y) {
; CHECK-LABEL: vwaddu_v2i64_of_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vle16.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i16>, ptr %x
%b = load <2 x i16>, ptr %y
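; Editorial aside, not part of the diff: the change above relies on the
; observation that the sum of two zero-extended narrow values always fits in
; the double-width type, so one widening add followed by a zero extend is
; equivalent to zero-extending both operands first. IR sketch for the i8 case:
define <2 x i32> @vwaddu_ref_sketch(<2 x i8> %a, <2 x i8> %b) {
  %aw = zext <2 x i8> %a to <2 x i16>
  %bw = zext <2 x i8> %b to <2 x i16>
  %s = add <2 x i16> %aw, %bw
  %r = zext <2 x i16> %s to <2 x i32>
  ret <2 x i32> %r
}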
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
index bff7ef8..b97c965 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -391,12 +391,12 @@ define <32 x i64> @vwmulu_v32i64(ptr %x, ptr %y) {
define <2 x i32> @vwmulu_v2i32_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwmulu_v2i32_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwmulu.vv v8, v10, v11
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsll.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsll.ll
new file mode 100644
index 0000000..f5305a1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsll.ll
@@ -0,0 +1,920 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB
+
+; ==============================================================================
+; i32 -> i64
+; ==============================================================================
+
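+; Editorial sketch of the scalar identity this file exercises: zero extend the
+; value, bring the shift amount to the wide type, then shift. The file name
+; suggests these are meant to eventually select vwsll under +zvbb; shift
+; amounts are assumed to stay below the wide element width.
+define i64 @wsll_scalar_ref(i32 %a, i32 %b) {
+  %x = zext i32 %a to i64
+  %y = zext i32 %b to i64
+  %z = shl i64 %x, %y
+  ret i64 %z
+}
+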
+define <4 x i64> @vwsll_vv_v4i64_sext(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vwsll_vv_v4i64_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v4i64_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = sext <4 x i32> %b to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vv_v4i64_zext(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vwsll_vv_v4i64_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v4i64_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = zext <4 x i32> %b to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i64_v4i64(<4 x i32> %a, i64 %b) {
+; CHECK-LABEL: vwsll_vx_i64_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vx v8, v10, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i64_v4i64:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vx v8, v10, a0
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i64> poison, i64 %b, i32 0
+ %splat = shufflevector <4 x i64> %head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i32> %a to <4 x i64>
+ %z = shl <4 x i64> %x, %splat
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i32_v4i64_sext(<4 x i32> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_v4i64_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_v4i64_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %splat = shufflevector <4 x i32> %head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = sext <4 x i32> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i32_v4i64_zext(<4 x i32> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_v4i64_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_v4i64_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %splat = shufflevector <4 x i32> %head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = zext <4 x i32> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i16_v4i64_sext(<4 x i32> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_v4i64_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf4 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_v4i64_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf4 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <4 x i16> %head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = sext <4 x i16> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i16_v4i64_zext(<4 x i32> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_v4i64_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf4 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_v4i64_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf4 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <4 x i16> %head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = zext <4 x i16> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i8_v4i64_sext(<4 x i32> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v4i64_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf8 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v4i64_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf8 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <4 x i8> %head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = sext <4 x i8> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i8_v4i64_zext(<4 x i32> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v4i64_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf8 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v4i64_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf8 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <4 x i8> %head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = zext <4 x i8> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vi_v4i64(<4 x i32> %a) {
+; CHECK-LABEL: vwsll_vi_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vi_v4i64:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vi v8, v10, 2
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <4 x i32> %a to <4 x i64>
+ %z = shl <4 x i64> %x, splat (i64 2)
+ ret <4 x i64> %z
+}
+
+; ==============================================================================
+; i16 -> i32
+; ==============================================================================
+
+define <8 x i32> @vwsll_vv_v8i32_sext(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vwsll_vv_v8i32_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v8i32_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <8 x i16> %a to <8 x i32>
+ %y = sext <8 x i16> %b to <8 x i32>
+ %z = shl <8 x i32> %x, %y
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vv_v8i32_zext(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vwsll_vv_v8i32_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v8i32_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <8 x i16> %a to <8 x i32>
+ %y = zext <8 x i16> %b to <8 x i32>
+ %z = shl <8 x i32> %x, %y
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vx_i64_v8i32(<8 x i16> %a, i64 %b) {
+; CHECK-LABEL: vwsll_vx_i64_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vx v8, v10, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i64_v8i32:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vx v8, v10, a0
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <8 x i64> poison, i64 %b, i32 0
+ %splat = shufflevector <8 x i64> %head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %x = zext <8 x i16> %a to <8 x i32>
+ %y = trunc <8 x i64> %splat to <8 x i32>
+ %z = shl <8 x i32> %x, %y
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vx_i32_v8i32(<8 x i16> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vx v8, v10, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_v8i32:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vx v8, v10, a0
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <8 x i32> poison, i32 %b, i32 0
+ %splat = shufflevector <8 x i32> %head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %x = zext <8 x i16> %a to <8 x i32>
+ %z = shl <8 x i32> %x, %splat
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vx_i16_v8i32_sext(<8 x i16> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_v8i32_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_v8i32_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <8 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <8 x i16> %head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %x = zext <8 x i16> %a to <8 x i32>
+ %y = sext <8 x i16> %splat to <8 x i32>
+ %z = shl <8 x i32> %x, %y
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vx_i16_v8i32_zext(<8 x i16> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_v8i32_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_v8i32_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <8 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <8 x i16> %head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %x = zext <8 x i16> %a to <8 x i32>
+ %y = zext <8 x i16> %splat to <8 x i32>
+ %z = shl <8 x i32> %x, %y
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vx_i8_v8i32_sext(<8 x i16> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v8i32_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf4 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v8i32_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf4 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <8 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <8 x i8> %head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %x = zext <8 x i16> %a to <8 x i32>
+ %y = sext <8 x i8> %splat to <8 x i32>
+ %z = shl <8 x i32> %x, %y
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vx_i8_v8i32_zext(<8 x i16> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v8i32_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf4 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v8i32_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf4 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <8 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <8 x i8> %head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %x = zext <8 x i16> %a to <8 x i32>
+ %y = zext <8 x i8> %splat to <8 x i32>
+ %z = shl <8 x i32> %x, %y
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vi_v8i32(<8 x i16> %a) {
+; CHECK-LABEL: vwsll_vi_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vi_v8i32:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vi v8, v10, 2
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <8 x i16> %a to <8 x i32>
+ %z = shl <8 x i32> %x, splat (i32 2)
+ ret <8 x i32> %z
+}
+
+; ==============================================================================
+; i8 -> i16
+; ==============================================================================
+
+define <16 x i16> @vwsll_vv_v16i16_sext(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vwsll_vv_v16i16_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v16i16_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <16 x i8> %a to <16 x i16>
+ %y = sext <16 x i8> %b to <16 x i16>
+ %z = shl <16 x i16> %x, %y
+ ret <16 x i16> %z
+}
+
+define <16 x i16> @vwsll_vv_v16i16_zext(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vwsll_vv_v16i16_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v16i16_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <16 x i8> %a to <16 x i16>
+ %y = zext <16 x i8> %b to <16 x i16>
+ %z = shl <16 x i16> %x, %y
+ ret <16 x i16> %z
+}
+
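+; NOTE: no CHECK lines are emitted for the next function, presumably because
+; the RV32 and RV64 outputs differ (an i64 splat is materialized differently
+; on RV32) while the RUN lines share one check prefix, so the update script
+; drops the conflicting assertions.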
+define <16 x i16> @vwsll_vx_i64_v16i16(<16 x i8> %a, i64 %b) {
+ %head = insertelement <8 x i64> poison, i64 %b, i32 0
+ %splat = shufflevector <8 x i64> %head, <8 x i64> poison, <16 x i32> zeroinitializer
+ %x = zext <16 x i8> %a to <16 x i16>
+ %y = trunc <16 x i64> %splat to <16 x i16>
+ %z = shl <16 x i16> %x, %y
+ ret <16 x i16> %z
+}
+
+define <16 x i16> @vwsll_vx_i32_v16i16(<16 x i8> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vmv.v.x v12, a0
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vsll.vv v8, v10, v8
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_v16i16:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v12, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v8
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <16 x i32> poison, i32 %b, i32 0
+ %splat = shufflevector <16 x i32> %head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %x = zext <16 x i8> %a to <16 x i16>
+ %y = trunc <16 x i32> %splat to <16 x i16>
+ %z = shl <16 x i16> %x, %y
+ ret <16 x i16> %z
+}
+
+define <16 x i16> @vwsll_vx_i16_v16i16(<16 x i8> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vx v8, v10, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_v16i16:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vx v8, v10, a0
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <16 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <16 x i16> %head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %x = zext <16 x i8> %a to <16 x i16>
+ %z = shl <16 x i16> %x, %splat
+ ret <16 x i16> %z
+}
+
+define <16 x i16> @vwsll_vx_i8_v16i16_sext(<16 x i8> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v16i16_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v16i16_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <16 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <16 x i8> %head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %x = zext <16 x i8> %a to <16 x i16>
+ %y = sext <16 x i8> %splat to <16 x i16>
+ %z = shl <16 x i16> %x, %y
+ ret <16 x i16> %z
+}
+
+define <16 x i16> @vwsll_vx_i8_v16i16_zext(<16 x i8> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v16i16_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v16i16_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <16 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <16 x i8> %head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %x = zext <16 x i8> %a to <16 x i16>
+ %y = zext <16 x i8> %splat to <16 x i16>
+ %z = shl <16 x i16> %x, %y
+ ret <16 x i16> %z
+}
+
+define <16 x i16> @vwsll_vi_v16i16(<16 x i8> %a) {
+; CHECK-LABEL: vwsll_vi_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vi_v16i16:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vi v8, v10, 2
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <16 x i8> %a to <16 x i16>
+ %z = shl <16 x i16> %x, splat (i16 2)
+ ret <16 x i16> %z
+}
+
+; ==============================================================================
+; i8 -> i64
+; ==============================================================================
+
+define <4 x i64> @vwsll_vv_v4i64_v4i8_sext(<4 x i8> %a, <4 x i8> %b) {
+; CHECK-LABEL: vwsll_vv_v4i64_v4i8_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vsext.vf8 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v4i64_v4i8_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf8 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = sext <4 x i8> %b to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vv_v4i64_v4i8_zext(<4 x i8> %a, <4 x i8> %b) {
+; CHECK-LABEL: vwsll_vv_v4i64_v4i8_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vzext.vf8 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v4i64_v4i8_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf8 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = zext <4 x i8> %b to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i64_v4i64_v4i8(<4 x i8> %a, i64 %b) {
+; CHECK-LABEL: vwsll_vx_i64_v4i64_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vsll.vx v8, v10, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i64_v4i64_v4i8:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vx v8, v10, a0
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i64> poison, i64 %b, i32 0
+ %splat = shufflevector <4 x i64> %head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i8> %a to <4 x i64>
+ %z = shl <4 x i64> %x, %splat
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i32_v4i64_v4i8_sext(<4 x i8> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_v4i64_v4i8_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vsext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_v4i64_v4i8_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %splat = shufflevector <4 x i32> %head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = sext <4 x i32> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i32_v4i64_v4i8_zext(<4 x i8> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_v4i64_v4i8_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vzext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_v4i64_v4i8_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %splat = shufflevector <4 x i32> %head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = zext <4 x i32> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i16_v4i64_v4i8_sext(<4 x i8> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_v4i64_v4i8_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vsext.vf4 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_v4i64_v4i8_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf4 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <4 x i16> %head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = sext <4 x i16> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i16_v4i64_v4i8_zext(<4 x i8> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_v4i64_v4i8_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vzext.vf4 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_v4i64_v4i8_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf4 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <4 x i16> %head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = zext <4 x i16> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i8_v4i64_v4i8_sext(<4 x i8> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v4i64_v4i8_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vsext.vf8 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v4i64_v4i8_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf8 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <4 x i8> %head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = sext <4 x i8> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i8_v4i64_v4i8_zext(<4 x i8> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v4i64_v4i8_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vzext.vf8 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v4i64_v4i8_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf8 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <4 x i8> %head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = zext <4 x i8> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vi_v4i64_v4i8(<4 x i8> %a) {
+; CHECK-LABEL: vwsll_vi_v4i64_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vi_v4i64_v4i8:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vi v8, v10, 2
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <4 x i8> %a to <4 x i64>
+ %z = shl <4 x i64> %x, splat (i64 2)
+ ret <4 x i64> %z
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
index 0544204..52bd157 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
@@ -16,8 +16,8 @@ define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0 /* tu, mu */
- ; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store unknown-size into %ir.p, align 8)
+ ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size from %ir.p, align 8)
+ ; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store (<vscale x 1 x s64>) into %ir.p)
; CHECK-NEXT: PseudoRET
%splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
%mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
@@ -37,8 +37,8 @@ define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 1 /* ta, mu */
- ; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store unknown-size into %ir.p, align 8)
+ ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 1 /* ta, mu */ :: (load unknown-size from %ir.p, align 8)
+ ; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store (<vscale x 1 x s64>) into %ir.p)
; CHECK-NEXT: PseudoRET
%splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
%mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index a4aef57..7cc4a9d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -1187,3 +1187,30 @@ define <vscale x 2 x i32> @vmerge_larger_vl_false_becomes_tail(<vscale x 2 x i32
%b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %false, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 3)
ret <vscale x 2 x i32> %b
}
+
+; Test widening pseudos with their TIED variant (i.e. the passthru is the same as the first source operand).
+define <vscale x 2 x i64> @vpmerge_vwsub.w_tied(<vscale x 2 x i64> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 zeroext %vl) {
+; CHECK-LABEL: vpmerge_vwsub.w_tied:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vwsub.wv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %vl.zext = zext i32 %vl to i64
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(<vscale x 2 x i64> %passthru, <vscale x 2 x i64> %passthru, <vscale x 2 x i32> %y, i64 %vl.zext)
+ %b = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %a, <vscale x 2 x i64> %passthru, i32 %vl)
+ ret <vscale x 2 x i64> %b
+}
+
+define <vscale x 2 x double> @vpmerge_vfwsub.w_tied(<vscale x 2 x double> %passthru, <vscale x 2 x double> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %mask, i32 zeroext %vl) {
+; CHECK-LABEL: vpmerge_vfwsub.w_tied:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: fsrmi a0, 1
+; CHECK-NEXT: vfwsub.wv v8, v8, v12, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+ %vl.zext = zext i32 %vl to i64
+ %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(<vscale x 2 x double> %passthru, <vscale x 2 x double> %passthru, <vscale x 2 x float> %y, i64 1, i64 %vl.zext)
+ %b = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %mask, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
+ ret <vscale x 2 x double> %b
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir b/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
index 6ea6fb1..749bd4c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
@@ -159,7 +159,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 8
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
@@ -204,7 +204,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 16
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
@@ -249,7 +249,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 32
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
index a320aec..6a71208 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -18,10 +18,10 @@ define {<vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_load_nxv16i
; CHECK-NEXT: vmerge.vim v14, v10, 1, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
-; CHECK-NEXT: vnsrl.wi v8, v12, 0
-; CHECK-NEXT: vmsne.vi v0, v8, 0
-; CHECK-NEXT: vnsrl.wi v10, v12, 8
+; CHECK-NEXT: vnsrl.wi v10, v12, 0
; CHECK-NEXT: vmsne.vi v8, v10, 0
+; CHECK-NEXT: vnsrl.wi v10, v12, 8
+; CHECK-NEXT: vmsne.vi v9, v10, 0
; CHECK-NEXT: ret
%vec = load <vscale x 32 x i1>, ptr %p
%retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.experimental.vector.deinterleave2.nxv32i1(<vscale x 32 x i1> %vec)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index ef4baf3..d98597fa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -8,18 +8,18 @@ define {<vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv
; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a0
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT: vnsrl.wi v12, v8, 0
-; CHECK-NEXT: vmsne.vi v0, v12, 0
-; CHECK-NEXT: vnsrl.wi v12, v8, 8
-; CHECK-NEXT: vmsne.vi v8, v12, 0
+; CHECK-NEXT: vmerge.vim v14, v8, 1, v0
+; CHECK-NEXT: vnsrl.wi v10, v12, 0
+; CHECK-NEXT: vmsne.vi v8, v10, 0
+; CHECK-NEXT: vnsrl.wi v10, v12, 8
+; CHECK-NEXT: vmsne.vi v9, v10, 0
; CHECK-NEXT: ret
%retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.experimental.vector.deinterleave2.nxv32i1(<vscale x 32 x i1> %vec)
ret {<vscale x 16 x i1>, <vscale x 16 x i1>} %retval
@@ -102,12 +102,13 @@ define {<vscale x 64 x i1>, <vscale x 64 x i1>} @vector_deinterleave_nxv64i1_nxv
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v28, v8, 0
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v24, 0
+; CHECK-NEXT: vmsne.vi v7, v24, 0
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v24, v16, 8
; CHECK-NEXT: vnsrl.wi v28, v8, 8
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmsne.vi v8, v24, 0
+; CHECK-NEXT: vmsne.vi v9, v24, 0
+; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: ret
%retval = call {<vscale x 64 x i1>, <vscale x 64 x i1>} @llvm.experimental.vector.deinterleave2.nxv128i1(<vscale x 128 x i1> %vec)
ret {<vscale x 64 x i1>, <vscale x 64 x i1>} %retval
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
index 4aae8b8..9a5e86d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -101,40 +101,36 @@ define void @vector_interleave_store_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vs
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
; CHECK-NEXT: vsetvli a3, zero, e16, m2, ta, mu
; CHECK-NEXT: vid.v v24
; CHECK-NEXT: vand.vi v26, v24, 1
-; CHECK-NEXT: vmsne.vi v0, v26, 0
-; CHECK-NEXT: vsrl.vi v6, v24, 1
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a3, a3, 3
-; CHECK-NEXT: add a3, sp, a3
-; CHECK-NEXT: addi a3, a3, 16
-; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; CHECK-NEXT: vadd.vx v6, v6, a2, v0.t
+; CHECK-NEXT: vmsne.vi v28, v26, 0
+; CHECK-NEXT: vsrl.vi v24, v24, 1
+; CHECK-NEXT: vmv1r.v v0, v28
+; CHECK-NEXT: vadd.vx v24, v24, a2, v0.t
; CHECK-NEXT: vmv4r.v v12, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v24, v8, v6
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vrgatherei16.vv v0, v8, v24
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v0, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vrgatherei16.vv v8, v16, v6
+; CHECK-NEXT: vrgatherei16.vv v8, v16, v24
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vs8r.v v8, (a1)
-; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index 1acc0fe..0992c9f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -656,6 +656,24 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x
ret <vscale x 16 x double> %res
}

+define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32_poison(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; ZVBB-NEXT: vzext.vf2 v12, v8
+; ZVBB-NEXT: vmv.v.v v8, v12
+; ZVBB-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> poison)
+ ret <vscale x 8 x i32> %res
+}
+
declare <vscale x 64 x half> @llvm.experimental.vector.interleave2.nxv64f16(<vscale x 32 x half>, <vscale x 32 x half>)
declare <vscale x 32 x float> @llvm.experimental.vector.interleave2.nxv32f32(<vscale x 16 x float>, <vscale x 16 x float>)
declare <vscale x 16 x double> @llvm.experimental.vector.interleave2.nxv16f64(<vscale x 8 x double>, <vscale x 8 x double>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll b/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
index 972fa66..e56dca0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
@@ -283,18 +283,19 @@ define <vscale x 2 x i32> @vwop_vscale_sext_i8i32_multiple_users(ptr %x, ptr %y,
;
; FOLDING-LABEL: vwop_vscale_sext_i8i32_multiple_users:
; FOLDING: # %bb.0:
-; FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, ma
+; FOLDING-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
; FOLDING-NEXT: vle8.v v8, (a0)
; FOLDING-NEXT: vle8.v v9, (a1)
; FOLDING-NEXT: vle8.v v10, (a2)
-; FOLDING-NEXT: vsext.vf4 v11, v8
-; FOLDING-NEXT: vsext.vf4 v8, v9
-; FOLDING-NEXT: vsext.vf4 v9, v10
-; FOLDING-NEXT: vmul.vv v8, v11, v8
-; FOLDING-NEXT: vadd.vv v10, v11, v9
-; FOLDING-NEXT: vsub.vv v9, v11, v9
-; FOLDING-NEXT: vor.vv v8, v8, v10
-; FOLDING-NEXT: vor.vv v8, v8, v9
+; FOLDING-NEXT: vsext.vf2 v11, v8
+; FOLDING-NEXT: vsext.vf2 v8, v9
+; FOLDING-NEXT: vsext.vf2 v9, v10
+; FOLDING-NEXT: vwmul.vv v10, v11, v8
+; FOLDING-NEXT: vwadd.vv v8, v11, v9
+; FOLDING-NEXT: vwsub.vv v12, v11, v9
+; FOLDING-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; FOLDING-NEXT: vor.vv v8, v10, v8
+; FOLDING-NEXT: vor.vv v8, v8, v12
; FOLDING-NEXT: ret
%a = load <vscale x 2 x i8>, ptr %x
%b = load <vscale x 2 x i8>, ptr %y
@@ -563,18 +564,19 @@ define <vscale x 2 x i32> @vwop_vscale_zext_i8i32_multiple_users(ptr %x, ptr %y,
;
; FOLDING-LABEL: vwop_vscale_zext_i8i32_multiple_users:
; FOLDING: # %bb.0:
-; FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, ma
+; FOLDING-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
; FOLDING-NEXT: vle8.v v8, (a0)
; FOLDING-NEXT: vle8.v v9, (a1)
; FOLDING-NEXT: vle8.v v10, (a2)
-; FOLDING-NEXT: vzext.vf4 v11, v8
-; FOLDING-NEXT: vzext.vf4 v8, v9
-; FOLDING-NEXT: vzext.vf4 v9, v10
-; FOLDING-NEXT: vmul.vv v8, v11, v8
-; FOLDING-NEXT: vadd.vv v10, v11, v9
-; FOLDING-NEXT: vsub.vv v9, v11, v9
-; FOLDING-NEXT: vor.vv v8, v8, v10
-; FOLDING-NEXT: vor.vv v8, v8, v9
+; FOLDING-NEXT: vzext.vf2 v11, v8
+; FOLDING-NEXT: vzext.vf2 v8, v9
+; FOLDING-NEXT: vzext.vf2 v9, v10
+; FOLDING-NEXT: vwmulu.vv v10, v11, v8
+; FOLDING-NEXT: vwaddu.vv v8, v11, v9
+; FOLDING-NEXT: vwsubu.vv v12, v11, v9
+; FOLDING-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; FOLDING-NEXT: vor.vv v8, v10, v8
+; FOLDING-NEXT: vor.vv v8, v8, v12
; FOLDING-NEXT: ret
%a = load <vscale x 2 x i8>, ptr %x
%b = load <vscale x 2 x i8>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
index a559fbf..66e6883 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV64 %s

define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vwadd_vv_nxv1i64_nxv1i32:
@@ -421,10 +421,10 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i32(<vscale x 8 x i64> %va, i32
define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwadd_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -435,10 +435,10 @@ define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vsc
define <vscale x 1 x i64> @vwaddu_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -451,10 +451,10 @@ define <vscale x 1 x i64> @vwadd_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -468,11 +468,9 @@ define <vscale x 1 x i64> @vwaddu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK-LABEL: vwaddu_vx_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v9, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -485,9 +483,9 @@ define <vscale x 1 x i64> @vwaddu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
define <vscale x 1 x i64> @vwadd_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwadd_wv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v9
+; CHECK-NEXT: vwadd.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
%vd = add <vscale x 1 x i64> %va, %vc
@@ -497,9 +495,9 @@ define <vscale x 1 x i64> @vwadd_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vsc
define <vscale x 1 x i64> @vwaddu_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vwaddu.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
%vd = add <vscale x 1 x i64> %va, %vc
@@ -511,9 +509,9 @@ define <vscale x 1 x i64> @vwadd_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v9
+; CHECK-NEXT: vwadd.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -527,9 +525,9 @@ define <vscale x 1 x i64> @vwaddu_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vwaddu.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -541,10 +539,10 @@ define <vscale x 1 x i64> @vwaddu_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
define <vscale x 2 x i64> @vwadd_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwadd_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -555,10 +553,10 @@ define <vscale x 2 x i64> @vwadd_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vsc
define <vscale x 2 x i64> @vwaddu_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -571,10 +569,10 @@ define <vscale x 2 x i64> @vwadd_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -588,11 +586,9 @@ define <vscale x 2 x i64> @vwaddu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK-LABEL: vwaddu_vx_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v10, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -605,9 +601,9 @@ define <vscale x 2 x i64> @vwaddu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
define <vscale x 2 x i64> @vwadd_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwadd_wv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v11, v10
+; CHECK-NEXT: vwadd.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
%vd = add <vscale x 2 x i64> %va, %vc
@@ -617,9 +613,9 @@ define <vscale x 2 x i64> @vwadd_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vsc
define <vscale x 2 x i64> @vwaddu_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v11, v10
+; CHECK-NEXT: vwaddu.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
%vd = add <vscale x 2 x i64> %va, %vc
@@ -631,9 +627,9 @@ define <vscale x 2 x i64> @vwadd_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v11, v10
+; CHECK-NEXT: vwadd.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -647,9 +643,9 @@ define <vscale x 2 x i64> @vwaddu_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v11, v10
+; CHECK-NEXT: vwaddu.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -661,10 +657,10 @@ define <vscale x 2 x i64> @vwaddu_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
define <vscale x 4 x i64> @vwadd_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwadd_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwadd.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -675,10 +671,10 @@ define <vscale x 4 x i64> @vwadd_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vsc
define <vscale x 4 x i64> @vwaddu_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vwaddu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -691,10 +687,10 @@ define <vscale x 4 x i64> @vwadd_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwadd.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -708,11 +704,9 @@ define <vscale x 4 x i64> @vwaddu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK-LABEL: vwaddu_vx_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v12, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -725,9 +719,9 @@ define <vscale x 4 x i64> @vwaddu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
define <vscale x 4 x i64> @vwadd_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwadd_wv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v14, v12
+; CHECK-NEXT: vwadd.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
%vd = add <vscale x 4 x i64> %va, %vc
@@ -737,9 +731,9 @@ define <vscale x 4 x i64> @vwadd_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vsc
define <vscale x 4 x i64> @vwaddu_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v14, v12
+; CHECK-NEXT: vwaddu.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
%vd = add <vscale x 4 x i64> %va, %vc
@@ -751,9 +745,9 @@ define <vscale x 4 x i64> @vwadd_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v14, v12
+; CHECK-NEXT: vwadd.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -767,9 +761,9 @@ define <vscale x 4 x i64> @vwaddu_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v14, v12
+; CHECK-NEXT: vwaddu.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -781,10 +775,10 @@ define <vscale x 4 x i64> @vwaddu_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
define <vscale x 8 x i64> @vwadd_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwadd_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwadd.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -795,10 +789,10 @@ define <vscale x 8 x i64> @vwadd_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vsc
define <vscale x 8 x i64> @vwaddu_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwaddu.vv v16, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v16
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -811,10 +805,10 @@ define <vscale x 8 x i64> @vwadd_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwadd.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -828,11 +822,9 @@ define <vscale x 8 x i64> @vwaddu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK-LABEL: vwaddu_vx_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
+; CHECK-NEXT: vwaddu.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vzext.vf2 v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -845,9 +837,9 @@ define <vscale x 8 x i64> @vwaddu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
define <vscale x 8 x i64> @vwadd_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwadd_wv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v20, v16
+; CHECK-NEXT: vwadd.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
%vd = add <vscale x 8 x i64> %va, %vc
@@ -857,9 +849,9 @@ define <vscale x 8 x i64> @vwadd_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vsc
define <vscale x 8 x i64> @vwaddu_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v20, v16
+; CHECK-NEXT: vwaddu.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
%vd = add <vscale x 8 x i64> %va, %vc
@@ -871,9 +863,9 @@ define <vscale x 8 x i64> @vwadd_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v20, v16
+; CHECK-NEXT: vwadd.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -887,9 +879,9 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v20, v16
+; CHECK-NEXT: vwaddu.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -901,10 +893,10 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwadd_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -915,10 +907,10 @@ define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscal
define <vscale x 1 x i64> @vwaddu_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -931,10 +923,10 @@ define <vscale x 1 x i64> @vwadd_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -948,11 +940,9 @@ define <vscale x 1 x i64> @vwaddu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b
; CHECK-LABEL: vwaddu_vx_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v9, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vzext.vf4 v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -965,9 +955,9 @@ define <vscale x 1 x i64> @vwaddu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b
define <vscale x 1 x i64> @vwadd_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwadd_wv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v9
+; CHECK-NEXT: vwadd.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
%vd = add <vscale x 1 x i64> %va, %vc
@@ -977,9 +967,9 @@ define <vscale x 1 x i64> @vwadd_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vsca
define <vscale x 1 x i64> @vwaddu_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vwaddu.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
%vd = add <vscale x 1 x i64> %va, %vc
@@ -991,9 +981,9 @@ define <vscale x 1 x i64> @vwadd_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v9
+; CHECK-NEXT: vwadd.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -1007,9 +997,9 @@ define <vscale x 1 x i64> @vwaddu_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vwaddu.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -1021,10 +1011,10 @@ define <vscale x 1 x i64> @vwaddu_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %
define <vscale x 2 x i64> @vwadd_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwadd_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -1035,10 +1025,10 @@ define <vscale x 2 x i64> @vwadd_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscal
define <vscale x 2 x i64> @vwaddu_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -1051,10 +1041,10 @@ define <vscale x 2 x i64> @vwadd_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1068,11 +1058,9 @@ define <vscale x 2 x i64> @vwaddu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b
; CHECK-LABEL: vwaddu_vx_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v10, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1085,9 +1073,9 @@ define <vscale x 2 x i64> @vwaddu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b
define <vscale x 2 x i64> @vwadd_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwadd_wv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v11, v10
+; CHECK-NEXT: vwadd.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
%vd = add <vscale x 2 x i64> %va, %vc
@@ -1097,9 +1085,9 @@ define <vscale x 2 x i64> @vwadd_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vsca
define <vscale x 2 x i64> @vwaddu_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v11, v10
+; CHECK-NEXT: vwaddu.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
%vd = add <vscale x 2 x i64> %va, %vc
@@ -1111,9 +1099,9 @@ define <vscale x 2 x i64> @vwadd_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v11, v10
+; CHECK-NEXT: vwadd.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1127,9 +1115,9 @@ define <vscale x 2 x i64> @vwaddu_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v11, v10
+; CHECK-NEXT: vwaddu.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1141,10 +1129,10 @@ define <vscale x 2 x i64> @vwaddu_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %
define <vscale x 4 x i64> @vwadd_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwadd_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwadd.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -1155,10 +1143,10 @@ define <vscale x 4 x i64> @vwadd_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscal
define <vscale x 4 x i64> @vwaddu_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vwaddu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -1171,10 +1159,10 @@ define <vscale x 4 x i64> @vwadd_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwadd.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1188,11 +1176,9 @@ define <vscale x 4 x i64> @vwaddu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b
; CHECK-LABEL: vwaddu_vx_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v12, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vzext.vf4 v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1205,9 +1191,9 @@ define <vscale x 4 x i64> @vwaddu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b
define <vscale x 4 x i64> @vwadd_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwadd_wv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v14, v12
+; CHECK-NEXT: vwadd.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
%vd = add <vscale x 4 x i64> %va, %vc
@@ -1217,9 +1203,9 @@ define <vscale x 4 x i64> @vwadd_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vsca
define <vscale x 4 x i64> @vwaddu_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v14, v12
+; CHECK-NEXT: vwaddu.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
%vd = add <vscale x 4 x i64> %va, %vc
@@ -1231,9 +1217,9 @@ define <vscale x 4 x i64> @vwadd_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v14, v12
+; CHECK-NEXT: vwadd.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1247,9 +1233,9 @@ define <vscale x 4 x i64> @vwaddu_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v14, v12
+; CHECK-NEXT: vwaddu.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1261,10 +1247,10 @@ define <vscale x 4 x i64> @vwaddu_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %
define <vscale x 8 x i64> @vwadd_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwadd_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwadd.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1275,10 +1261,10 @@ define <vscale x 8 x i64> @vwadd_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscal
define <vscale x 8 x i64> @vwaddu_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwaddu.vv v16, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v16
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1291,10 +1277,10 @@ define <vscale x 8 x i64> @vwadd_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwadd.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1308,11 +1294,9 @@ define <vscale x 8 x i64> @vwaddu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b
; CHECK-LABEL: vwaddu_vx_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vzext.vf4 v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1325,9 +1309,9 @@ define <vscale x 8 x i64> @vwaddu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b
define <vscale x 8 x i64> @vwadd_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwadd_wv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v20, v16
+; CHECK-NEXT: vwadd.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
%vd = add <vscale x 8 x i64> %va, %vc
@@ -1337,9 +1321,9 @@ define <vscale x 8 x i64> @vwadd_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vsca
define <vscale x 8 x i64> @vwaddu_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v20, v16
+; CHECK-NEXT: vwaddu.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
%vd = add <vscale x 8 x i64> %va, %vc
@@ -1351,9 +1335,9 @@ define <vscale x 8 x i64> @vwadd_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v20, v16
+; CHECK-NEXT: vwadd.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1367,9 +1351,9 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v20, v16
+; CHECK-NEXT: vwaddu.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1377,3 +1361,108 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %
%vc = add <vscale x 8 x i64> %va, %vb
ret <vscale x 8 x i64> %vc
}
+
+; Make sure that we don't introduce any V{S,Z}EXT_VL nodes with i1 types from
+; combineBinOp_VLToVWBinOp_VL, since they can't be selected.
+define <vscale x 1 x i64> @i1_zext(<vscale x 1 x i1> %va, <vscale x 1 x i64> %vb, ptr %p) {
+; RV32-LABEL: i1_zext:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vmv.v.i v9, 0
+; RV32-NEXT: vmerge.vim v9, v9, 1, v0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: li a1, 42
+; RV32-NEXT: sh a1, 0(a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: i1_zext:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; RV64-NEXT: vadd.vi v8, v8, 1, v0.t
+; RV64-NEXT: li a1, 42
+; RV64-NEXT: sh a1, 0(a0)
+; RV64-NEXT: ret
+ %vc = zext <vscale x 1 x i1> %va to <vscale x 1 x i64>
+ %vd = add <vscale x 1 x i64> %vc, %vb
+
+; Introduce an illegal type so that the DAG changes after legalizing
+; types. Otherwise the legalize vector ops phase will be run immediately after
+; the legalize types phase, and the zext will already be in non-i1 form by the
+; time combineBinOp_VLToVWBinOp_VL is called.
+ store i9 42, ptr %p
+ ret <vscale x 1 x i64> %vd
+}
+
+; %x.i32 and %y.i32 are disjoint, so DAGCombiner will combine the add into an
+; or.
+; FIXME: We should be able to recover the or into vwaddu.vv if the disjoint
+; flag is set.
+define <vscale x 2 x i32> @vwaddu_vv_disjoint_or_add(<vscale x 2 x i8> %x.i8, <vscale x 2 x i8> %y.i8) {
+; CHECK-LABEL: vwaddu_vv_disjoint_or_add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vi v10, v10, 8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: ret
+ %x.i16 = zext <vscale x 2 x i8> %x.i8 to <vscale x 2 x i16>
+ %x.shl = shl <vscale x 2 x i16> %x.i16, shufflevector(<vscale x 2 x i16> insertelement(<vscale x 2 x i16> poison, i16 8, i32 0), <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer)
+ %x.i32 = zext <vscale x 2 x i16> %x.shl to <vscale x 2 x i32>
+ %y.i32 = zext <vscale x 2 x i8> %y.i8 to <vscale x 2 x i32>
+ %add = add <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %add
+}
+
+; TODO: We could select vwaddu.vv, but when both arms of the or are the same
+; operation, DAGCombiner::hoistLogicOpWithSameOpcodeHands moves the zext above
+; the or.
+define <vscale x 2 x i32> @vwaddu_vv_disjoint_or(<vscale x 2 x i16> %x.i16, <vscale x 2 x i16> %y.i16) {
+; CHECK-LABEL: vwaddu_vv_disjoint_or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vor.vv v9, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %x.i32 = zext <vscale x 2 x i16> %x.i16 to <vscale x 2 x i32>
+ %y.i32 = zext <vscale x 2 x i16> %y.i16 to <vscale x 2 x i32>
+ %or = or disjoint <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %or
+}
+
+; TODO: We could select vwadd.vv, but when both arms of the or are the same
+; operation, DAGCombiner::hoistLogicOpWithSameOpcodeHands moves the sext above
+; the or.
+define <vscale x 2 x i32> @vwadd_vv_disjoint_or(<vscale x 2 x i16> %x.i16, <vscale x 2 x i16> %y.i16) {
+; CHECK-LABEL: vwadd_vv_disjoint_or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vor.vv v9, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %x.i32 = sext <vscale x 2 x i16> %x.i16 to <vscale x 2 x i32>
+ %y.i32 = sext <vscale x 2 x i16> %y.i16 to <vscale x 2 x i32>
+ %or = or disjoint <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %or
+}
+
+define <vscale x 2 x i32> @vwaddu_wv_disjoint_or(<vscale x 2 x i32> %x.i32, <vscale x 2 x i16> %y.i16) {
+; CHECK-LABEL: vwaddu_wv_disjoint_or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vwaddu.wv v8, v8, v9
+; CHECK-NEXT: ret
+ %y.i32 = zext <vscale x 2 x i16> %y.i16 to <vscale x 2 x i32>
+ %or = or disjoint <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %or
+}
+
+define <vscale x 2 x i32> @vwadd_wv_disjoint_or(<vscale x 2 x i32> %x.i32, <vscale x 2 x i16> %y.i16) {
+; CHECK-LABEL: vwadd_wv_disjoint_or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vwadd.wv v8, v8, v9
+; CHECK-NEXT: ret
+ %y.i32 = sext <vscale x 2 x i16> %y.i16 to <vscale x 2 x i32>
+ %or = or disjoint <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %or
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
index 3634162..28fc53f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
@@ -341,10 +341,10 @@ define <vscale x 8 x i64> @vwmulsu_vx_nxv8i64_nxv8i32(<vscale x 8 x i32> %va, i3
define <vscale x 1 x i64> @vwmul_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwmul_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -355,10 +355,10 @@ define <vscale x 1 x i64> @vwmul_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vsc
define <vscale x 1 x i64> @vwmulu_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -369,10 +369,10 @@ define <vscale x 1 x i64> @vwmulu_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vs
define <vscale x 1 x i64> @vwmulsu_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -385,10 +385,10 @@ define <vscale x 1 x i64> @vwmul_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
@@ -402,11 +402,9 @@ define <vscale x 1 x i64> @vwmulu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK-LABEL: vwmulu_vx_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v9, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
@@ -421,10 +419,10 @@ define <vscale x 1 x i64> @vwmulsu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i1
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
@@ -437,10 +435,10 @@ define <vscale x 1 x i64> @vwmulsu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i1
define <vscale x 2 x i64> @vwmul_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwmul_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -451,10 +449,10 @@ define <vscale x 2 x i64> @vwmul_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vsc
define <vscale x 2 x i64> @vwmulu_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -465,10 +463,10 @@ define <vscale x 2 x i64> @vwmulu_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vs
define <vscale x 2 x i64> @vwmulsu_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -481,10 +479,10 @@ define <vscale x 2 x i64> @vwmul_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
@@ -498,11 +496,9 @@ define <vscale x 2 x i64> @vwmulu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK-LABEL: vwmulu_vx_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v10, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
@@ -517,10 +513,10 @@ define <vscale x 2 x i64> @vwmulsu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i1
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
@@ -533,10 +529,10 @@ define <vscale x 2 x i64> @vwmulsu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i1
define <vscale x 4 x i64> @vwmul_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwmul_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwmul.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -547,10 +543,10 @@ define <vscale x 4 x i64> @vwmul_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vsc
define <vscale x 4 x i64> @vwmulu_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vwmulu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -561,10 +557,10 @@ define <vscale x 4 x i64> @vwmulu_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vs
define <vscale x 4 x i64> @vwmulsu_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vzext.vf2 v14, v9
+; CHECK-NEXT: vwmulsu.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -577,10 +573,10 @@ define <vscale x 4 x i64> @vwmul_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwmul.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
@@ -594,11 +590,9 @@ define <vscale x 4 x i64> @vwmulu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK-LABEL: vwmulu_vx_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v12, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
@@ -613,10 +607,10 @@ define <vscale x 4 x i64> @vwmulsu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i1
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vzext.vf2 v14, v9
+; CHECK-NEXT: vwmulsu.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
@@ -629,10 +623,10 @@ define <vscale x 4 x i64> @vwmulsu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i1
define <vscale x 8 x i64> @vwmul_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwmul_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwmul.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -643,10 +637,10 @@ define <vscale x 8 x i64> @vwmul_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vsc
define <vscale x 8 x i64> @vwmulu_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vv v16, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v16
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -657,10 +651,10 @@ define <vscale x 8 x i64> @vwmulu_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vs
define <vscale x 8 x i64> @vwmulsu_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vzext.vf2 v20, v10
+; CHECK-NEXT: vwmulsu.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -673,10 +667,10 @@ define <vscale x 8 x i64> @vwmul_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwmul.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
@@ -690,11 +684,9 @@ define <vscale x 8 x i64> @vwmulu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK-LABEL: vwmulu_vx_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
+; CHECK-NEXT: vwmulu.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vzext.vf2 v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
@@ -709,10 +701,10 @@ define <vscale x 8 x i64> @vwmulsu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i1
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vzext.vf2 v20, v10
+; CHECK-NEXT: vwmulsu.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
@@ -725,10 +717,10 @@ define <vscale x 8 x i64> @vwmulsu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i1
define <vscale x 1 x i64> @vwmul_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwmul_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -739,10 +731,10 @@ define <vscale x 1 x i64> @vwmul_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscal
define <vscale x 1 x i64> @vwmulu_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -753,10 +745,10 @@ define <vscale x 1 x i64> @vwmulu_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vsca
define <vscale x 1 x i64> @vwmulsu_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -769,10 +761,10 @@ define <vscale x 1 x i64> @vwmul_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
@@ -786,11 +778,9 @@ define <vscale x 1 x i64> @vwmulu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b
; CHECK-LABEL: vwmulu_vx_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v9, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vzext.vf4 v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
@@ -805,10 +795,10 @@ define <vscale x 1 x i64> @vwmulsu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
@@ -821,10 +811,10 @@ define <vscale x 1 x i64> @vwmulsu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %
define <vscale x 2 x i64> @vwmul_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwmul_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -835,10 +825,10 @@ define <vscale x 2 x i64> @vwmul_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscal
define <vscale x 2 x i64> @vwmulu_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -849,10 +839,10 @@ define <vscale x 2 x i64> @vwmulu_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vsca
define <vscale x 2 x i64> @vwmulsu_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -865,10 +855,10 @@ define <vscale x 2 x i64> @vwmul_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
@@ -882,11 +872,9 @@ define <vscale x 2 x i64> @vwmulu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b
; CHECK-LABEL: vwmulu_vx_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v10, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
@@ -901,10 +889,10 @@ define <vscale x 2 x i64> @vwmulsu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
@@ -917,10 +905,10 @@ define <vscale x 2 x i64> @vwmulsu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %
define <vscale x 4 x i64> @vwmul_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwmul_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwmul.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -931,10 +919,10 @@ define <vscale x 4 x i64> @vwmul_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscal
define <vscale x 4 x i64> @vwmulu_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vwmulu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -945,10 +933,10 @@ define <vscale x 4 x i64> @vwmulu_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vsca
define <vscale x 4 x i64> @vwmulsu_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vzext.vf4 v14, v9
+; CHECK-NEXT: vwmulsu.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -961,10 +949,10 @@ define <vscale x 4 x i64> @vwmul_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwmul.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
@@ -978,11 +966,9 @@ define <vscale x 4 x i64> @vwmulu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b
; CHECK-LABEL: vwmulu_vx_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v12, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vzext.vf4 v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
@@ -997,10 +983,10 @@ define <vscale x 4 x i64> @vwmulsu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vzext.vf4 v14, v9
+; CHECK-NEXT: vwmulsu.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
@@ -1013,10 +999,10 @@ define <vscale x 4 x i64> @vwmulsu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %
define <vscale x 8 x i64> @vwmul_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwmul_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwmul.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1027,10 +1013,10 @@ define <vscale x 8 x i64> @vwmul_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscal
define <vscale x 8 x i64> @vwmulu_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vv v16, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v16
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1041,10 +1027,10 @@ define <vscale x 8 x i64> @vwmulu_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vsca
define <vscale x 8 x i64> @vwmulsu_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vzext.vf4 v20, v9
+; CHECK-NEXT: vwmulsu.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1057,10 +1043,10 @@ define <vscale x 8 x i64> @vwmul_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwmul.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
@@ -1074,11 +1060,9 @@ define <vscale x 8 x i64> @vwmulu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b
; CHECK-LABEL: vwmulu_vx_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vzext.vf4 v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
@@ -1093,10 +1077,10 @@ define <vscale x 8 x i64> @vwmulsu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vzext.vf4 v20, v9
+; CHECK-NEXT: vwmulsu.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
index 123469a..852814d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
@@ -421,10 +421,10 @@ define <vscale x 8 x i64> @vwsubu_wx_nxv8i64_nxv8i32(<vscale x 8 x i64> %va, i32
define <vscale x 1 x i64> @vwsub_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwsub_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vsub.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -451,10 +451,10 @@ define <vscale x 1 x i64> @vwsub_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vsub.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -483,9 +483,9 @@ define <vscale x 1 x i64> @vwsubu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
define <vscale x 1 x i64> @vwsub_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwsub_wv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v9
+; CHECK-NEXT: vwsub.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
%vd = sub <vscale x 1 x i64> %va, %vc
@@ -495,9 +495,9 @@ define <vscale x 1 x i64> @vwsub_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vsc
define <vscale x 1 x i64> @vwsubu_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vwsubu.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
%vd = sub <vscale x 1 x i64> %va, %vc
@@ -509,9 +509,9 @@ define <vscale x 1 x i64> @vwsub_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v9
+; CHECK-NEXT: vwsub.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -525,9 +525,9 @@ define <vscale x 1 x i64> @vwsubu_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vwsubu.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -539,10 +539,10 @@ define <vscale x 1 x i64> @vwsubu_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
define <vscale x 2 x i64> @vwsub_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwsub_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vsub.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -569,10 +569,10 @@ define <vscale x 2 x i64> @vwsub_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vsub.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -601,9 +601,9 @@ define <vscale x 2 x i64> @vwsubu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
define <vscale x 2 x i64> @vwsub_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwsub_wv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v11, v10
+; CHECK-NEXT: vwsub.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
%vd = sub <vscale x 2 x i64> %va, %vc
@@ -613,9 +613,9 @@ define <vscale x 2 x i64> @vwsub_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vsc
define <vscale x 2 x i64> @vwsubu_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v11, v10
+; CHECK-NEXT: vwsubu.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
%vd = sub <vscale x 2 x i64> %va, %vc
@@ -627,9 +627,9 @@ define <vscale x 2 x i64> @vwsub_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v11, v10
+; CHECK-NEXT: vwsub.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -643,9 +643,9 @@ define <vscale x 2 x i64> @vwsubu_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v11, v10
+; CHECK-NEXT: vwsubu.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -657,10 +657,10 @@ define <vscale x 2 x i64> @vwsubu_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
define <vscale x 4 x i64> @vwsub_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwsub_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vsub.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwsub.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -687,10 +687,10 @@ define <vscale x 4 x i64> @vwsub_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vsub.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwsub.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -719,9 +719,9 @@ define <vscale x 4 x i64> @vwsubu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
define <vscale x 4 x i64> @vwsub_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwsub_wv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v14, v12
+; CHECK-NEXT: vwsub.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
%vd = sub <vscale x 4 x i64> %va, %vc
@@ -731,9 +731,9 @@ define <vscale x 4 x i64> @vwsub_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vsc
define <vscale x 4 x i64> @vwsubu_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v14, v12
+; CHECK-NEXT: vwsubu.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
%vd = sub <vscale x 4 x i64> %va, %vc
@@ -745,9 +745,9 @@ define <vscale x 4 x i64> @vwsub_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v14, v12
+; CHECK-NEXT: vwsub.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -761,9 +761,9 @@ define <vscale x 4 x i64> @vwsubu_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v14, v12
+; CHECK-NEXT: vwsubu.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -775,10 +775,10 @@ define <vscale x 4 x i64> @vwsubu_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
define <vscale x 8 x i64> @vwsub_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwsub_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vsub.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwsub.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -805,10 +805,10 @@ define <vscale x 8 x i64> @vwsub_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vsub.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwsub.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -837,9 +837,9 @@ define <vscale x 8 x i64> @vwsubu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
define <vscale x 8 x i64> @vwsub_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwsub_wv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v20, v16
+; CHECK-NEXT: vwsub.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
%vd = sub <vscale x 8 x i64> %va, %vc
@@ -849,9 +849,9 @@ define <vscale x 8 x i64> @vwsub_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vsc
define <vscale x 8 x i64> @vwsubu_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v20, v16
+; CHECK-NEXT: vwsubu.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
%vd = sub <vscale x 8 x i64> %va, %vc
@@ -863,9 +863,9 @@ define <vscale x 8 x i64> @vwsub_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v20, v16
+; CHECK-NEXT: vwsub.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -879,9 +879,9 @@ define <vscale x 8 x i64> @vwsubu_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v20, v16
+; CHECK-NEXT: vwsubu.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -893,10 +893,10 @@ define <vscale x 8 x i64> @vwsubu_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
define <vscale x 1 x i64> @vwsub_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwsub_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vsub.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -923,10 +923,10 @@ define <vscale x 1 x i64> @vwsub_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vsub.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -955,9 +955,9 @@ define <vscale x 1 x i64> @vwsubu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b
define <vscale x 1 x i64> @vwsub_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwsub_wv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v9
+; CHECK-NEXT: vwsub.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
%vd = sub <vscale x 1 x i64> %va, %vc
@@ -967,9 +967,9 @@ define <vscale x 1 x i64> @vwsub_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vsca
define <vscale x 1 x i64> @vwsubu_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vwsubu.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
%vd = sub <vscale x 1 x i64> %va, %vc
@@ -981,9 +981,9 @@ define <vscale x 1 x i64> @vwsub_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v9
+; CHECK-NEXT: vwsub.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -997,9 +997,9 @@ define <vscale x 1 x i64> @vwsubu_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vwsubu.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -1011,10 +1011,10 @@ define <vscale x 1 x i64> @vwsubu_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %
define <vscale x 2 x i64> @vwsub_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwsub_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vsub.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -1041,10 +1041,10 @@ define <vscale x 2 x i64> @vwsub_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vsub.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1073,9 +1073,9 @@ define <vscale x 2 x i64> @vwsubu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b
define <vscale x 2 x i64> @vwsub_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwsub_wv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v11, v10
+; CHECK-NEXT: vwsub.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
%vd = sub <vscale x 2 x i64> %va, %vc
@@ -1085,9 +1085,9 @@ define <vscale x 2 x i64> @vwsub_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vsca
define <vscale x 2 x i64> @vwsubu_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v11, v10
+; CHECK-NEXT: vwsubu.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
%vd = sub <vscale x 2 x i64> %va, %vc
@@ -1099,9 +1099,9 @@ define <vscale x 2 x i64> @vwsub_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v11, v10
+; CHECK-NEXT: vwsub.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1115,9 +1115,9 @@ define <vscale x 2 x i64> @vwsubu_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v11, v10
+; CHECK-NEXT: vwsubu.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1129,10 +1129,10 @@ define <vscale x 2 x i64> @vwsubu_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %
define <vscale x 4 x i64> @vwsub_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwsub_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vsub.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwsub.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -1159,10 +1159,10 @@ define <vscale x 4 x i64> @vwsub_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vsub.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwsub.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1191,9 +1191,9 @@ define <vscale x 4 x i64> @vwsubu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b
define <vscale x 4 x i64> @vwsub_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwsub_wv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v14, v12
+; CHECK-NEXT: vwsub.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
%vd = sub <vscale x 4 x i64> %va, %vc
@@ -1203,9 +1203,9 @@ define <vscale x 4 x i64> @vwsub_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vsca
define <vscale x 4 x i64> @vwsubu_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v14, v12
+; CHECK-NEXT: vwsubu.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
%vd = sub <vscale x 4 x i64> %va, %vc
@@ -1217,9 +1217,9 @@ define <vscale x 4 x i64> @vwsub_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v14, v12
+; CHECK-NEXT: vwsub.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1233,9 +1233,9 @@ define <vscale x 4 x i64> @vwsubu_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v14, v12
+; CHECK-NEXT: vwsubu.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1247,10 +1247,10 @@ define <vscale x 4 x i64> @vwsubu_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %
define <vscale x 8 x i64> @vwsub_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwsub_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vsub.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwsub.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1277,10 +1277,10 @@ define <vscale x 8 x i64> @vwsub_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vsub.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwsub.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1309,9 +1309,9 @@ define <vscale x 8 x i64> @vwsubu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b
define <vscale x 8 x i64> @vwsub_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwsub_wv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v20, v16
+; CHECK-NEXT: vwsub.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
%vd = sub <vscale x 8 x i64> %va, %vc
@@ -1321,9 +1321,9 @@ define <vscale x 8 x i64> @vwsub_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vsca
define <vscale x 8 x i64> @vwsubu_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v20, v16
+; CHECK-NEXT: vwsubu.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
%vd = sub <vscale x 8 x i64> %va, %vc
@@ -1335,9 +1335,9 @@ define <vscale x 8 x i64> @vwsub_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v20, v16
+; CHECK-NEXT: vwsub.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1351,9 +1351,9 @@ define <vscale x 8 x i64> @vwsubu_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v20, v16
+; CHECK-NEXT: vwsubu.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
index 06ed46f..8248c26 100644
--- a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
@@ -83,7 +83,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 8
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/RISCV/spill-fill-fold.ll b/llvm/test/CodeGen/RISCV/spill-fill-fold.ll
index a9a0cc5..8cf5f55 100644
--- a/llvm/test/CodeGen/RISCV/spill-fill-fold.ll
+++ b/llvm/test/CodeGen/RISCV/spill-fill-fold.ll
@@ -290,8 +290,8 @@ define double @spill_i64_to_double(i64 %a) nounwind {
; RV32ID-NEXT: fsd fs9, 40(sp) # 8-byte Folded Spill
; RV32ID-NEXT: fsd fs10, 32(sp) # 8-byte Folded Spill
; RV32ID-NEXT: fsd fs11, 24(sp) # 8-byte Folded Spill
-; RV32ID-NEXT: sw a1, 20(sp)
; RV32ID-NEXT: sw a0, 16(sp)
+; RV32ID-NEXT: sw a1, 20(sp)
; RV32ID-NEXT: fld fa5, 16(sp)
; RV32ID-NEXT: fsd fa5, 8(sp) # 8-byte Folded Spill
; RV32ID-NEXT: #APP
@@ -804,13 +804,15 @@ define double @fill_i64_to_double(i64 %a) nounwind {
; RV32ID-NEXT: fsd fs9, 40(sp) # 8-byte Folded Spill
; RV32ID-NEXT: fsd fs10, 32(sp) # 8-byte Folded Spill
; RV32ID-NEXT: fsd fs11, 24(sp) # 8-byte Folded Spill
-; RV32ID-NEXT: sw a1, 20(sp)
-; RV32ID-NEXT: sw a0, 16(sp)
-; RV32ID-NEXT: fld fa5, 16(sp)
-; RV32ID-NEXT: fsd fa5, 8(sp) # 8-byte Folded Spill
+; RV32ID-NEXT: sw a1, 12(sp) # 4-byte Folded Spill
+; RV32ID-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32ID-NEXT: #APP
; RV32ID-NEXT: #NO_APP
-; RV32ID-NEXT: fld fa0, 8(sp) # 8-byte Folded Reload
+; RV32ID-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; RV32ID-NEXT: sw a0, 16(sp)
+; RV32ID-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; RV32ID-NEXT: sw a0, 20(sp)
+; RV32ID-NEXT: fld fa0, 16(sp)
; RV32ID-NEXT: lw ra, 172(sp) # 4-byte Folded Reload
; RV32ID-NEXT: lw s0, 168(sp) # 4-byte Folded Reload
; RV32ID-NEXT: lw s1, 164(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/stack-inst-compress.mir b/llvm/test/CodeGen/RISCV/stack-inst-compress.mir
index 6721ff1..5cc4615 100644
--- a/llvm/test/CodeGen/RISCV/stack-inst-compress.mir
+++ b/llvm/test/CodeGen/RISCV/stack-inst-compress.mir
@@ -32,6 +32,7 @@ alignment: 2
tracksRegLiveness: true
frameInfo:
maxAlignment: 4
+ adjustsStack: true
hasCalls: true
localFrameSize: 2048
stack:
@@ -117,6 +118,7 @@ alignment: 2
tracksRegLiveness: true
frameInfo:
maxAlignment: 4
+ adjustsStack: true
hasCalls: true
localFrameSize: 4096
stack:
@@ -210,6 +212,7 @@ alignment: 2
tracksRegLiveness: true
frameInfo:
maxAlignment: 4
+ adjustsStack: true
hasCalls: true
localFrameSize: 8192
stack:
diff --git a/llvm/test/CodeGen/RISCV/strip-w-suffix.ll b/llvm/test/CodeGen/RISCV/strip-w-suffix.ll
new file mode 100644
index 0000000..4124b3d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/strip-w-suffix.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=STRIP %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+no-strip-w-suffix -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=NO-STRIP %s
+
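+; An informal summary (inferred from the RUN lines and checks below, not
+; part of the autogenerated assertions): W-suffixed ops sign-extend their
+; 32-bit result, so when every consumer reads only the low 32 bits -- as
+; the addw/addiw users below do -- the suffix is redundant and the default
+; behaviour rewrites e.g. addiw -> addi; +no-strip-w-suffix disables this.
+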
+define i32 @addiw(i32 %a) {
+; STRIP-LABEL: addiw:
+; STRIP: # %bb.0:
+; STRIP-NEXT: lui a1, 1
+; STRIP-NEXT: addi a1, a1, -1
+; STRIP-NEXT: addw a0, a0, a1
+; STRIP-NEXT: ret
+;
+; NO-STRIP-LABEL: addiw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: lui a1, 1
+; NO-STRIP-NEXT: addiw a1, a1, -1
+; NO-STRIP-NEXT: addw a0, a0, a1
+; NO-STRIP-NEXT: ret
+ %ret = add i32 %a, 4095
+ ret i32 %ret
+}
+
+define i32 @addw(i32 %a, i32 %b) {
+; STRIP-LABEL: addw:
+; STRIP: # %bb.0:
+; STRIP-NEXT: add a0, a0, a1
+; STRIP-NEXT: addiw a0, a0, 1024
+; STRIP-NEXT: ret
+;
+; NO-STRIP-LABEL: addw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: addw a0, a0, a1
+; NO-STRIP-NEXT: addiw a0, a0, 1024
+; NO-STRIP-NEXT: ret
+ %add = add i32 %a, %b
+ %ret = add i32 %add, 1024
+ ret i32 %ret
+}
+
+define i32 @mulw(i32 %a, i32 %b) {
+; STRIP-LABEL: mulw:
+; STRIP: # %bb.0:
+; STRIP-NEXT: mul a0, a0, a1
+; STRIP-NEXT: addiw a0, a0, 1024
+; STRIP-NEXT: ret
+;
+; NO-STRIP-LABEL: mulw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: mulw a0, a0, a1
+; NO-STRIP-NEXT: addiw a0, a0, 1024
+; NO-STRIP-NEXT: ret
+ %mul = mul i32 %a, %b
+ %ret = add i32 %mul, 1024
+ ret i32 %ret
+}
+
+define i32 @slliw(i32 %a) {
+; STRIP-LABEL: slliw:
+; STRIP: # %bb.0:
+; STRIP-NEXT: slli a0, a0, 1
+; STRIP-NEXT: addiw a0, a0, 1024
+; STRIP-NEXT: ret
+;
+; NO-STRIP-LABEL: slliw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: slliw a0, a0, 1
+; NO-STRIP-NEXT: addiw a0, a0, 1024
+; NO-STRIP-NEXT: ret
+ %shl = shl i32 %a, 1
+ %ret = add i32 %shl, 1024
+ ret i32 %ret
+}
diff --git a/llvm/test/CodeGen/RISCV/tlsdesc-symbol.ll b/llvm/test/CodeGen/RISCV/tlsdesc-symbol.ll
new file mode 100644
index 0000000..23ba2ff
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/tlsdesc-symbol.ll
@@ -0,0 +1,24 @@
+;; The tests in this file do not appear in tls-models.ll because
+;; they are not auto-generated.
+; RUN: llc -mtriple=riscv64 -relocation-model=pic -enable-tlsdesc < %s \
+; RUN: | llvm-mc -triple=riscv64 -filetype=obj -o - \
+; RUN: | llvm-readelf --symbols - \
+; RUN: | FileCheck %s
+
+; RUN: llc -mtriple=riscv32 -relocation-model=pic -enable-tlsdesc < %s \
+; RUN: | llvm-mc -triple=riscv32 -filetype=obj -o - \
+; RUN: | llvm-readelf --symbols - \
+; RUN: | FileCheck %s
+
+; Check that TLS symbols are lowered correctly based on the specified
+; model. Make sure they're external to avoid them all being optimised to Local
+; Exec for the executable.
+
+@unspecified = external thread_local global i32
+
+define ptr @f1() nounwind {
+entry:
+ ret ptr @unspecified
+ ; CHECK: Symbol table '.symtab' contains 7 entries:
+ ; CHECK: TLS {{.*}} unspecified
+}
diff --git a/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll b/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
index 8c0d97a..f1ae320 100644
--- a/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
+++ b/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
@@ -89,8 +89,8 @@ define { i128, i8 } @muloti_test(i128 %l, i128 %r) #0 {
; RISCV32-NEXT: snez a3, a3
; RISCV32-NEXT: and a3, a3, a7
; RISCV32-NEXT: or a2, a3, a2
-; RISCV32-NEXT: or a3, t2, t3
-; RISCV32-NEXT: or a2, a2, a3
+; RISCV32-NEXT: or a2, a2, t2
+; RISCV32-NEXT: or a2, a2, t3
; RISCV32-NEXT: mul a3, a5, a4
; RISCV32-NEXT: andi a2, a2, 1
; RISCV32-NEXT: sw a3, 0(a0)
diff --git a/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll b/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
index e5c2e01..73ace20 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
+++ b/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
@@ -3,7 +3,8 @@
define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 {
; RV32-LABEL: func:
; RV32: # %bb.0: # %entry
-; RV32-NEXT: cm.push {ra, s0-s1}, -24
+; RV32-NEXT: cm.push {ra, s0-s1}, -16
+; RV32-NEXT: addi sp, sp, -8
; RV32-NEXT: .cfi_def_cfa_offset 24
; RV32-NEXT: .cfi_offset ra, -12
; RV32-NEXT: .cfi_offset s0, -8
@@ -31,7 +32,8 @@ define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 {
; RV32-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
; RV32-NEXT: sb a0, 0(s0)
; RV32-NEXT: mv a0, s1
-; RV32-NEXT: cm.popret {ra, s0-s1}, 24
+; RV32-NEXT: addi sp, sp, 8
+; RV32-NEXT: cm.popret {ra, s0-s1}, 16
entry:
br label %while.body
diff --git a/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll b/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
index 63c46ca..95695aa 100644
--- a/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
+++ b/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
@@ -4,21 +4,15 @@
define dso_local void @zdinx_asm(ptr nocapture noundef writeonly %a, double noundef %b, double noundef %c) nounwind {
; CHECK-LABEL: zdinx_asm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 8(sp)
-; CHECK-NEXT: sw a2, 12(sp)
-; CHECK-NEXT: lw a6, 8(sp)
-; CHECK-NEXT: lw a7, 12(sp)
-; CHECK-NEXT: sw a3, 8(sp)
-; CHECK-NEXT: sw a4, 12(sp)
-; CHECK-NEXT: lw a2, 8(sp)
-; CHECK-NEXT: lw a3, 12(sp)
+; CHECK-NEXT: mv a5, a4
+; CHECK-NEXT: mv a7, a2
+; CHECK-NEXT: mv a4, a3
+; CHECK-NEXT: mv a6, a1
; CHECK-NEXT: #APP
-; CHECK-NEXT: fsgnjx.d a2, a6, a2
+; CHECK-NEXT: fsgnjx.d a2, a6, a4
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: sw a2, 8(a0)
; CHECK-NEXT: sw a3, 12(a0)
-; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
entry:
%arrayidx = getelementptr inbounds double, ptr %a, i32 1
diff --git a/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll b/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll
index 3eeb704..f56d477 100644
--- a/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll
+++ b/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll
@@ -7,15 +7,11 @@
define void @foo(ptr nocapture %p, double %d) nounwind {
; RV32ZDINX-LABEL: foo:
; RV32ZDINX: # %bb.0: # %entry
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: sw a1, 8(sp)
-; RV32ZDINX-NEXT: sw a2, 12(sp)
-; RV32ZDINX-NEXT: lw a2, 8(sp)
-; RV32ZDINX-NEXT: lw a3, 12(sp)
+; RV32ZDINX-NEXT: mv a3, a2
; RV32ZDINX-NEXT: addi a0, a0, 2047
+; RV32ZDINX-NEXT: mv a2, a1
; RV32ZDINX-NEXT: sw a2, -3(a0)
; RV32ZDINX-NEXT: sw a3, 1(a0)
-; RV32ZDINX-NEXT: addi sp, sp, 16
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: foo:
@@ -31,16 +27,12 @@ entry:
define void @foo2(ptr nocapture %p, double %d) nounwind {
; RV32ZDINX-LABEL: foo2:
; RV32ZDINX: # %bb.0: # %entry
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: sw a1, 8(sp)
-; RV32ZDINX-NEXT: sw a2, 12(sp)
-; RV32ZDINX-NEXT: lw a2, 8(sp)
-; RV32ZDINX-NEXT: lw a3, 12(sp)
+; RV32ZDINX-NEXT: mv a3, a2
+; RV32ZDINX-NEXT: mv a2, a1
; RV32ZDINX-NEXT: fadd.d a2, a2, a2
; RV32ZDINX-NEXT: addi a0, a0, 2047
; RV32ZDINX-NEXT: sw a2, -3(a0)
; RV32ZDINX-NEXT: sw a3, 1(a0)
-; RV32ZDINX-NEXT: addi sp, sp, 16
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: foo2:
@@ -117,15 +109,11 @@ entry:
define void @foo5(ptr nocapture %p, double %d) nounwind {
; RV32ZDINX-LABEL: foo5:
; RV32ZDINX: # %bb.0: # %entry
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: sw a1, 8(sp)
-; RV32ZDINX-NEXT: sw a2, 12(sp)
-; RV32ZDINX-NEXT: lw a2, 8(sp)
-; RV32ZDINX-NEXT: lw a3, 12(sp)
+; RV32ZDINX-NEXT: mv a3, a2
; RV32ZDINX-NEXT: addi a0, a0, -2048
+; RV32ZDINX-NEXT: mv a2, a1
; RV32ZDINX-NEXT: sw a2, -1(a0)
; RV32ZDINX-NEXT: sw a3, 3(a0)
-; RV32ZDINX-NEXT: addi sp, sp, 16
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: foo5:
@@ -142,19 +130,15 @@ entry:
define void @foo6(ptr %p, double %d) nounwind {
; RV32ZDINX-LABEL: foo6:
; RV32ZDINX: # %bb.0: # %entry
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: sw a1, 8(sp)
-; RV32ZDINX-NEXT: sw a2, 12(sp)
-; RV32ZDINX-NEXT: lw a2, 8(sp)
-; RV32ZDINX-NEXT: lw a3, 12(sp)
-; RV32ZDINX-NEXT: lui a1, %hi(.LCPI5_0)
-; RV32ZDINX-NEXT: lw a4, %lo(.LCPI5_0)(a1)
-; RV32ZDINX-NEXT: lw a5, %lo(.LCPI5_0+4)(a1)
+; RV32ZDINX-NEXT: lui a3, %hi(.LCPI5_0)
+; RV32ZDINX-NEXT: lw a4, %lo(.LCPI5_0)(a3)
+; RV32ZDINX-NEXT: lw a5, %lo(.LCPI5_0+4)(a3)
+; RV32ZDINX-NEXT: mv a3, a2
+; RV32ZDINX-NEXT: mv a2, a1
; RV32ZDINX-NEXT: fadd.d a2, a2, a4
; RV32ZDINX-NEXT: addi a0, a0, 2047
; RV32ZDINX-NEXT: sw a2, -3(a0)
; RV32ZDINX-NEXT: sw a3, 1(a0)
-; RV32ZDINX-NEXT: addi sp, sp, 16
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: foo6:
diff --git a/llvm/test/CodeGen/RISCV/zdinx-large-spill.mir b/llvm/test/CodeGen/RISCV/zdinx-large-spill.mir
new file mode 100644
index 0000000..8596a65
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/zdinx-large-spill.mir
@@ -0,0 +1,74 @@
+# NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+# RUN: llc %s -mtriple=riscv32 -mattr=+zdinx -start-before=prologepilog -o - | FileCheck %s
+
+# We want to make sure eliminateFrameIndex doesn't fold sp+2044 as an offset in
+# a GPR pair spill/reload instruction. When we split the pair spill, we would be
+# unable to add 4 to the immediate without overflowing simm12.
+
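+# For reference (an informal note, not part of the autogenerated checks):
+# simm12 is a signed 12-bit immediate spanning [-2048, 2047]. Splitting an
+# 8-byte GPR-pair spill at offset 2044 would need a second 4-byte store at
+# 2044 + 4 = 2048, which is out of range, so the checks below expect the
+# large offset to be materialized in a scratch register (lui/add) instead.
+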
+--- |
+ define void @foo() {
+ ; CHECK-LABEL: foo:
+ ; CHECK: # %bb.0:
+ ; CHECK-NEXT: addi sp, sp, -2048
+ ; CHECK-NEXT: addi sp, sp, -16
+ ; CHECK-NEXT: .cfi_def_cfa_offset 2064
+ ; CHECK-NEXT: lui t0, 1
+ ; CHECK-NEXT: add t0, sp, t0
+ ; CHECK-NEXT: sw a0, -2040(t0) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a1, -2036(t0) # 4-byte Folded Spill
+ ; CHECK-NEXT: lui a0, 1
+ ; CHECK-NEXT: add a0, sp, a0
+ ; CHECK-NEXT: sw a2, -2048(a0) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a3, -2044(a0) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a4, 2040(sp) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a5, 2044(sp) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a6, 2032(sp) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a7, 2036(sp) # 4-byte Folded Spill
+ ; CHECK-NEXT: lui a0, 1
+ ; CHECK-NEXT: add a0, sp, a0
+ ; CHECK-NEXT: lw a1, -2036(a0) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a0, -2040(a0) # 4-byte Folded Reload
+ ; CHECK-NEXT: lui a0, 1
+ ; CHECK-NEXT: add a0, sp, a0
+ ; CHECK-NEXT: lw a2, -2048(a0) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a3, -2044(a0) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a4, 2040(sp) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a5, 2044(sp) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a6, 2032(sp) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a7, 2036(sp) # 4-byte Folded Reload
+ ; CHECK-NEXT: addi sp, sp, 2032
+ ; CHECK-NEXT: addi sp, sp, 32
+ ; CHECK-NEXT: ret
+ ret void
+ }
+...
+---
+name: foo
+tracksRegLiveness: true
+tracksDebugUserValues: true
+frameInfo:
+ maxAlignment: 4
+stack:
+ - { id: 0, type: spill-slot, size: 8, alignment: 4 }
+ - { id: 1, type: spill-slot, size: 8, alignment: 4 }
+ - { id: 2, type: spill-slot, size: 8, alignment: 4 }
+ - { id: 3, type: spill-slot, size: 8, alignment: 4 }
+ - { id: 4, type: spill-slot, size: 2024, alignment: 4 }
+machineFunctionInfo:
+ varArgsFrameIndex: 0
+ varArgsSaveSize: 0
+body: |
+ bb.0:
+ liveins: $x10_x11, $x12_x13, $x14_x15, $x16_x17
+
+ PseudoRV32ZdinxSD killed renamable $x10_x11, %stack.0, 0 :: (store (s64) into %stack.0, align 4)
+ PseudoRV32ZdinxSD killed renamable $x12_x13, %stack.1, 0 :: (store (s64) into %stack.1, align 4)
+ PseudoRV32ZdinxSD killed renamable $x14_x15, %stack.2, 0 :: (store (s64) into %stack.2, align 4)
+ PseudoRV32ZdinxSD killed renamable $x16_x17, %stack.3, 0 :: (store (s64) into %stack.3, align 4)
+ renamable $x10_x11 = PseudoRV32ZdinxLD %stack.0, 0 :: (load (s64) from %stack.0, align 4)
+ renamable $x12_x13 = PseudoRV32ZdinxLD %stack.1, 0 :: (load (s64) from %stack.1, align 4)
+ renamable $x14_x15 = PseudoRV32ZdinxLD %stack.2, 0 :: (load (s64) from %stack.2, align 4)
+ renamable $x16_x17 = PseudoRV32ZdinxLD %stack.3, 0 :: (load (s64) from %stack.3, align 4)
+ PseudoRET
+
+...
diff --git a/llvm/test/CodeGen/SPIRV/ComparePointers.ll b/llvm/test/CodeGen/SPIRV/ComparePointers.ll
index fd2084d..6777fc3 100644
--- a/llvm/test/CodeGen/SPIRV/ComparePointers.ll
+++ b/llvm/test/CodeGen/SPIRV/ComparePointers.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown --mattr=+spirv1.3 %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; kernel void test(int global *in, int global *in2) {
;; if (!in)
diff --git a/llvm/test/CodeGen/SPIRV/ExecutionMode.ll b/llvm/test/CodeGen/SPIRV/ExecutionMode.ll
index 3e321e1..180b7246 100644
--- a/llvm/test/CodeGen/SPIRV/ExecutionMode.ll
+++ b/llvm/test/CodeGen/SPIRV/ExecutionMode.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: %[[#VOID:]] = OpTypeVoid
diff --git a/llvm/test/CodeGen/SPIRV/LinkOnceODR.ll b/llvm/test/CodeGen/SPIRV/LinkOnceODR.ll
index 3dfdeac..ec660b7 100644
--- a/llvm/test/CodeGen/SPIRV/LinkOnceODR.ll
+++ b/llvm/test/CodeGen/SPIRV/LinkOnceODR.ll
@@ -1,5 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_linkonce_odr %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV-EXT
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_linkonce_odr %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_linkonce_odr %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV-EXT
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_linkonce_odr %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-EXT: Capability Linkage
; CHECK-SPIRV-EXT: Extension "SPV_KHR_linkonce_odr"
diff --git a/llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll b/llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll
index 7505c3f..42170dc 100644
--- a/llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll
+++ b/llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll
@@ -1,5 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_linkonce_odr %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV-EXT
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_linkonce_odr %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_linkonce_odr %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV-EXT
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_linkonce_odr %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-EXT: Capability Linkage
; CHECK-SPIRV-EXT: Extension "SPV_KHR_linkonce_odr"
diff --git a/llvm/test/CodeGen/SPIRV/assume.ll b/llvm/test/CodeGen/SPIRV/assume.ll
index 6099955..fbf12ef 100644
--- a/llvm/test/CodeGen/SPIRV/assume.ll
+++ b/llvm/test/CodeGen/SPIRV/assume.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=EXT,CHECK %s
-; RUN: llc -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=EXT,CHECK %s
+; RUN: llc -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=EXT,CHECK %s
+; RUN: llc -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=EXT,CHECK %s
; RUN: llc -mtriple=spirv32-unknown-unknown < %s | FileCheck --check-prefixes=NOEXT,CHECK %s
; RUN: llc -mtriple=spirv64-unknown-unknown < %s | FileCheck --check-prefixes=NOEXT,CHECK %s
diff --git a/llvm/test/CodeGen/SPIRV/capability-kernel.ll b/llvm/test/CodeGen/SPIRV/capability-kernel.ll
index 03ea58c..fea1951 100644
--- a/llvm/test/CodeGen/SPIRV/capability-kernel.ll
+++ b/llvm/test/CodeGen/SPIRV/capability-kernel.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpCapability Addresses
diff --git a/llvm/test/CodeGen/SPIRV/empty-logical.ll b/llvm/test/CodeGen/SPIRV/empty-logical.ll
index a99df5f..1c66040 100644
--- a/llvm/test/CodeGen/SPIRV/empty-logical.ll
+++ b/llvm/test/CodeGen/SPIRV/empty-logical.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; Ensure the required Capabilities are listed.
; CHECK-DAG: OpCapability Shader
diff --git a/llvm/test/CodeGen/SPIRV/empty-module.ll b/llvm/test/CodeGen/SPIRV/empty-module.ll
index f220176..b56e58c 100644
--- a/llvm/test/CodeGen/SPIRV/empty-module.ll
+++ b/llvm/test/CodeGen/SPIRV/empty-module.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpCapability Addresses
; CHECK-DAG: OpCapability Linkage
diff --git a/llvm/test/CodeGen/SPIRV/empty-opencl32.ll b/llvm/test/CodeGen/SPIRV/empty-opencl32.ll
index a373781..8e826ec 100644
--- a/llvm/test/CodeGen/SPIRV/empty-opencl32.ll
+++ b/llvm/test/CodeGen/SPIRV/empty-opencl32.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; FIXME: ensure Magic Number, version number, generator's magic number, "bound" and "schema" are at least present
diff --git a/llvm/test/CodeGen/SPIRV/empty-opencl64.ll b/llvm/test/CodeGen/SPIRV/empty-opencl64.ll
index d101965..4eaa2e4 100644
--- a/llvm/test/CodeGen/SPIRV/empty-opencl64.ll
+++ b/llvm/test/CodeGen/SPIRV/empty-opencl64.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; FIXME: ensure Magic Number, version number, generator's magic number, "bound" and "schema" are at least present
diff --git a/llvm/test/CodeGen/SPIRV/empty.ll b/llvm/test/CodeGen/SPIRV/empty.ll
index fdcf316..390ab32 100644
--- a/llvm/test/CodeGen/SPIRV/empty.ll
+++ b/llvm/test/CodeGen/SPIRV/empty.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: OpCapability Addresses
; CHECK: "foo"
diff --git a/llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll b/llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll
index 473794a..721e825 100644
--- a/llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll
+++ b/llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll
@@ -1,5 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=SPV
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --mattr=+spirv1.3 --spirv-extensions=SPV_KHR_float_controls -o - | FileCheck %s --check-prefixes=SPVEXT
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --mattr=+spirv1.3 --spirv-ext=+SPV_KHR_float_controls -o - | FileCheck %s --check-prefixes=SPVEXT
define dso_local dllexport spir_kernel void @k_float_controls_0(i32 %ibuf, i32 %obuf) local_unnamed_addr {
entry:
diff --git a/llvm/test/CodeGen/SPIRV/expect.ll b/llvm/test/CodeGen/SPIRV/expect.ll
index 51555cd..82c1ec7 100644
--- a/llvm/test/CodeGen/SPIRV/expect.ll
+++ b/llvm/test/CodeGen/SPIRV/expect.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=CHECK,EXT %s
-; RUN: llc -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=CHECK,EXT %s
+; RUN: llc -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=CHECK,EXT %s
+; RUN: llc -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=CHECK,EXT %s
; RUN: llc -mtriple=spirv32-unknown-unknown < %s | FileCheck --check-prefixes=CHECK,NOEXT %s
; RUN: llc -mtriple=spirv64-unknown-unknown < %s | FileCheck --check-prefixes=CHECK,NOEXT %s
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll
index 1bfa556..e7b6679 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_add %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_add %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_add
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
index 627b59f..4fb99d9 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_add %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_add %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_add
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll
index fffda4b..2f536dc 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll
@@ -1,7 +1,7 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR1
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_add %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR2
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_add %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR2
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_add --spirv-extensions=SPV_EXT_shader_atomic_float16_add %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_add,+SPV_EXT_shader_atomic_float16_add %s -o - | FileCheck %s
; CHECK-ERROR1: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_add
; CHECK-ERROR2: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float16_add
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll
index 3c6fa27..7654c36 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_min_max
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll
index cc52e4c..8a35990 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_min_max
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll
index b406aee..45baaa8 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_min_max
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll
index b68fb36..f49367c 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_arbitrary_precision_integers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_arbitrary_precision_integers %s -o - | FileCheck %s
define i6 @getConstantI6() {
ret i6 2
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll
index 2f3c859d..4326d8d 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: result and argument must have the same number of components
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll
index 8ab84d6..57f52b9 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: result and argument must have the same number of components
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll
index 20a8042..2cb229e 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: result and argument must have the same number of components
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll
index efbd50b..eb5a2c7 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: result and argument must have the same number of components
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll
index 2bd59b2..91fa340e 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll
@@ -1,5 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o - | FileCheck %s
-; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o - -filetype=obj | spirv-val %}
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: the builtin requires the following SPIR-V extension: SPV_INTEL_bfloat16_conversion
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll
index 0bd1b5d..5f073e9 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpCapability Int8
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll
index 89de098..b7fecef 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpCapability Int8
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll
index afbcaec..a611be8 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_optnone %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-EXTENSION
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_optnone %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-EXTENSION
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-NO-EXTENSION
; CHECK-EXTENSION: OpCapability OptNoneINTEL
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll
index 0e0b2a4..df17ec4 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll
@@ -37,7 +37,7 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_subgroups %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_subgroups %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: intel_sub_group_shuffle: the builtin requires the following SPIR-V extension: SPV_INTEL_subgroups
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll
index 30c1635..b5df462 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll
@@ -1,7 +1,7 @@
; Modified from: https://github.com/KhronosGroup/SPIRV-LLVM-Translator/test/extensions/INTEL/SPV_INTEL_usm_storage_classes/intel_usm_addrspaces.ll
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_usm_storage_classes %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-EXT
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_INTEL_usm_storage_classes %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_usm_storage_classes %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-EXT
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_usm_storage_classes %s -o - -filetype=obj | spirv-val %}
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-WITHOUT
; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll
index 897aab7..8a54d22 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll
@@ -1,8 +1,8 @@
; Modified from: https://github.com/KhronosGroup/SPIRV-LLVM-Translator/test/extensions/INTEL/SPV_INTEL_variable_length_array/basic.ll
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_variable_length_array %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_variable_length_array %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_variable_length_array %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_variable_length_array %s -o - -filetype=obj | spirv-val %}
; CHECK-ERROR: LLVM ERROR: array allocation: this instruction requires the following SPIR-V extension: SPV_INTEL_variable_length_array
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll
index fbac43e..7b9f75d 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll
@@ -1,7 +1,7 @@
; Modified from: https://github.com/KhronosGroup/SPIRV-LLVM-Translator/test/extensions/INTEL/SPV_INTEL_variable_length_array/vla_spec_const.ll
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_variable_length_array %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_variable_length_array %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_variable_length_array %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_variable_length_array %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: Capability VariableLengthArrayINTEL
; CHECK-SPIRV: Extension "SPV_INTEL_variable_length_array"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll
index 95395d5..100f02f 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --spirv-extensions=SPV_KHR_bit_instructions -o - | FileCheck %s --check-prefix=CHECK-EXTENSION
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --spirv-ext=+SPV_KHR_bit_instructions -o - | FileCheck %s --check-prefix=CHECK-EXTENSION
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-NO-EXTENSION
; CHECK-EXTENSION: OpCapability BitInstructions
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll
index e74dd99..0d9ab4a 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s
; CHECK-DAG: OpExtension "SPV_KHR_no_integer_wrap_decoration"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll
index b1d6a09..63aade4 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_subgroup_rotate %s -o - | FileCheck %s
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_KHR_subgroup_rotate %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_subgroup_rotate %s -o - | FileCheck %s
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_subgroup_rotate %s -o - -filetype=obj | spirv-val %}
; CHECK-ERROR: LLVM ERROR: OpGroupNonUniformRotateKHR instruction requires the following SPIR-V extension: SPV_KHR_subgroup_rotate
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll
index 39bf63d..0de654b 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_uniform_group_instructions %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_uniform_group_instructions %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: __spirv_GroupBitwiseAndKHR: the builtin requires the following SPIR-V extension: SPV_KHR_uniform_group_instructions
diff --git a/llvm/test/CodeGen/SPIRV/extensions/both-allowed-disallowed-extension-error.ll b/llvm/test/CodeGen/SPIRV/extensions/both-allowed-disallowed-extension-error.ll
new file mode 100644
index 0000000..fc07cca
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/both-allowed-disallowed-extension-error.ll
@@ -0,0 +1,7 @@
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_arbitrary_precision_integers,-SPV_INTEL_arbitrary_precision_integers %s -o %t.spvt 2>&1 | FileCheck %s
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=-SPV_INTEL_arbitrary_precision_integers,+SPV_INTEL_arbitrary_precision_integers %s -o %t.spvt 2>&1 | FileCheck %s
+; CHECK: Extension cannot be allowed and disallowed at the same time: SPV_INTEL_arbitrary_precision_integers
+
+define i8 @foo() {
+ ret i8 2
+}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll
new file mode 100644
index 0000000..973a5e6
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll
@@ -0,0 +1,9 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=all,-SPV_INTEL_arbitrary_precision_integers %s -o - | FileCheck %s
+
+define i6 @foo() {
+ %call = tail call i32 @llvm.bitreverse.i32(i32 42)
+ ret i6 2
+}
+
+; CHECK-NOT: OpExtension "SPV_INTEL_arbitrary_precision_integers"
+; CHECK-DAG: OpExtension "SPV_KHR_bit_instructions"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions.ll b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions.ll
new file mode 100644
index 0000000..a5b97946
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions.ll
@@ -0,0 +1,7 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=all %s -o - | FileCheck %s
+
+define i6 @getConstantI6() {
+ ret i6 2
+}
+
+; CHECK: OpExtension "SPV_INTEL_arbitrary_precision_integers"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/invalid-extension-list-format.ll b/llvm/test/CodeGen/SPIRV/extensions/invalid-extension-list-format.ll
new file mode 100644
index 0000000..207ed4b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/invalid-extension-list-format.ll
@@ -0,0 +1,6 @@
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=UNKNOWN_EXTENSION %s -o %t.spvt 2>&1 | FileCheck %s
+; CHECK: Invalid extension list format: UNKNOWN_EXTENSION
+
+define i8 @foo() {
+ ret i8 2
+}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/unknown-extension-name.ll b/llvm/test/CodeGen/SPIRV/extensions/unknown-extension-name.ll
new file mode 100644
index 0000000..f4f5424
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/unknown-extension-name.ll
@@ -0,0 +1,6 @@
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+UNKNOWN_EXTENSION %s -o %t.spvt 2>&1 | FileCheck %s
+; CHECK: Unknown SPIR-V extension: +UNKNOWN_EXTENSION
+
+define i8 @foo() {
+ ret i8 2
+}
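Read together, the five new tests above pin down the grammar of the replacement --spirv-ext option. A compact summary (an editorial sketch inferred from the RUN lines and the error strings they check; not part of the commit):
;   --spirv-ext=+EXT        allow extension EXT
;   --spirv-ext=-EXT        disallow extension EXT
;   --spirv-ext=all         allow every extension known to the backend
;   --spirv-ext=all,-EXT    allow everything except EXT
;   --spirv-ext=+EXT,-EXT   error: extension allowed and disallowed at the same time
;   --spirv-ext=EXT         error: invalid extension list format (entries need a leading + or -)
;   --spirv-ext=+BOGUS      error: unknown SPIR-V extension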
diff --git a/llvm/test/CodeGen/SPIRV/fence.ll b/llvm/test/CodeGen/SPIRV/fence.ll
new file mode 100644
index 0000000..5da5866
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/fence.ll
@@ -0,0 +1,54 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpName %[[#GetScope:]] "_Z8getScopev"
+; CHECK-DAG: %[[#Long:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#ScopeDevice:]] = OpConstant %[[#Long]] 1
+; CHECK-DAG: %[[#WrkGrpConst2:]] = OpConstant %[[#Long]] 2
+; CHECK-DAG: %[[#Const3:]] = OpConstant %[[#Long]] 3
+; CHECK-DAG: %[[#InvocationConst4:]] = OpConstant %[[#Long]] 4
+; CHECK-DAG: %[[#Const8:]] = OpConstant %[[#Long]] 8
+; CHECK-DAG: %[[#Const16:]] = OpConstant %[[#Long]] 16
+; CHECK-DAG: %[[#Const912:]] = OpConstant %[[#Long]] 912
+; CHECK: OpMemoryBarrier %[[#ScopeDevice]] %[[#WrkGrpConst2]]
+; CHECK: OpMemoryBarrier %[[#ScopeDevice]] %[[#InvocationConst4]]
+; CHECK: OpMemoryBarrier %[[#ScopeDevice]] %[[#Const8]]
+; CHECK: OpMemoryBarrier %[[#InvocationConst4]] %[[#Const16]]
+; CHECK: OpMemoryBarrier %[[#WrkGrpConst2]] %[[#InvocationConst4]]
+; CHECK: OpFunctionEnd
+; CHECK: %[[#ScopeId:]] = OpFunctionCall %[[#Long]] %[[#GetScope]]
+; CHECK: OpControlBarrier %[[#Const3]] %[[#ScopeId]] %[[#Const912]]
+
+define spir_kernel void @fence_test_kernel1(ptr addrspace(1) noalias %s.ascast) {
+ fence acquire
+ ret void
+}
+
+define spir_kernel void @fence_test_kernel2(ptr addrspace(1) noalias %s.ascast) {
+ fence release
+ ret void
+}
+
+define spir_kernel void @fence_test_kernel3(ptr addrspace(1) noalias %s.ascast) {
+ fence acq_rel
+ ret void
+}
+
+define spir_kernel void @fence_test_kernel4(ptr addrspace(1) noalias %s.ascast) {
+ fence syncscope("singlethread") seq_cst
+ ret void
+}
+
+define spir_kernel void @fence_test_kernel5(ptr addrspace(1) noalias %s.ascast) {
+ fence syncscope("workgroup") release
+ ret void
+}
+
+define spir_func void @barrier_test1() {
+ %scope = call noundef i32 @_Z8getScopev()
+ call void @_Z22__spirv_ControlBarrieriii(i32 noundef 3, i32 noundef %scope, i32 noundef 912)
+ ret void
+}
+
+declare spir_func void @_Z22__spirv_ControlBarrieriii(i32 noundef, i32 noundef, i32 noundef)
+declare spir_func i32 @_Z8getScopev()
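An editorial gloss on the fence.ll checks above: the constants follow directly from the SPIR-V Scope and Memory Semantics encodings, which is why one OpConstant can serve both roles (4 is both Scope Invocation and Memory Semantics Release). The mapping the kernels exercise, read off the CHECK lines:
;   fence acquire                            -> OpMemoryBarrier Device(1)     Acquire(2)
;   fence release                            -> OpMemoryBarrier Device(1)     Release(4)
;   fence acq_rel                            -> OpMemoryBarrier Device(1)     AcquireRelease(8)
;   fence syncscope("singlethread") seq_cst  -> OpMemoryBarrier Invocation(4) SequentiallyConsistent(16)
;   fence syncscope("workgroup") release     -> OpMemoryBarrier Workgroup(2)  Release(4)
;   barrier_test1 forwards its i32 operands (3, %scope, 912) unchanged to OpControlBarrier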
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveGetLaneIndex.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveGetLaneIndex.ll
new file mode 100644
index 0000000..ec35690
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveGetLaneIndex.ll
@@ -0,0 +1,68 @@
+; RUN: llc -O0 -mtriple=spirv-vulkan-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan-unknown %s -o - -filetype=obj | spirv-val %}
+
+; This file was generated from the following command:
+; clang -cc1 -triple spirv-vulkan-compute -x hlsl -emit-llvm -finclude-default-header -o - - <<EOF
+; [numthreads(1, 1, 1)]
+; void main() {
+; int idx = WaveGetLaneIndex();
+; }
+; EOF
+
+; CHECK-DAG: OpCapability Shader
+; CHECK-DAG: OpCapability GroupNonUniform
+; CHECK-DAG: OpDecorate %[[#var:]] BuiltIn SubgroupLocalInvocationId
+; CHECK-DAG: %[[#int:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#ptri:]] = OpTypePointer Input %[[#int]]
+; CHECK-DAG: %[[#ptrf:]] = OpTypePointer Function %[[#int]]
+; CHECK-DAG: %[[#var]] = OpVariable %[[#ptri]] Input
+
+; CHECK-NOT: OpDecorate %[[#var]] LinkageAttributes
+
+
+; ModuleID = '-'
+source_filename = "-"
+target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024"
+target triple = "spirv-unknown-vulkan-compute"
+
+; Function Attrs: convergent noinline norecurse nounwind optnone
+define internal spir_func void @main() #0 {
+entry:
+ %0 = call token @llvm.experimental.convergence.entry()
+ %idx = alloca i32, align 4
+; CHECK: %[[#idx:]] = OpVariable %[[#ptrf]] Function
+
+ %1 = call i32 @__hlsl_wave_get_lane_index() [ "convergencectrl"(token %0) ]
+; CHECK: %[[#tmp:]] = OpLoad %[[#int]] %[[#var]]
+
+ store i32 %1, ptr %idx, align 4
+; CHECK: OpStore %[[#idx]] %[[#tmp]]
+
+ ret void
+}
+
+; Function Attrs: norecurse
+define void @main.1() #1 {
+entry:
+ call void @main()
+ ret void
+}
+
+; Function Attrs: convergent
+declare i32 @__hlsl_wave_get_lane_index() #2
+
+; Function Attrs: convergent nocallback nofree nosync nounwind willreturn memory(none)
+declare token @llvm.experimental.convergence.entry() #3
+
+attributes #0 = { convergent noinline norecurse nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+attributes #1 = { norecurse "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+attributes #2 = { convergent }
+attributes #3 = { convergent nocallback nofree nosync nounwind willreturn memory(none) }
+
+!llvm.module.flags = !{!0, !1}
+!llvm.ident = !{!2}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 4, !"dx.disable_optimizations", i32 1}
+!2 = !{!"clang version 19.0.0git (/usr/local/google/home/nathangauer/projects/llvm-project/clang bc6fd04b73a195981ee77823cf1382d04ab96c44)"}
+
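A short gloss on the WaveGetLaneIndex checks above (editorial; not part of the commit): the __hlsl_wave_get_lane_index call does not survive as a call. It lowers to a module-level Input variable decorated BuiltIn SubgroupLocalInvocationId plus an OpLoad at the call site, and the CHECK-NOT guards that the variable carries no LinkageAttributes, i.e. it is not treated as an imported symbol. Schematically (illustrative names):
;   %var = OpVariable %ptr_Input_uint Input   ; decorated BuiltIn SubgroupLocalInvocationId
;   %tmp = OpLoad %uint %var                  ; replaces the call result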
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll
index 7031129..38c033b 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll
@@ -1,4 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv-unknown-linux %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: OpExtInstImport "GLSL.std.450"
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/ceil.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/ceil.ll
new file mode 100644
index 0000000..1b358ae
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/ceil.ll
@@ -0,0 +1,20 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @ceil_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Ceil %[[#]]
+ %elt.ceil = call float @llvm.ceil.f32(float %a)
+ ret float %elt.ceil
+}
+
+define noundef half @ceil_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Ceil %[[#]]
+ %elt.ceil = call half @llvm.ceil.f16(half %a)
+ ret half %elt.ceil
+}
+
+declare half @llvm.ceil.f16(half)
+declare float @llvm.ceil.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/cos.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/cos.ll
new file mode 100644
index 0000000..28675cf
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/cos.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @cos_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Cos %[[#]]
+ %elt.cos = call float @llvm.cos.f32(float %a)
+ ret float %elt.cos
+}
+
+define noundef half @cos_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Cos %[[#]]
+ %elt.cos = call half @llvm.cos.f16(half %a)
+ ret half %elt.cos
+}
+
+declare half @llvm.cos.f16(half)
+declare float @llvm.cos.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp.ll
new file mode 100644
index 0000000..ee230df
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @exp_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Exp %[[#]]
+ %elt.exp = call float @llvm.exp.f32(float %a)
+ ret float %elt.exp
+}
+
+define noundef half @exp_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Exp %[[#]]
+ %elt.exp = call half @llvm.exp.f16(half %a)
+ ret half %elt.exp
+}
+
+declare half @llvm.exp.f16(half)
+declare float @llvm.exp.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp2.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp2.ll
new file mode 100644
index 0000000..eeaca1b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp2.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @exp2_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Exp2 %[[#]]
+ %elt.exp2 = call float @llvm.exp2.f32(float %a)
+ ret float %elt.exp2
+}
+
+define noundef half @exp2_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Exp2 %[[#]]
+ %elt.exp2 = call half @llvm.exp2.f16(half %a)
+ ret half %elt.exp2
+}
+
+declare half @llvm.exp2.f16(half)
+declare float @llvm.exp2.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/floor.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/floor.ll
new file mode 100644
index 0000000..5b972104
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/floor.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @floor_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Floor %[[#]]
+ %elt.floor = call float @llvm.floor.f32(float %a)
+ ret float %elt.floor
+}
+
+define noundef half @floor_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Floor %[[#]]
+ %elt.floor = call half @llvm.floor.f16(half %a)
+ ret half %elt.floor
+}
+
+declare half @llvm.floor.f16(half)
+declare float @llvm.floor.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmad.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmad.ll
new file mode 100644
index 0000000..a3fec10
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmad.ll
@@ -0,0 +1,29 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef half @fmad_half(half noundef %a, half noundef %b, half noundef %c) #0 {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Fma %[[#]] %[[#]] %[[#]]
+ %dx.fmad = call half @llvm.fmuladd.f16(half %a, half %b, half %c)
+ ret half %dx.fmad
+}
+
+define noundef float @fmad_float(float noundef %a, float noundef %b, float noundef %c) #0 {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Fma %[[#]] %[[#]] %[[#]]
+ %dx.fmad = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
+ ret float %dx.fmad
+}
+
+define noundef double @fmad_double(double noundef %a, double noundef %b, double noundef %c) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Fma %[[#]] %[[#]] %[[#]]
+ %dx.fmad = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
+ ret double %dx.fmad
+}
+
+declare half @llvm.fmuladd.f16(half, half, half)
+declare float @llvm.fmuladd.f32(float, float, float)
+declare double @llvm.fmuladd.f64(double, double, double)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmax.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmax.ll
new file mode 100644
index 0000000..48e9165
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmax.ll
@@ -0,0 +1,29 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+; TODO: This needs to be NMax; see https://github.com/llvm/llvm-project/issues/87072
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef half @test_fmax_half(half noundef %a, half noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] FMax %[[#]] %[[#]]
+ %0 = call half @llvm.maxnum.f16(half %a, half %b)
+ ret half %0
+}
+
+define noundef float @test_fmax_float(float noundef %a, float noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] FMax %[[#]] %[[#]]
+ %0 = call float @llvm.maxnum.f32(float %a, float %b)
+ ret float %0
+}
+
+define noundef double @test_fmax_double(double noundef %a, double noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] FMax %[[#]] %[[#]]
+ %0 = call double @llvm.maxnum.f64(double %a, double %b)
+ ret double %0
+}
+
+declare half @llvm.maxnum.f16(half, half)
+declare float @llvm.maxnum.f32(float, float)
+declare double @llvm.maxnum.f64(double, double)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmin.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmin.ll
new file mode 100644
index 0000000..5bfd69c
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmin.ll
@@ -0,0 +1,31 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+; TODO: This needs to be NMin; see https://github.com/llvm/llvm-project/issues/87072
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+; CHECK: OpMemoryModel Logical GLSL450
+
+define noundef half @test_fmin_half(half noundef %a, half noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] FMin %[[#]] %[[#]]
+ %0 = call half @llvm.minnum.f16(half %a, half %b)
+ ret half %0
+}
+
+define noundef float @test_fmin_float(float noundef %a, float noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] FMin %[[#]] %[[#]]
+ %0 = call float @llvm.minnum.f32(float %a, float %b)
+ ret float %0
+}
+
+define noundef double @test_fmin_double(double noundef %a, double noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] FMin %[[#]] %[[#]]
+ %0 = call double @llvm.minnum.f64(double %a, double %b)
+ ret double %0
+}
+
+declare half @llvm.minnum.f16(half, half)
+declare float @llvm.minnum.f32(float, float)
+declare double @llvm.minnum.f64(double, double)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log.ll
new file mode 100644
index 0000000..5a09f32
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @log_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Log %[[#]]
+ %elt.log = call float @llvm.log.f32(float %a)
+ ret float %elt.log
+}
+
+define noundef half @log_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Log %[[#]]
+ %elt.log = call half @llvm.log.f16(half %a)
+ ret half %elt.log
+}
+
+declare half @llvm.log.f16(half)
+declare float @llvm.log.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll
index e7b00eb..52ca6812 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll
@@ -1,4 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv-unknown-linux %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: %[[#extinst:]] = OpExtInstImport "GLSL.std.450"
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log2.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log2.ll
new file mode 100644
index 0000000..21f02a4
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log2.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @log2_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Log2 %[[#]]
+ %elt.log2 = call float @llvm.log2.f32(float %a)
+ ret float %elt.log2
+}
+
+define noundef half @log2_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Log2 %[[#]]
+ %elt.log2 = call half @llvm.log2.f16(half %a)
+ ret half %elt.log2
+}
+
+declare half @llvm.log2.f16(half)
+declare float @llvm.log2.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/pow.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/pow.ll
new file mode 100644
index 0000000..7fae963
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/pow.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @pow_float(float noundef %a, float noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Pow %[[#]]
+  %elt.pow = call float @llvm.pow.f32(float %a, float %b)
+ ret float %elt.pow
+}
+
+define noundef half @pow_half(half noundef %a, half noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Pow %[[#]]
+ %elt.pow = call half @llvm.pow.f16(half %a, half %b)
+ ret half %elt.pow
+}
+
+declare half @llvm.pow.f16(half, half)
+declare float @llvm.pow.f32(float, float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/reversebits.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/reversebits.ll
new file mode 100644
index 0000000..e58c9ab
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/reversebits.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpMemoryModel Logical GLSL450
+
+define noundef i32 @reversebits_i32(i32 noundef %a) {
+entry:
+; CHECK: %[[#]] = OpBitReverse %[[#]] %[[#]]
+ %elt.bitreverse = call i32 @llvm.bitreverse.i32(i32 %a)
+ ret i32 %elt.bitreverse
+}
+
+define noundef i16 @reversebits_i16(i16 noundef %a) {
+entry:
+; CHECK: %[[#]] = OpBitReverse %[[#]] %[[#]]
+ %elt.bitreverse = call i16 @llvm.bitreverse.i16(i16 %a)
+ ret i16 %elt.bitreverse
+}
+
+declare i16 @llvm.bitreverse.i16(i16)
+declare i32 @llvm.bitreverse.i32(i32)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/round.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/round.ll
new file mode 100644
index 0000000..baf2083
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/round.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @round_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] RoundEven %[[#]]
+ %elt.roundeven = call float @llvm.roundeven.f32(float %a)
+ ret float %elt.roundeven
+}
+
+define noundef half @round_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] RoundEven %[[#]]
+ %elt.roundeven = call half @llvm.roundeven.f16(half %a)
+ ret half %elt.roundeven
+}
+
+declare half @llvm.roundeven.f16(half)
+declare float @llvm.roundeven.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sin.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sin.ll
new file mode 100644
index 0000000..061af5b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sin.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @sin_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Sin %[[#]]
+ %elt.sin = call float @llvm.sin.f32(float %a)
+ ret float %elt.sin
+}
+
+define noundef half @sin_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Sin %[[#]]
+ %elt.sin = call half @llvm.sin.f16(half %a)
+ ret half %elt.sin
+}
+
+declare half @llvm.sin.f16(half)
+declare float @llvm.sin.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smax.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smax.ll
new file mode 100644
index 0000000..6bbf103
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smax.ll
@@ -0,0 +1,29 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef i16 @test_smax_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] SMax %[[#]] %[[#]]
+ %0 = call i16 @llvm.smax.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
+
+define noundef i32 @test_smax_i32(i32 noundef %a, i32 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] SMax %[[#]] %[[#]]
+ %0 = call i32 @llvm.smax.i32(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+define noundef i64 @test_smax_i64(i64 noundef %a, i64 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] SMax %[[#]] %[[#]]
+ %0 = call i64 @llvm.smax.i64(i64 %a, i64 %b)
+ ret i64 %0
+}
+
+declare i16 @llvm.smax.i16(i16, i16)
+declare i32 @llvm.smax.i32(i32, i32)
+declare i64 @llvm.smax.i64(i64, i64)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smin.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smin.ll
new file mode 100644
index 0000000..04ab960
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smin.ll
@@ -0,0 +1,32 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+
+define noundef i16 @test_smin_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] SMin %[[#]] %[[#]]
+ %0 = call i16 @llvm.smin.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
+
+
+define noundef i32 @test_smin_i32(i32 noundef %a, i32 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] SMin %[[#]] %[[#]]
+ %0 = call i32 @llvm.smin.i32(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+
+define noundef i64 @test_smin_i64(i64 noundef %a, i64 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] SMin %[[#]] %[[#]]
+ %0 = call i64 @llvm.smin.i64(i64 %a, i64 %b)
+ ret i64 %0
+}
+
+declare i16 @llvm.smin.i16(i16, i16)
+declare i32 @llvm.smin.i32(i32, i32)
+declare i64 @llvm.smin.i64(i64, i64)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sqrt.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sqrt.ll
new file mode 100644
index 0000000..6882b77
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sqrt.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @sqrt_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Sqrt %[[#]]
+ %elt.sqrt = call float @llvm.sqrt.f32(float %a)
+ ret float %elt.sqrt
+}
+
+define noundef half @sqrt_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Sqrt %[[#]]
+ %elt.sqrt = call half @llvm.sqrt.f16(half %a)
+ ret half %elt.sqrt
+}
+
+declare half @llvm.sqrt.f16(half)
+declare float @llvm.sqrt.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/trunc.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/trunc.ll
new file mode 100644
index 0000000..d75b7fa
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/trunc.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @trunc_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Trunc %[[#]]
+ %elt.trunc = call float @llvm.trunc.f32(float %a)
+ ret float %elt.trunc
+}
+
+define noundef half @trunc_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Trunc %[[#]]
+ %elt.trunc = call half @llvm.trunc.f16(half %a)
+ ret half %elt.trunc
+}
+
+declare half @llvm.trunc.f16(half)
+declare float @llvm.trunc.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umax.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umax.ll
new file mode 100644
index 0000000..32677df
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umax.ll
@@ -0,0 +1,29 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef i16 @test_umax_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] UMax %[[#]] %[[#]]
+ %0 = call i16 @llvm.umax.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
+
+define noundef i32 @test_umax_i32(i32 noundef %a, i32 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] UMax %[[#]] %[[#]]
+ %0 = call i32 @llvm.umax.i32(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+define noundef i64 @test_umax_i64(i64 noundef %a, i64 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] UMax %[[#]] %[[#]]
+ %0 = call i64 @llvm.umax.i64(i64 %a, i64 %b)
+ ret i64 %0
+}
+
+declare i16 @llvm.umax.i16(i16, i16)
+declare i32 @llvm.umax.i32(i32, i32)
+declare i64 @llvm.umax.i64(i64, i64)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umin.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umin.ll
new file mode 100644
index 0000000..a91fb80
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umin.ll
@@ -0,0 +1,32 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+
+define noundef i16 @test_umin_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] UMin %[[#]] %[[#]]
+ %0 = call i16 @llvm.umin.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
+
+
+define noundef i32 @test_umin_i32(i32 noundef %a, i32 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] UMin %[[#]] %[[#]]
+ %0 = call i32 @llvm.umin.i32(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+
+define noundef i64 @test_umin_i64(i64 noundef %a, i64 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] UMin %[[#]] %[[#]]
+ %0 = call i64 @llvm.umin.i64(i64 %a, i64 %b)
+ ret i64 %0
+}
+
+declare i16 @llvm.umin.i16(i16, i16)
+declare i32 @llvm.umin.i32(i32, i32)
+declare i64 @llvm.umin.i64(i64, i64)
diff --git a/llvm/test/CodeGen/SPIRV/instructions/atomic.ll b/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
index 9715504..ce59bb2 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpName [[ADD:%.*]] "test_add"
; CHECK-DAG: OpName [[SUB:%.*]] "test_sub"
@@ -20,7 +21,8 @@
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_add(i32* %ptr, i32 %val) {
@@ -32,7 +34,8 @@ define i32 @test_add(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_sub(i32* %ptr, i32 %val) {
@@ -44,7 +47,8 @@ define i32 @test_sub(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_min(i32* %ptr, i32 %val) {
@@ -56,7 +60,8 @@ define i32 @test_min(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_max(i32* %ptr, i32 %val) {
@@ -68,7 +73,8 @@ define i32 @test_max(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_umin(i32* %ptr, i32 %val) {
@@ -80,7 +86,8 @@ define i32 @test_umin(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_umax(i32* %ptr, i32 %val) {
@@ -92,7 +99,8 @@ define i32 @test_umax(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_and(i32* %ptr, i32 %val) {
@@ -104,7 +112,8 @@ define i32 @test_and(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_or(i32* %ptr, i32 %val) {
@@ -116,7 +125,8 @@ define i32 @test_or(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_xor(i32* %ptr, i32 %val) {
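The recurring change in this file and in the atomic_acqrel.ll and atomic_seq.ll diffs below is the same: every atomic check now expects an OpBitcast of the pointer parameter before the atomic instruction, presumably because with opaque pointers the parameter itself carries no pointee type, so the backend materializes an i32-typed pointer at the point of use. Schematically, for test_add (editorial sketch; type name illustrative):
;   [[BC_A]] = OpBitcast %ptr_i32 [[A]]       ; retype the untyped pointer parameter
;   [[R]]    = OpAtomicIAdd [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]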
diff --git a/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll b/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll
index 63c0ae7..950dfe4 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpName [[ADD:%.*]] "test_add"
; CHECK-DAG: OpName [[SUB:%.*]] "test_sub"
@@ -20,7 +21,8 @@
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_add(i32* %ptr, i32 %val) {
@@ -32,7 +34,8 @@ define i32 @test_add(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_sub(i32* %ptr, i32 %val) {
@@ -44,7 +47,8 @@ define i32 @test_sub(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_min(i32* %ptr, i32 %val) {
@@ -56,7 +60,8 @@ define i32 @test_min(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_max(i32* %ptr, i32 %val) {
@@ -68,7 +73,8 @@ define i32 @test_max(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_umin(i32* %ptr, i32 %val) {
@@ -80,7 +86,8 @@ define i32 @test_umin(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_umax(i32* %ptr, i32 %val) {
@@ -92,7 +99,8 @@ define i32 @test_umax(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_and(i32* %ptr, i32 %val) {
@@ -104,7 +112,8 @@ define i32 @test_and(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_or(i32* %ptr, i32 %val) {
@@ -116,7 +125,8 @@ define i32 @test_or(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_xor(i32* %ptr, i32 %val) {
diff --git a/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll b/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll
index f6a8fe1..f142e01 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpName [[ADD:%.*]] "test_add"
; CHECK-DAG: OpName [[SUB:%.*]] "test_sub"
@@ -20,7 +21,8 @@
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_add(i32* %ptr, i32 %val) {
@@ -32,7 +34,8 @@ define i32 @test_add(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_sub(i32* %ptr, i32 %val) {
@@ -44,7 +47,8 @@ define i32 @test_sub(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_min(i32* %ptr, i32 %val) {
@@ -56,7 +60,8 @@ define i32 @test_min(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_max(i32* %ptr, i32 %val) {
@@ -68,7 +73,8 @@ define i32 @test_max(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_umin(i32* %ptr, i32 %val) {
@@ -80,7 +86,8 @@ define i32 @test_umin(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_umax(i32* %ptr, i32 %val) {
@@ -92,7 +99,8 @@ define i32 @test_umax(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_and(i32* %ptr, i32 %val) {
@@ -104,7 +112,8 @@ define i32 @test_and(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_or(i32* %ptr, i32 %val) {
@@ -116,7 +125,8 @@ define i32 @test_or(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_xor(i32* %ptr, i32 %val) {
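
(The hunks above show only the CHECK lines and the define headers; the function bodies are unchanged by the patch. For orientation, a minimal reconstruction of one body, assuming the usual atomicrmw form these tests use; this is a sketch, not a verbatim quote of the file:)

define i32 @test_add_sketch(i32* %ptr, i32 %val) {
  ; previously selected directly to OpAtomicIAdd on %ptr; after this
  ; patch the pointer operand is first rewritten by an OpBitcast to the
  ; pointer type the backend deduced, and the atomic op consumes the
  ; bitcast result instead
  %r = atomicrmw add i32* %ptr, i32 %val seq_cst
  ret i32 %r
}
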
diff --git a/llvm/test/CodeGen/SPIRV/instructions/bitwise-i1.ll b/llvm/test/CodeGen/SPIRV/instructions/bitwise-i1.ll
new file mode 100644
index 0000000..8d3657b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/instructions/bitwise-i1.ll
@@ -0,0 +1,69 @@
+; This test ensures that LLVM IR bitwise instructions result in logical SPIR-V instructions
+; when applied to the i1 type
+
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: %[[#Char:]] = OpTypeInt 8 0
+; CHECK-DAG: %[[#Vec2Char:]] = OpTypeVector %[[#Char]] 2
+; CHECK-DAG: %[[#Bool:]] = OpTypeBool
+; CHECK-DAG: %[[#Vec2Bool:]] = OpTypeVector %[[#Bool]] 2
+
+; CHECK: OpBitwiseAnd %[[#Char]]
+; CHECK: OpBitwiseOr %[[#Char]]
+; CHECK: OpBitwiseXor %[[#Char]]
+; CHECK: OpBitwiseAnd %[[#Vec2Char]]
+; CHECK: OpBitwiseOr %[[#Vec2Char]]
+; CHECK: OpBitwiseXor %[[#Vec2Char]]
+
+; CHECK: OpLogicalAnd %[[#Bool]]
+
+; CHECK: OpLogicalAnd %[[#Bool]]
+; CHECK: OpLogicalOr %[[#Bool]]
+; CHECK: OpLogicalNotEqual %[[#Bool]]
+; CHECK: OpLogicalAnd %[[#Vec2Bool]]
+; CHECK: OpLogicalOr %[[#Vec2Bool]]
+; CHECK: OpLogicalNotEqual %[[#Vec2Bool]]
+
+define void @test1(i8 noundef %arg1, i8 noundef %arg2) {
+ %cond1 = and i8 %arg1, %arg2
+ %cond2 = or i8 %arg1, %arg2
+ %cond3 = xor i8 %arg1, %arg2
+ ret void
+}
+
+define void @test1v(<2 x i8> noundef %arg1, <2 x i8> noundef %arg2) {
+ %cond1 = and <2 x i8> %arg1, %arg2
+ %cond2 = or <2 x i8> %arg1, %arg2
+ %cond3 = xor <2 x i8> %arg1, %arg2
+ ret void
+}
+
+define void @test2(float noundef %real, float noundef %imag) {
+entry:
+ %realabs = tail call spir_func noundef float @_Z16__spirv_ocl_fabsf(float noundef %real)
+ %cond1 = fcmp oeq float %realabs, 1.000000e+00
+ %cond2 = fcmp oeq float %imag, 0.000000e+00
+ %cond3 = and i1 %cond1, %cond2
+ br i1 %cond3, label %midlbl, label %cleanup
+midlbl:
+ br label %cleanup
+cleanup:
+ ret void
+}
+
+define void @test3(i1 noundef %arg1, i1 noundef %arg2) {
+ %cond1 = and i1 %arg1, %arg2
+ %cond2 = or i1 %arg1, %arg2
+ %cond3 = xor i1 %arg1, %arg2
+ ret void
+}
+
+define void @test3v(<2 x i1> noundef %arg1, <2 x i1> noundef %arg2) {
+ %cond1 = and <2 x i1> %arg1, %arg2
+ %cond2 = or <2 x i1> %arg1, %arg2
+ %cond3 = xor <2 x i1> %arg1, %arg2
+ ret void
+}
+
+declare dso_local spir_func noundef float @_Z16__spirv_ocl_fabsf(float noundef)
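
(One mapping above deserves a note: xor over i1 becomes OpLogicalNotEqual because exclusive-or of two booleans is true exactly when the operands differ. A standalone illustration, not part of the test:)

define i1 @xor_is_ne(i1 %a, i1 %b) {
  ; true iff %a != %b, which is precisely what OpLogicalNotEqual computes
  %r = xor i1 %a, %b
  ret i1 %r
}
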
diff --git a/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll b/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll
index 641e2bf..31cd8bd 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll
@@ -1,7 +1,13 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --translator-compatibility-mode -o - | FileCheck %s --check-prefix=CHECK-COMPAT
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s --translator-compatibility-mode -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpName [[EQ:%.*]] "test_eq"
; CHECK-DAG: OpName [[NE:%.*]] "test_ne"
+; CHECK-COMPAT-DAG: OpName [[EQ:%.*]] "test_eq"
+; CHECK-COMPAT-DAG: OpName [[NE:%.*]] "test_ne"
; CHECK-DAG: OpName [[ULT:%.*]] "test_ult"
; CHECK-DAG: OpName [[SLT:%.*]] "test_slt"
; CHECK-DAG: OpName [[ULE:%.*]] "test_ule"
@@ -19,6 +25,9 @@
; CHECK-NEXT: [[R:%.*]] = OpPtrEqual {{%.+}} [[A]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
+; CHECK-COMPAT: [[EQ]] = OpFunction
+; CHECK-COMPAT-NOT: OpPtrEqual
+; CHECK-COMPAT: OpFunctionEnd
define i1 @test_eq(i16* %a, i16* %b) {
%r = icmp eq i16* %a, %b
ret i1 %r
@@ -31,6 +40,9 @@ define i1 @test_eq(i16* %a, i16* %b) {
; CHECK-NEXT: [[R:%.*]] = OpPtrNotEqual {{%.+}} [[A]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
+; CHECK-COMPAT: [[NE]] = OpFunction
+; CHECK-COMPAT-NOT: OpPtrNotEqual
+; CHECK-COMPAT: OpFunctionEnd
define i1 @test_ne(i16* %a, i16* %b) {
%r = icmp ne i16* %a, %b
ret i1 %r
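
(The CHECK-COMPAT lines are purely negative: they pin down that OpPtrEqual/OpPtrNotEqual must not appear in compatibility mode, but not what replaces them. Presumably the comparison is emulated through pointer-to-integer conversion; the sketch below states that expectation explicitly, and it is an assumption layered on top of this patch, not part of it:)

; CHECK-COMPAT: [[AI:%.*]] = OpConvertPtrToU
; CHECK-COMPAT: [[BI:%.*]] = OpConvertPtrToU
; CHECK-COMPAT: OpIEqual {{%.+}} [[AI]] [[BI]]
define i1 @test_eq_compat(i16* %a, i16* %b) {
  %r = icmp eq i16* %a, %b
  ret i1 %r
}
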
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
new file mode 100644
index 0000000..710a158
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
@@ -0,0 +1,25 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+
+; CHECK: OpFunction
+; CHECK: %[[FooArg:.*]] = OpVariable
+; CHECK: OpLifetimeStart %[[FooArg]], 0
+; CHECK: OpCopyMemorySized
+; CHECK: OpBitcast
+; CHECK: OpInBoundsPtrAccessChain
+; CHECK: OpLifetimeStop %[[FooArg]], 0
+
+%tprange = type { %tparray }
+%tparray = type { [2 x i64] }
+
+define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange) {
+ %RoundedRangeKernel = alloca %tprange, align 8
+  call void @llvm.lifetime.start.p0(i64 72, ptr nonnull %RoundedRangeKernel)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 %RoundedRangeKernel, ptr align 8 %_arg_UserRange, i64 16, i1 false)
+ %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 16
+  call void @llvm.lifetime.end.p0(i64 72, ptr nonnull %RoundedRangeKernel)
+ ret void
+}
+
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
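
(Note the size operand in the expected OpLifetimeStart/OpLifetimeStop is 0 rather than 72: SPIR-V requires Size to be 0 when the pointer already carries a concrete pointee type, so the byte count from the intrinsic is not propagated. A minimal standalone use of the intrinsics for comparison; the sizes here are illustrative, and the declarations are as above:)

define void @lifetime_sketch() {
  %p = alloca i32, align 4
  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %p)  ; 4-byte object
  store i32 0, ptr %p
  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %p)
  ret void
}
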
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll
index 93190f9..e0c84ee 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll
index aa879b2..12a4a86 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll
index a0d18d5..459bc6b 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll
index 247ebcc..4f9cd29 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll
index 13f4410..837bea0 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll
index 13ef118..475da2e 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll
index 93ef79a..b525c84 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll
index afe30d5..0985be9 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll
index 9b397ae..1a70057 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll
index a62bb0c..90c6cf5 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll
index 3fc2bcc..4551fa3 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll
index 9459946..a0d257b 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll
index cce7189..ba5dba7 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll
index bcc49c5..e16bde8 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll
index 26bc96b..cf887bb 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
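
(Every vector-reduce RUN line above carries the same mechanical substitution: the retired --spirv-extensions=NAME spelling becomes --spirv-ext=+NAME. The new flag takes a comma-separated list of extension names, each prefixed with + to enable or - to disable. A hypothetical RUN line showing the list syntax; the particular combination of extensions is only an example:)

; RUN: llc -O0 -mtriple=spirv32-unknown-unknown \
; RUN:   --spirv-ext=+SPV_INTEL_function_pointers,+SPV_KHR_no_integer_wrap_decoration \
; RUN:   %s -o - | FileCheck %s
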
diff --git a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll
new file mode 100644
index 0000000..7fae6ca
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll
@@ -0,0 +1,37 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: %[[#TYCHAR:]] = OpTypeInt 8 0
+; CHECK-DAG: %[[#TYCHARPTR:]] = OpTypePointer Function %[[#TYCHAR]]
+; CHECK-DAG: %[[#TYINT32:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#TYSTRUCTINT32:]] = OpTypeStruct %[[#TYINT32]]
+; CHECK-DAG: %[[#TYARRAY:]] = OpTypeArray %[[#TYSTRUCTINT32]] %[[#]]
+; CHECK-DAG: %[[#TYSTRUCT:]] = OpTypeStruct %[[#TYARRAY]]
+; CHECK-DAG: %[[#TYSTRUCTPTR:]] = OpTypePointer Function %[[#TYSTRUCT]]
+; CHECK-DAG: %[[#TYINT64:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#TYINT64PTR:]] = OpTypePointer Function %[[#TYINT64]]
+; CHECK: OpFunction
+; CHECK: %[[#PTRTOSTRUCT:]] = OpFunctionParameter %[[#TYSTRUCTPTR]]
+; CHECK: %[[#PTRTOCHAR:]] = OpBitcast %[[#TYCHARPTR]] %[[#PTRTOSTRUCT]]
+; CHECK-NEXT: OpInBoundsPtrAccessChain %[[#TYCHARPTR]] %[[#PTRTOCHAR]]
+; CHECK: OpFunction
+; CHECK: %[[#PTRTOSTRUCT2:]] = OpFunctionParameter %[[#TYSTRUCTPTR]]
+; CHECK: %[[#ELEM:]] = OpInBoundsPtrAccessChain %[[#TYSTRUCTPTR]] %[[#PTRTOSTRUCT2]]
+; CHECK-NEXT: %[[#TOLOAD:]] = OpBitcast %[[#TYINT64PTR]] %[[#ELEM]]
+; CHECK-NEXT: OpLoad %[[#TYINT64]] %[[#TOLOAD]]
+
+%struct.S = type { i32 }
+%struct.__wrapper_class = type { [7 x %struct.S] }
+
+define spir_kernel void @foo1(ptr noundef byval(%struct.__wrapper_class) align 4 %_arg_Arr) {
+entry:
+ %elem = getelementptr inbounds i8, ptr %_arg_Arr, i64 0
+ ret void
+}
+
+define spir_kernel void @foo2(ptr noundef byval(%struct.__wrapper_class) align 4 %_arg_Arr) {
+entry:
+ %elem = getelementptr inbounds %struct.__wrapper_class, ptr %_arg_Arr, i64 0
+ %data = load i64, ptr %elem
+ ret void
+}
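
(Both kernels receive the same byval struct pointer; what differs is the type at which the body uses it, and the CHECK lines show the OpBitcast landing on opposite sides of the access chain: before it when the GEP itself is the i8-typed use (foo1), and after it when the chain stays struct-typed and only the load disagrees (foo2). A third variant in the same spirit, sketched as the expected pattern rather than part of the test:)

define spir_kernel void @foo3(ptr noundef byval(%struct.__wrapper_class) align 4 %_arg_Arr) {
entry:
  ; an i8-typed GEP with a non-zero offset: expected to follow the foo1
  ; shape, i.e. OpBitcast to the i8 pointer type, then the access chain
  %elem = getelementptr inbounds i8, ptr %_arg_Arr, i64 4
  ret void
}
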
diff --git a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll
index a30d079..18752fd 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll
@@ -9,7 +9,7 @@
; CHECK-DAG: %[[#TYLONGPTR:]] = OpTypePointer Function %[[#TYLONG]]
; CHECK: %[[#PTRTOSTRUCT:]] = OpFunctionParameter %[[#TYSTRUCTPTR]]
; CHECK: %[[#PTRTOLONG:]] = OpBitcast %[[#TYLONGPTR]] %[[#PTRTOSTRUCT]]
-; CHECK: OpLoad %[[#TYLONG]] %[[#PTRTOLONG]]
+; CHECK-NEXT: OpLoad %[[#TYLONG]] %[[#PTRTOLONG]]
%struct.S = type { i32 }
%struct.__wrapper_class = type { [7 x %struct.S] }
diff --git a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-store.ll b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-store.ll
index 4701f02..202bcfb 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-store.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-store.ll
@@ -13,7 +13,7 @@
; CHECK: %[[#OBJ:]] = OpFunctionParameter %[[#TYSTRUCT]]
; CHECK: %[[#ARGPTR2:]] = OpFunctionParameter %[[#TYLONGPTR]]
; CHECK: %[[#PTRTOSTRUCT:]] = OpBitcast %[[#TYSTRUCTPTR]] %[[#ARGPTR2]]
-; CHECK: OpStore %[[#PTRTOSTRUCT]] %[[#OBJ]]
+; CHECK-NEXT: OpStore %[[#PTRTOSTRUCT]] %[[#OBJ]]
%struct.S = type { i32 }
%struct.__wrapper_class = type { [7 x %struct.S] }
diff --git a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll
index 062863a..7e9c621 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: %[[#INT8:]] = OpTypeInt 8 0
; CHECK: %[[#PTR1:]] = OpTypePointer CrossWorkgroup %[[#INT8]]
diff --git a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll
index aaf97f8..fc999ba 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: %[[#FLOAT32:]] = OpTypeFloat 32
; CHECK: %[[#PTR:]] = OpTypePointer CrossWorkgroup %[[#FLOAT32]]
diff --git a/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-addressspace.ll b/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-addressspace.ll
index 6d12023..a3a730a 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-addressspace.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-addressspace.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: %[[#INT:]] = OpTypeInt 32 0
; CHECK-DAG: %[[#PTR1:]] = OpTypePointer Function %[[#INT]]
diff --git a/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type-deduction-no-bitcast-to-generic.ll b/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type-deduction-no-bitcast-to-generic.ll
index 9e136ce..b74a344 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type-deduction-no-bitcast-to-generic.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type-deduction-no-bitcast-to-generic.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: %[[#IMAGE:]] = OpTypeImage %2 2D 0 0 0 0 Unknown ReadOnly
diff --git a/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type.ll b/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type.ll
index 1fcc6d9..b8f205a 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: %[[#FLOAT32:]] = OpTypeFloat 32
; CHECK-DAG: %[[#PTR1:]] = OpTypePointer Function %[[#FLOAT32]]
diff --git a/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll b/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll
index 1b4e7a3..1667abc 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: %[[#INT8:]] = OpTypeInt 8 0
; CHECK: %[[#PTR1:]] = OpTypePointer CrossWorkgroup %[[#INT8]]
diff --git a/llvm/test/CodeGen/SPIRV/pointers/nested-struct-opaque-pointers.ll b/llvm/test/CodeGen/SPIRV/pointers/nested-struct-opaque-pointers.ll
new file mode 100644
index 0000000..77b895c
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/nested-struct-opaque-pointers.ll
@@ -0,0 +1,20 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-NOT: OpTypeInt 8 0
+
+@GI = addrspace(1) constant i64 42
+
+@GS = addrspace(1) global {ptr addrspace(1), ptr addrspace(1)} { ptr addrspace(1) @GI, ptr addrspace(1) @GI }
+@GS2 = addrspace(1) global {ptr addrspace(1), ptr addrspace(1)} { ptr addrspace(1) @GS, ptr addrspace(1) @GS }
+@GS3 = addrspace(1) global {ptr addrspace(1), ptr addrspace(1)} { ptr addrspace(1) @GS2, ptr addrspace(1) @GS2 }
+
+@GPS = addrspace(1) global ptr addrspace(1) @GS3
+
+@GPI1 = addrspace(1) global ptr addrspace(1) @GI
+@GPI2 = addrspace(1) global ptr addrspace(1) @GPI1
+@GPI3 = addrspace(1) global ptr addrspace(1) @GPI2
+
+define spir_kernel void @foo() {
+ ret void
+}
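
(The lone CHECK-NOT is the whole point of this test: an opaque pointer whose pointee the backend cannot deduce would fall back to an 8-bit integer pointee, so the absence of OpTypeInt 8 0 shows that every link of the @GI/@GS/@GPS chains received a concrete type from its initializer. The converse case, sketched as an assumption rather than taken from this patch: a pointer global with a null initializer gives the deduction nothing to work with.)

@GU = addrspace(1) global ptr addrspace(1) null
; presumably this is the case that would reintroduce: OpTypeInt 8 0
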
diff --git a/llvm/test/CodeGen/SPIRV/pointers/store-operand-ptr-to-struct.ll b/llvm/test/CodeGen/SPIRV/pointers/store-operand-ptr-to-struct.ll
index 00b03c0..3a0d65e 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/store-operand-ptr-to-struct.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/store-operand-ptr-to-struct.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; TODO: OpFunctionParameter should be a pointer to the struct base type.
; XFAIL: *
diff --git a/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll b/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll
index 86f5f5b..6d4913f 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll
@@ -1,14 +1,14 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
-; CHECK: %[[TyInt8:.*]] = OpTypeInt 8 0
-; CHECK: %[[TyInt8Ptr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyInt8]]
-; CHECK: %[[TyStruct:.*]] = OpTypeStruct %[[TyInt8Ptr]] %[[TyInt8Ptr]]
+; CHECK: %[[TyInt64:.*]] = OpTypeInt 64 0
+; CHECK: %[[TyInt64Ptr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyInt64]]
+; CHECK: %[[TyStruct:.*]] = OpTypeStruct %[[TyInt64Ptr]] %[[TyInt64Ptr]]
; CHECK: %[[ConstStruct:.*]] = OpConstantComposite %[[TyStruct]] %[[ConstField:.*]] %[[ConstField]]
; CHECK: %[[TyStructPtr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyStruct]]
; CHECK: OpVariable %[[TyStructPtr]] {{[a-zA-Z]+}} %[[ConstStruct]]
-@a = addrspace(1) constant i32 123
+@a = addrspace(1) constant i64 42
@struct = addrspace(1) global {ptr addrspace(1), ptr addrspace(1)} { ptr addrspace(1) @a, ptr addrspace(1) @a }
define spir_kernel void @foo() {
diff --git a/llvm/test/CodeGen/SPIRV/pointers/two-bitcast-or-param-users.ll b/llvm/test/CodeGen/SPIRV/pointers/two-bitcast-or-param-users.ll
index 52180d5..23c3faa 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/two-bitcast-or-param-users.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/two-bitcast-or-param-users.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: %[[#INT:]] = OpTypeInt 32
; CHECK-DAG: %[[#GLOBAL_PTR_INT:]] = OpTypePointer CrossWorkgroup %[[#INT]]
diff --git a/llvm/test/CodeGen/SPIRV/pointers/two-subsequent-bitcasts.ll b/llvm/test/CodeGen/SPIRV/pointers/two-subsequent-bitcasts.ll
index 473c2a8..83234e3 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/two-subsequent-bitcasts.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/two-subsequent-bitcasts.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: %[[#float:]] = OpTypeFloat 32
; CHECK-DAG: %[[#pointer:]] = OpTypePointer CrossWorkgroup %[[#float]]
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-args-rev.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-args-rev.ll
new file mode 100644
index 0000000..ae7fb99
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-args-rev.ll
@@ -0,0 +1,28 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-DAG: OpName %[[FooArg:.*]] "known_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Foo:.*]] "foo"
+; CHECK-SPIRV-DAG: OpName %[[ArgToDeduce:.*]] "unknown_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Bar:.*]] "bar"
+; CHECK-SPIRV-DAG: %[[Long:.*]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[Void:.*]] = OpTypeVoid
+; CHECK-SPIRV-DAG: %[[LongPtr:.*]] = OpTypePointer CrossWorkgroup %[[Long]]
+; CHECK-SPIRV-DAG: %[[Fun:.*]] = OpTypeFunction %[[Void]] %[[LongPtr]]
+; CHECK-SPIRV: %[[Bar]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[ArgToDeduce]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionCall %[[Void]] %[[Foo]] %[[ArgToDeduce]]
+; CHECK-SPIRV: %[[Foo]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[FooArg]] = OpFunctionParameter %[[LongPtr]]
+
+define spir_kernel void @bar(ptr addrspace(1) %unknown_type_ptr) {
+entry:
+ %elem = getelementptr inbounds i32, ptr addrspace(1) %unknown_type_ptr, i64 0
+ call void @foo(ptr addrspace(1) %unknown_type_ptr)
+ ret void
+}
+
+define void @foo(ptr addrspace(1) %known_type_ptr) {
+entry:
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-args.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-args.ll
new file mode 100644
index 0000000..ee411f2
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-args.ll
@@ -0,0 +1,97 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-DAG: OpName %[[FooArg:.*]] "unknown_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Foo:.*]] "foo"
+; CHECK-SPIRV-DAG: OpName %[[BarArg:.*]] "known_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Bar:.*]] "bar"
+; CHECK-SPIRV-DAG: OpName %[[UntypedArg:.*]] "arg"
+; CHECK-SPIRV-DAG: OpName %[[FunUntypedArg:.*]] "foo_untyped_arg"
+; CHECK-SPIRV-DAG: OpName %[[UnusedArg1:.*]] "unused_arg1"
+; CHECK-SPIRV-DAG: OpName %[[Foo2Arg:.*]] "unknown_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Foo2:.*]] "foo2"
+; CHECK-SPIRV-DAG: OpName %[[Bar2Arg:.*]] "known_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Bar2:.*]] "bar2"
+; CHECK-SPIRV-DAG: OpName %[[Foo5Arg1:.*]] "unknown_type_ptr1"
+; CHECK-SPIRV-DAG: OpName %[[Foo5Arg2:.*]] "unknown_type_ptr2"
+; CHECK-SPIRV-DAG: OpName %[[Foo5:.*]] "foo5"
+; CHECK-SPIRV-DAG: OpName %[[Bar5Arg:.*]] "known_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Bar5:.*]] "bar5"
+; CHECK-SPIRV-DAG: %[[Char:.*]] = OpTypeInt 8 0
+; CHECK-SPIRV-DAG: %[[Long:.*]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[Half:.*]] = OpTypeFloat 16
+; CHECK-SPIRV-DAG: %[[Void:.*]] = OpTypeVoid
+; CHECK-SPIRV-DAG: %[[HalfConst:.*]] = OpConstant %[[Half]] 15360
+; CHECK-SPIRV-DAG: %[[CharPtr:.*]] = OpTypePointer CrossWorkgroup %[[Char]]
+; CHECK-SPIRV-DAG: %[[LongPtr:.*]] = OpTypePointer CrossWorkgroup %[[Long]]
+; CHECK-SPIRV-DAG: %[[Fun:.*]] = OpTypeFunction %[[Void]] %[[LongPtr]]
+; CHECK-SPIRV-DAG: %[[Fun2:.*]] = OpTypeFunction %[[Void]] %[[Half]] %[[LongPtr]]
+; CHECK-SPIRV-DAG: %[[Fun5:.*]] = OpTypeFunction %[[Void]] %[[Half]] %[[LongPtr]] %[[Half]] %[[LongPtr]] %[[Half]]
+; CHECK-SPIRV-DAG: %[[FunUntyped:.*]] = OpTypeFunction %[[Void]] %[[CharPtr]]
+
+; CHECK-SPIRV: %[[Foo]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[FooArg]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: %[[Bar]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[BarArg]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionCall %[[Void]] %[[Foo]] %[[BarArg]]
+
+; CHECK-SPIRV: %[[FunUntypedArg]] = OpFunction %[[Void]] None %[[FunUntyped]]
+; CHECK-SPIRV: %[[UntypedArg]] = OpFunctionParameter %[[CharPtr]]
+
+; CHECK-SPIRV: %[[Foo2]] = OpFunction %[[Void]] None %[[Fun2]]
+; CHECK-SPIRV: %[[UnusedArg1]] = OpFunctionParameter %[[Half]]
+; CHECK-SPIRV: %[[Foo2Arg]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: %[[Bar2]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[Bar2Arg]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionCall %[[Void]] %[[Foo2]] %[[HalfConst]] %[[Bar2Arg]]
+
+; CHECK-SPIRV: %[[Foo5]] = OpFunction %[[Void]] None %[[Fun5]]
+; CHECK-SPIRV: OpFunctionParameter %[[Half]]
+; CHECK-SPIRV: %[[Foo5Arg1]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionParameter %[[Half]]
+; CHECK-SPIRV: %[[Foo5Arg2]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionParameter %[[Half]]
+; CHECK-SPIRV: %[[Bar5]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[Bar5Arg]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionCall %[[Void]] %[[Foo5]] %[[HalfConst]] %[[Bar5Arg]] %[[HalfConst]] %[[Bar5Arg]] %[[HalfConst]]
+
+define void @foo(ptr addrspace(1) %unknown_type_ptr) {
+entry:
+ ret void
+}
+
+define spir_kernel void @bar(ptr addrspace(1) %known_type_ptr) {
+entry:
+ %elem = getelementptr inbounds i32, ptr addrspace(1) %known_type_ptr, i64 0
+ call void @foo(ptr addrspace(1) %known_type_ptr)
+ ret void
+}
+
+define void @foo_untyped_arg(ptr addrspace(1) %arg) {
+entry:
+ ret void
+}
+
+define void @foo2(half %unused_arg1, ptr addrspace(1) %unknown_type_ptr) {
+entry:
+ ret void
+}
+
+define spir_kernel void @bar2(ptr addrspace(1) %known_type_ptr) {
+entry:
+ %elem = getelementptr inbounds i32, ptr addrspace(1) %known_type_ptr, i64 0
+ call void @foo2(half 1.0, ptr addrspace(1) %known_type_ptr)
+ ret void
+}
+
+define void @foo5(half %unused_arg1, ptr addrspace(1) %unknown_type_ptr1, half %unused_arg2, ptr addrspace(1) %unknown_type_ptr2, half %unused_arg3) {
+entry:
+ ret void
+}
+
+define spir_kernel void @bar5(ptr addrspace(1) %known_type_ptr) {
+entry:
+ %elem = getelementptr inbounds i32, ptr addrspace(1) %known_type_ptr, i64 0
+ call void @foo5(half 1.0, ptr addrspace(1) %known_type_ptr, half 1.0, ptr addrspace(1) %known_type_ptr, half 1.0)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-chain.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-chain.ll
new file mode 100644
index 0000000..1071d34
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-chain.ll
@@ -0,0 +1,57 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-DAG: OpName %[[ArgCum:.*]] "_arg_cum"
+; CHECK-SPIRV-DAG: OpName %[[FunTest:.*]] "test"
+; CHECK-SPIRV-DAG: OpName %[[Addr:.*]] "addr"
+; CHECK-SPIRV-DAG: OpName %[[StubObj:.*]] "stub_object"
+; CHECK-SPIRV-DAG: OpName %[[MemOrder:.*]] "mem_order"
+; CHECK-SPIRV-DAG: OpName %[[FooStub:.*]] "foo_stub"
+; CHECK-SPIRV-DAG: OpName %[[FooObj:.*]] "foo_object"
+; CHECK-SPIRV-DAG: OpName %[[FooMemOrder:.*]] "mem_order"
+; CHECK-SPIRV-DAG: OpName %[[FooFunc:.*]] "foo"
+; CHECK-SPIRV-DAG: %[[TyLong:.*]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[TyVoid:.*]] = OpTypeVoid
+; CHECK-SPIRV-DAG: %[[TyPtrLong:.*]] = OpTypePointer CrossWorkgroup %[[TyLong]]
+; CHECK-SPIRV-DAG: %[[TyFunPtrLong:.*]] = OpTypeFunction %[[TyVoid]] %[[TyPtrLong]]
+; CHECK-SPIRV-DAG: %[[TyGenPtrLong:.*]] = OpTypePointer Generic %[[TyLong]]
+; CHECK-SPIRV-DAG: %[[TyFunGenPtrLongLong:.*]] = OpTypeFunction %[[TyVoid]] %[[TyGenPtrLong]] %[[TyLong]]
+; CHECK-SPIRV-DAG: %[[Const3:.*]] = OpConstant %[[TyLong]] 3
+; CHECK-SPIRV: %[[FunTest]] = OpFunction %[[TyVoid]] None %[[TyFunPtrLong]]
+; CHECK-SPIRV: %[[ArgCum]] = OpFunctionParameter %[[TyPtrLong]]
+; CHECK-SPIRV: OpFunctionCall %[[TyVoid]] %[[FooFunc]] %[[Addr]] %[[Const3]]
+; CHECK-SPIRV: %[[FooStub]] = OpFunction %[[TyVoid]] None %[[TyFunGenPtrLongLong]]
+; CHECK-SPIRV: %[[StubObj]] = OpFunctionParameter %[[TyGenPtrLong]]
+; CHECK-SPIRV: %[[MemOrder]] = OpFunctionParameter %[[TyLong]]
+; CHECK-SPIRV: %[[FooFunc]] = OpFunction %[[TyVoid]] None %[[TyFunGenPtrLongLong]]
+; CHECK-SPIRV: %[[FooObj]] = OpFunctionParameter %[[TyGenPtrLong]]
+; CHECK-SPIRV: %[[FooMemOrder]] = OpFunctionParameter %[[TyLong]]
+; CHECK-SPIRV: OpFunctionCall %[[TyVoid]] %[[FooStub]] %[[FooObj]] %[[FooMemOrder]]
+
+define spir_kernel void @test(ptr addrspace(1) noundef align 4 %_arg_cum) {
+entry:
+ %lptr = getelementptr inbounds i32, ptr addrspace(1) %_arg_cum, i64 1
+ %addr = addrspacecast ptr addrspace(1) %lptr to ptr addrspace(4)
+ %object = bitcast ptr addrspace(4) %addr to ptr addrspace(4)
+ call spir_func void @foo(ptr addrspace(4) %object, i32 3)
+ %halfptr = getelementptr inbounds half, ptr addrspace(1) %_arg_cum, i64 1
+ %halfaddr = addrspacecast ptr addrspace(1) %halfptr to ptr addrspace(4)
+ call spir_func void @foo(ptr addrspace(4) %halfaddr, i32 3)
+ %dblptr = getelementptr inbounds double, ptr addrspace(1) %_arg_cum, i64 1
+ %dbladdr = addrspacecast ptr addrspace(1) %dblptr to ptr addrspace(4)
+ call spir_func void @foo(ptr addrspace(4) %dbladdr, i32 3)
+ ret void
+}
+
+define void @foo_stub(ptr addrspace(4) noundef %stub_object, i32 noundef %mem_order) {
+entry:
+ %object.addr = alloca ptr addrspace(4)
+ %object.addr.ascast = addrspacecast ptr %object.addr to ptr addrspace(4)
+ store ptr addrspace(4) %stub_object, ptr addrspace(4) %object.addr.ascast
+ ret void
+}
+
+define void @foo(ptr addrspace(4) noundef %foo_object, i32 noundef %mem_order) {
+ tail call void @foo_stub(ptr addrspace(4) noundef %foo_object, i32 noundef %mem_order)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-complex.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-complex.ll
new file mode 100644
index 0000000..ea7a22c
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-complex.ll
@@ -0,0 +1,29 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-DAG: %[[Long:.*]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[Void:.*]] = OpTypeVoid
+; CHECK-SPIRV-DAG: %[[Struct:.*]] = OpTypeStruct %[[Long]]
+; CHECK-SPIRV-DAG: %[[StructPtr:.*]] = OpTypePointer Generic %[[Struct]]
+; CHECK-SPIRV-DAG: %[[Function:.*]] = OpTypeFunction %[[Void]] %[[StructPtr]]
+; CHECK-SPIRV-DAG: %[[Const:.*]] = OpConstantNull %[[Struct]]
+; CHECK-SPIRV-DAG: %[[CrossStructPtr:.*]] = OpTypePointer CrossWorkgroup %[[Struct]]
+; CHECK-SPIRV-DAG: %[[Var:.*]] = OpVariable %[[CrossStructPtr]] CrossWorkgroup %[[Const]]
+; CHECK-SPIRV: %[[Foo:.*]] = OpFunction %[[Void]] None %[[Function]]
+; CHECK-SPIRV-NEXT: OpFunctionParameter %[[StructPtr]]
+; CHECK-SPIRV: %[[Casted:.*]] = OpPtrCastToGeneric %[[StructPtr]] %[[Var]]
+; CHECK-SPIRV-NEXT: OpFunctionCall %[[Void]] %[[Foo]] %[[Casted]]
+
+%struct.global_ctor_dtor = type { i32 }
+@g1 = addrspace(1) global %struct.global_ctor_dtor zeroinitializer
+
+define linkonce_odr spir_func void @foo(ptr addrspace(4) %this) {
+entry:
+ ret void
+}
+
+define internal spir_func void @bar() {
+entry:
+ call spir_func void @foo(ptr addrspace(4) addrspacecast (ptr addrspace(1) @g1 to ptr addrspace(4)))
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-rev.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-rev.ll
new file mode 100644
index 0000000..76769ab
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-rev.ll
@@ -0,0 +1,28 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-DAG: OpName %[[FooArg:.*]] "known_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Foo:.*]] "foo"
+; CHECK-SPIRV-DAG: OpName %[[ArgToDeduce:.*]] "unknown_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Bar:.*]] "bar"
+; CHECK-SPIRV-DAG: %[[Long:.*]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[Void:.*]] = OpTypeVoid
+; CHECK-SPIRV-DAG: %[[LongPtr:.*]] = OpTypePointer CrossWorkgroup %[[Long]]
+; CHECK-SPIRV-DAG: %[[Fun:.*]] = OpTypeFunction %[[Void]] %[[LongPtr]]
+; CHECK-SPIRV: %[[Bar]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[ArgToDeduce]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionCall %[[Void]] %[[Foo]] %[[ArgToDeduce]]
+; CHECK-SPIRV: %[[Foo]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[FooArg]] = OpFunctionParameter %[[LongPtr]]
+
+define spir_kernel void @bar(ptr addrspace(1) %unknown_type_ptr) {
+entry:
+ call spir_func void @foo(ptr addrspace(1) %unknown_type_ptr)
+ ret void
+}
+
+define void @foo(ptr addrspace(1) %known_type_ptr) {
+entry:
+ %elem = getelementptr inbounds i32, ptr addrspace(1) %known_type_ptr, i64 0
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call.ll
new file mode 100644
index 0000000..8cbf360
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call.ll
@@ -0,0 +1,28 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-DAG: OpName %[[FooArg:.*]] "known_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Foo:.*]] "foo"
+; CHECK-SPIRV-DAG: OpName %[[ArgToDeduce:.*]] "unknown_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Bar:.*]] "bar"
+; CHECK-SPIRV-DAG: %[[Long:.*]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[Void:.*]] = OpTypeVoid
+; CHECK-SPIRV-DAG: %[[LongPtr:.*]] = OpTypePointer CrossWorkgroup %[[Long]]
+; CHECK-SPIRV-DAG: %[[Fun:.*]] = OpTypeFunction %[[Void]] %[[LongPtr]]
+; CHECK-SPIRV: %[[Foo]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[FooArg]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: %[[Bar]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[ArgToDeduce]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionCall %[[Void]] %[[Foo]] %[[ArgToDeduce]]
+
+define void @foo(ptr addrspace(1) %known_type_ptr) {
+entry:
+ %elem = getelementptr inbounds i32, ptr addrspace(1) %known_type_ptr, i64 0
+ ret void
+}
+
+define spir_kernel void @bar(ptr addrspace(1) %unknown_type_ptr) {
+entry:
+ call spir_func void @foo(ptr addrspace(1) %unknown_type_ptr)
+ ret void
+}
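
(Read together, the type-deduce tests above pin down one recurring property: a parameter's pointee type can be established by a typed use on either side of a call edge, caller or callee, in either definition order, and the chain/complex variants extend this through addrspacecasts and global initializers. The shared core, reduced to two functions; this paraphrases the tests above rather than adding a new one:)

define void @typed(ptr addrspace(1) %p) {
  %e = getelementptr inbounds i32, ptr addrspace(1) %p, i64 0   ; the only typed use
  ret void
}

define void @untyped(ptr addrspace(1) %q) {
  call void @typed(ptr addrspace(1) %q)   ; %q inherits the i32 pointee from @typed
  ret void
}

; expected: both parameters end up with the same OpTypePointer CrossWorkgroup <i32> type
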
diff --git a/llvm/test/CodeGen/SPIRV/pointers/typeof-ptr-int.ll b/llvm/test/CodeGen/SPIRV/pointers/typeof-ptr-int.ll
new file mode 100644
index 0000000..f144418
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/typeof-ptr-int.ll
@@ -0,0 +1,29 @@
+; This test checks that the two functions below get different SPIR-V type
+; definitions, even though their LLVM function types are identical.
+
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpName %[[Fun32:.*]] "tp_arg_i32"
+; CHECK-DAG: OpName %[[Fun64:.*]] "tp_arg_i64"
+; CHECK-DAG: %[[TyI32:.*]] = OpTypeInt 32 0
+; CHECK-DAG: %[[TyVoid:.*]] = OpTypeVoid
+; CHECK-DAG: %[[TyPtr32:.*]] = OpTypePointer Function %[[TyI32]]
+; CHECK-DAG: %[[TyFun32:.*]] = OpTypeFunction %[[TyVoid]] %[[TyPtr32]]
+; CHECK-DAG: %[[TyI64:.*]] = OpTypeInt 64 0
+; CHECK-DAG: %[[TyPtr64:.*]] = OpTypePointer Function %[[TyI64]]
+; CHECK-DAG: %[[TyFun64:.*]] = OpTypeFunction %[[TyVoid]] %[[TyPtr64]]
+; CHECK-DAG: %[[Fun32]] = OpFunction %[[TyVoid]] None %[[TyFun32]]
+; CHECK-DAG: %[[Fun64]] = OpFunction %[[TyVoid]] None %[[TyFun64]]
+
+define spir_kernel void @tp_arg_i32(ptr %ptr) {
+entry:
+ store i32 1, ptr %ptr
+ ret void
+}
+
+define spir_kernel void @tp_arg_i64(ptr %ptr) {
+entry:
+ store i64 1, ptr %ptr
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/relationals.ll b/llvm/test/CodeGen/SPIRV/relationals.ll
index 1644dc7..f4fcf4d 100644
--- a/llvm/test/CodeGen/SPIRV/relationals.ll
+++ b/llvm/test/CodeGen/SPIRV/relationals.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
declare dso_local spir_func <4 x i8> @_Z13__spirv_IsNanIDv4_aDv4_fET_T0_(<4 x float>)
declare dso_local spir_func <4 x i8> @_Z13__spirv_IsInfIDv4_aDv4_fET_T0_(<4 x float>)
diff --git a/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll b/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll
index 329399b..2ea5c76 100644
--- a/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll
+++ b/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll
@@ -1,5 +1,6 @@
; RUN: llc -mtriple=spirv-unknown-unknown -O0 %s -o - | FileCheck %s
+; CHECK-DAG: OpDecorate %[[#SubgroupLocalInvocationId:]] BuiltIn SubgroupLocalInvocationId
; CHECK-DAG: %[[#bool:]] = OpTypeBool
; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0
; CHECK-DAG: %[[#uint_0:]] = OpConstant %[[#uint]] 0
@@ -37,10 +38,10 @@ l1_continue:
; CHECK-NEXT: OpBranch %[[#l1_header]]
l1_end:
- %call = call spir_func i32 @_Z3absi(i32 0) [ "convergencectrl"(token %tl1) ]
+ %call = call i32 @__hlsl_wave_get_lane_index() [ "convergencectrl"(token %tl1) ]
br label %end
; CHECK-DAG: %[[#l1_end]] = OpLabel
-; CHECK-DAG: %[[#]] = OpFunctionCall
+; CHECK-DAG: %[[#]] = OpLoad %[[#]] %[[#SubgroupLocalInvocationId]]
; CHECK-NEXT: OpBranch %[[#end:]]
l2:
@@ -76,6 +77,4 @@ declare token @llvm.experimental.convergence.entry()
declare token @llvm.experimental.convergence.control()
declare token @llvm.experimental.convergence.loop()
-; This intrinsic is not convergent. This is only because the backend doesn't
-; support convergent operations yet.
-declare spir_func i32 @_Z3absi(i32) convergent
+declare i32 @__hlsl_wave_get_lane_index() convergent
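
(The replacement callee explains the two CHECK edits above: unlike the old _Z3absi stand-in, __hlsl_wave_get_lane_index is recognized by the backend and lowered not to an OpFunctionCall but to a load of a builtin variable, hence the new CHECK-DAG for the SubgroupLocalInvocationId decoration. A minimal sketch of just that lowering, detached from the CFG structure this test really exercises; it assumes the builtin keeps this name and convergence contract:)

declare token @llvm.experimental.convergence.entry()
declare i32 @__hlsl_wave_get_lane_index() convergent

define i32 @lane() {
entry:
  %t = call token @llvm.experimental.convergence.entry()
  ; expected: OpLoad of the variable decorated BuiltIn SubgroupLocalInvocationId
  %lane = call i32 @__hlsl_wave_get_lane_index() [ "convergencectrl"(token %t) ]
  ret i32 %lane
}
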
diff --git a/llvm/test/CodeGen/SPIRV/simple.ll b/llvm/test/CodeGen/SPIRV/simple.ll
index de9efa8..63c1596 100644
--- a/llvm/test/CodeGen/SPIRV/simple.ll
+++ b/llvm/test/CodeGen/SPIRV/simple.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; Support for doubles is required.
; CHECK: OpCapability Float64
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchangeExplicit_cl20.ll b/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchangeExplicit_cl20.ll
index fdb26ba..e0c4779 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchangeExplicit_cl20.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchangeExplicit_cl20.ll
@@ -1,4 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --mattr=+spirv1.3 %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --mattr=+spirv1.3 %s -o - -filetype=obj | spirv-val %}
;; __kernel void testAtomicCompareExchangeExplicit_cl20(
;; volatile global atomic_int* object,
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/BitReversePref.ll b/llvm/test/CodeGen/SPIRV/transcoding/BitReversePref.ll
index 55161e6..11b0578 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/BitReversePref.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/BitReversePref.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-linux %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: OpDecorate %[[#FUNC_NAME:]] LinkageAttributes "_Z10BitReversei"
; CHECK-NOT: OpBitReverse
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange.ll b/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange.ll
index 95f3673..b63c1c6 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-DAG: %[[#]] = OpBuildNDRange %[[#]] %[[#GWS:]] %[[#LWS:]] %[[#GWO:]]
; CHECK-SPIRV-DAG: %[[#GWS]] = OpConstant %[[#]] 123
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange_2.ll b/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange_2.ll
index a2ae808..65c992c 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange_2.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange_2.ll
@@ -19,6 +19,7 @@
;; bash$ $PATH_TO_GEN/bin/clang -cc1 -x cl -cl-std=CL2.0 -triple spir64-unknown-unknown -emit-llvm -include opencl-20.h BuildNDRange_2.cl -o BuildNDRange_2.ll
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; TODO(#60133): Requires updates following opaque pointer migration.
; XFAIL: *
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtr.ll b/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtr.ll
index 3403695..93aecc5 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtr.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtr.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; kernel void testConvertPtrToU(global int *a, global unsigned long *res) {
;; res[0] = (unsigned long)&a[0];
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/DecorationAlignment.ll b/llvm/test/CodeGen/SPIRV/transcoding/DecorationAlignment.ll
index 2e9b4a4..d4fc5c3 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/DecorationAlignment.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/DecorationAlignment.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpDecorate %[[#ALIGNMENT:]] Alignment 16
; CHECK-SPIRV: %[[#ALIGNMENT]] = OpFunctionParameter %[[#]]
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/DecorationMaxByteOffset.ll b/llvm/test/CodeGen/SPIRV/transcoding/DecorationMaxByteOffset.ll
index 64f25b7..966d835 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/DecorationMaxByteOffset.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/DecorationMaxByteOffset.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#PTR_ID:]] "ptr"
; CHECK-SPIRV: OpName %[[#PTR2_ID:]] "ptr2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll b/llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll
index 2f423c2..67c3380 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-DAG: %[[#int:]] = OpTypeInt 32 0
; CHECK-SPIRV-DAG: %[[#int2:]] = OpTypeVector %[[#int]] 2
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/ExecutionMode_SPIR_to_SPIRV.ll b/llvm/test/CodeGen/SPIRV/transcoding/ExecutionMode_SPIR_to_SPIRV.ll
index 6d6dd24..6e8726c 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/ExecutionMode_SPIR_to_SPIRV.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/ExecutionMode_SPIR_to_SPIRV.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-DAG: OpEntryPoint Kernel %[[#WORKER:]] "worker"
; CHECK-SPIRV-DAG: OpExecutionMode %[[#WORKER]] LocalSizeHint 128 10 1
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/GlobalFunAnnotate.ll b/llvm/test/CodeGen/SPIRV/transcoding/GlobalFunAnnotate.ll
index 2796dcb..33bece5 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/GlobalFunAnnotate.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/GlobalFunAnnotate.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-linux %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpDecorate %[[#]] UserSemantic "annotation_on_function"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
index 02d1250..e405ef0 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
@@ -7,7 +7,7 @@
;;
;; Positive tests:
;;
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-NEGATIVE
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-NEGATIVE
;;
;; Negative tests:
;;
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_cmpxchg.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_cmpxchg.ll
index 331960c..417b89e 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_cmpxchg.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_cmpxchg.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks that the backend can correctly translate the
;; atomic_cmpxchg OpenCL C 1.2 built-in function [1] into the corresponding SPIR-V
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_legacy.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_legacy.ll
index 95eb6ad..3180b57 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_legacy.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_legacy.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks that the backend can correctly translate the
;; legacy atomic OpenCL C 1.2 built-in functions [1] into the corresponding SPIR-V
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_work_item_fence.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_work_item_fence.ll
index 0f3a62a..c94c130 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_work_item_fence.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_work_item_fence.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks that the backend can correctly translate the
;; atomic_work_item_fence OpenCL C 2.0 built-in function [1] into the corresponding
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/barrier.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/barrier.ll
index a126d94..cf4a247 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/barrier.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/barrier.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks that the backend is capable of correctly translating
;; barrier OpenCL C 1.2 built-in function [1] into corresponding SPIR-V
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/sub_group_mask.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/sub_group_mask.ll
index 42b127c..5d9840d 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/sub_group_mask.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/sub_group_mask.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpCapability GroupNonUniformBallot
; CHECK-SPIRV: OpDecorate %[[#]] BuiltIn SubgroupGtMask
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/work_group_barrier.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/work_group_barrier.ll
index 0874e6f..0702fd0 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/work_group_barrier.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/work_group_barrier.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks that the backend is capable of correctly translating
;; sub_group_barrier built-in function [1] from cl_khr_subgroups extension into
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/atomic_flag.ll b/llvm/test/CodeGen/SPIRV/transcoding/atomic_flag.ll
index 3c563d3..20204ac 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/atomic_flag.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/atomic_flag.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; Types:
; CHECK-DAG: %[[#INT:]] = OpTypeInt 32
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/atomic_load_store.ll b/llvm/test/CodeGen/SPIRV/transcoding/atomic_load_store.ll
index d013abc..3e5a3ac 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/atomic_load_store.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/atomic_load_store.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; Check 'LLVM ==> SPIR-V' conversion of atomic_load and atomic_store.
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/bitcast.ll b/llvm/test/CodeGen/SPIRV/transcoding/bitcast.ll
index 8dbf4d2..2c0fc39 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/bitcast.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/bitcast.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; Check the bitcast is translated back to bitcast
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/block_w_struct_return.ll b/llvm/test/CodeGen/SPIRV/transcoding/block_w_struct_return.ll
index 5ecd7f7..2249cbe 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/block_w_struct_return.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/block_w_struct_return.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV1_4
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; TODO(#60133): Requires updates following opaque pointer migration.
; XFAIL: *
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_calls.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_calls.ll
index 9b1ce76..0a02a8b 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/builtin_calls.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_calls.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-DAG: OpDecorate %[[#Id:]] BuiltIn GlobalInvocationId
; CHECK-SPIRV-DAG: OpDecorate %[[#Id:]] BuiltIn GlobalLinearId
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll
index 8286671..5074893 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpDecorate %[[#Id:]] BuiltIn GlobalLinearId
; CHECK-SPIRV: %[[#Id:]] = OpVariable %[[#]]
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_arithmetics.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_arithmetics.ll
index 22aa40c..d0c4dff 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_arithmetics.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_arithmetics.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-linux %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; The IR was generated from the following source:
;; #include <CL/sycl.hpp>
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_opt.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_opt.ll
index 5b3474f..3885f07 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_opt.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_opt.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-linux %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; The IR was generated from the following source:
;; #include <CL/sycl.hpp>
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/check_ro_qualifier.ll b/llvm/test/CodeGen/SPIRV/transcoding/check_ro_qualifier.ll
index 6de610b..824ca1b2 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/check_ro_qualifier.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/check_ro_qualifier.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: %[[#IMAGE_TYPE:]] = OpTypeImage
; CHECK-SPIRV: %[[#IMAGE_ARG:]] = OpFunctionParameter %[[#IMAGE_TYPE]]
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll b/llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll
index 52b7dac..d7e87c0 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll
@@ -19,6 +19,7 @@
;; }
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-DAG: OpCapability Sampled1D
; CHECK-SPIRV-DAG: OpCapability SampledBuffer
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/clk_event_t.ll b/llvm/test/CodeGen/SPIRV/transcoding/clk_event_t.ll
index 9054454..0cd75bb 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/clk_event_t.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/clk_event_t.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpTypeDeviceEvent
; CHECK-SPIRV: OpFunction
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/enqueue_kernel.ll b/llvm/test/CodeGen/SPIRV/transcoding/enqueue_kernel.ll
index cf124ec..d23b068 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/enqueue_kernel.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/enqueue_kernel.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; TODO(#60133): Requires updates following opaque pointer migration.
; XFAIL: *
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/explicit-conversions.ll b/llvm/test/CodeGen/SPIRV/transcoding/explicit-conversions.ll
index c186a81..49b84c1 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/explicit-conversions.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/explicit-conversions.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpSatConvertSToU
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/extract_insert_value.ll b/llvm/test/CodeGen/SPIRV/transcoding/extract_insert_value.ll
index fd29bc8..0ed1dc7 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/extract_insert_value.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/extract_insert_value.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; TODO(#60133): Requires updates following opaque pointer migration.
; XFAIL: *
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fadd.ll b/llvm/test/CodeGen/SPIRV/transcoding/fadd.ll
index 78d9a23..af76c0e 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fadd.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fadd.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#r1:]] "r1"
; CHECK-SPIRV: OpName %[[#r2:]] "r2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll b/llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll
index cfdcc72..550ec1a 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: %[[#]] = OpExtInst %[[#]] %[[#]] fclamp
; CHECK-SPIRV-NOT: %[[#]] = OpExtInst %[[#]] %[[#]] clamp
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll b/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll
index 572ccc3..46eaba9 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#r1:]] "r1"
; CHECK-SPIRV: OpName %[[#r2:]] "r2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll b/llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll
index d0ed564..79b7868 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#r1:]] "r1"
; CHECK-SPIRV: OpName %[[#r2:]] "r2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fmod.ll b/llvm/test/CodeGen/SPIRV/transcoding/fmod.ll
index f506787b..683b5c2 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fmod.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fmod.ll
@@ -2,6 +2,7 @@
;; { out = fmod( in1, in2 ); }
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: %[[#]] = OpExtInst %[[#]] %[[#]] fmod %[[#]] %[[#]]
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fmul.ll b/llvm/test/CodeGen/SPIRV/transcoding/fmul.ll
index 886077a..fdab29c 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fmul.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fmul.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#r1:]] "r1"
; CHECK-SPIRV: OpName %[[#r2:]] "r2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fneg.ll b/llvm/test/CodeGen/SPIRV/transcoding/fneg.ll
index e17601a..60bbfe6 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fneg.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fneg.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#r1:]] "r1"
; CHECK-SPIRV: OpName %[[#r2:]] "r2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fp_contract_reassoc_fast_mode.ll b/llvm/test/CodeGen/SPIRV/transcoding/fp_contract_reassoc_fast_mode.ll
index c035c35..974043c 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fp_contract_reassoc_fast_mode.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fp_contract_reassoc_fast_mode.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-NOT: OpCapability FPFastMathModeINTEL
; CHECK-SPIRV: OpName %[[#mu:]] "mul"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/frem.ll b/llvm/test/CodeGen/SPIRV/transcoding/frem.ll
index ecb8f6f..d36ba7f 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/frem.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/frem.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#r1:]] "r1"
; CHECK-SPIRV: OpName %[[#r2:]] "r2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fsub.ll b/llvm/test/CodeGen/SPIRV/transcoding/fsub.ll
index 99d0d0e..3677c00 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fsub.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fsub.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#r1:]] "r1"
; CHECK-SPIRV: OpName %[[#r2:]] "r2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/get_image_num_mip_levels.ll b/llvm/test/CodeGen/SPIRV/transcoding/get_image_num_mip_levels.ll
index dc307c7..fd24196 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/get_image_num_mip_levels.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/get_image_num_mip_levels.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; Types:
; CHECK-DAG: %[[#INT:]] = OpTypeInt 32
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/global_block.ll b/llvm/test/CodeGen/SPIRV/transcoding/global_block.ll
index 2f44e19..ff1bec4 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/global_block.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/global_block.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV1_4
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; TODO(#60133): Requires updates following opaque pointer migration.
; XFAIL: *
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll b/llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll
index 6aa9faa..2412f40 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-DAG: %[[#int:]] = OpTypeInt 32 0
; CHECK-SPIRV-DAG: %[[#float:]] = OpTypeFloat 32
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/isequal.ll b/llvm/test/CodeGen/SPIRV/transcoding/isequal.ll
index 3c818af..c5f3f9e 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/isequal.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/isequal.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-NOT: OpSConvert
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/relationals_double.ll b/llvm/test/CodeGen/SPIRV/transcoding/relationals_double.ll
index f771854..de7673a 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/relationals_double.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/relationals_double.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks following SYCL relational builtins with double and double2
;; types:
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/relationals_float.ll b/llvm/test/CodeGen/SPIRV/transcoding/relationals_float.ll
index 1f55ceb..69a4a30 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/relationals_float.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/relationals_float.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks following SYCL relational builtins with float and float2
;; types:
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/relationals_half.ll b/llvm/test/CodeGen/SPIRV/transcoding/relationals_half.ll
index 864fb4f..d6a7fda 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/relationals_half.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/relationals_half.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks following SYCL relational builtins with half and half2 types:
;; isfinite, isinf, isnan, isnormal, signbit, isequal, isnotequal, isgreater
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll b/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
index 3551030..e0172ec 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
@@ -1,6 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
;
-; CHECK-SPIRV-DAG: %[[#i8:]] = OpTypeInt 8 0
; CHECK-SPIRV-DAG: %[[#i32:]] = OpTypeInt 32 0
; CHECK-SPIRV-DAG: %[[#one:]] = OpConstant %[[#i32]] 1
; CHECK-SPIRV-DAG: %[[#two:]] = OpConstant %[[#i32]] 2
@@ -13,7 +12,6 @@
; CHECK-SPIRV: %[[#test_arr2:]] = OpVariable %[[#const_i32x3_ptr]] UniformConstant %[[#test_arr_init]]
; CHECK-SPIRV: %[[#test_arr:]] = OpVariable %[[#const_i32x3_ptr]] UniformConstant %[[#test_arr_init]]
-; CHECK-SPIRV-DAG: %[[#const_i8_ptr:]] = OpTypePointer UniformConstant %[[#i8]]
; CHECK-SPIRV-DAG: %[[#i32x3_ptr:]] = OpTypePointer Function %[[#i32x3]]
; CHECK-SPIRV: %[[#arr:]] = OpVariable %[[#i32x3_ptr]] Function
diff --git a/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir b/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
index 3b308ce..adeec15b 100644
--- a/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
+++ b/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
@@ -25,6 +25,8 @@
name: autogen_SD21418
alignment: 4
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
registers:
- { id: 0, class: vr128bit }
- { id: 1, class: vr128bit }
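The SystemZ MIR tests in this commit gain an explicit adjustsStack: true under frameInfo. With the zero-size ADJCALLSTACKDOWN/ADJCALLSTACKUP pseudos removed from the bodies (see the cond-move and regalloc-hints hunks further down), the flag can no longer be inferred from the instruction stream, so it must be preset for any function that makes calls. A minimal fragment of the expected header shape (hypothetical function name; the embedded IR module and register lists are omitted):

---
name: calls_foo
tracksRegLiveness: true
frameInfo:
  adjustsStack: true
  hasCalls: true
body: |
  bb.0:
    CallBRASL @foo, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc
    Return
...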
diff --git a/llvm/test/CodeGen/SystemZ/atomic-load-06.ll b/llvm/test/CodeGen/SystemZ/atomic-load-06.ll
index c9c5504..60ff780 100644
--- a/llvm/test/CodeGen/SystemZ/atomic-load-06.ll
+++ b/llvm/test/CodeGen/SystemZ/atomic-load-06.ll
@@ -4,9 +4,7 @@
define float @f1(ptr %src) {
; CHECK-LABEL: f1:
-; CHECK: lgf [[R:%r[0-9]+]], 0(%r2)
-; CHECK: sllg [[R]], [[R]], 32
-; CHECK: ldgr %f0, [[R]]
+; CHECK: le %f0, 0(%r2)
; CHECK: br %r14
%val = load atomic float, ptr %src seq_cst, align 4
ret float %val
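Naturally aligned loads of up to eight bytes are atomic on SystemZ, so the seq_cst float load no longer needs the round trip through a GPR (lgf, sllg, ldgr) and becomes a single le. A sketch of the double analogue, assuming the same lowering applies:

define double @load_atomic_double(ptr %src) {
  %val = load atomic double, ptr %src seq_cst, align 8
  ret double %val
}
; expected codegen sketch:
;   ld %f0, 0(%r2)
;   br %r14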
diff --git a/llvm/test/CodeGen/SystemZ/atomic-memops-fp128.ll b/llvm/test/CodeGen/SystemZ/atomic-memops-fp128.ll
new file mode 100644
index 0000000..8038329
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/atomic-memops-fp128.ll
@@ -0,0 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+;
+; Test fpext of atomic loads to fp128 without VectorEnhancements1 (using FP register pairs).
+
+define fp128 @f1(ptr %src) {
+; CHECK-LABEL: f1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lxeb %f0, 0(%r3)
+; CHECK-NEXT: std %f0, 0(%r2)
+; CHECK-NEXT: std %f2, 8(%r2)
+; CHECK-NEXT: br %r14
+ %V = load atomic float, ptr %src seq_cst, align 4
+ %Res = fpext float %V to fp128
+ ret fp128 %Res
+}
+
+define fp128 @f2(ptr %src) {
+; CHECK-LABEL: f2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lxdb %f0, 0(%r3)
+; CHECK-NEXT: std %f0, 0(%r2)
+; CHECK-NEXT: std %f2, 8(%r2)
+; CHECK-NEXT: br %r14
+ %V = load atomic double, ptr %src seq_cst, align 8
+ %Res = fpext double %V to fp128
+ ret fp128 %Res
+}
+
+
+
diff --git a/llvm/test/CodeGen/SystemZ/atomic-memops.ll b/llvm/test/CodeGen/SystemZ/atomic-memops.ll
new file mode 100644
index 0000000..0bc647a
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/atomic-memops.ll
@@ -0,0 +1,739 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z16 | FileCheck %s
+
+; Sign-extending atomic loads.
+define void @f1(ptr %src, ptr %dst) {
+; CHECK-LABEL: f1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lb %r0, 0(%r2)
+; CHECK-NEXT: sth %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %s = sext i8 %b to i16
+ store volatile i16 %s, ptr %dst
+ ret void
+}
+
+define void @f2(ptr %src, ptr %dst) {
+; CHECK-LABEL: f2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lb %r0, 0(%r2)
+; CHECK-NEXT: st %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %s = sext i8 %b to i32
+ store volatile i32 %s, ptr %dst
+ ret void
+}
+
+define void @f3(ptr %src, ptr %dst) {
+; CHECK-LABEL: f3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lgb %r0, 0(%r2)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %s = sext i8 %b to i64
+ store volatile i64 %s, ptr %dst
+ ret void
+}
+
+define void @f4(ptr %src, ptr %dst) {
+; CHECK-LABEL: f4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lh %r0, 0(%r2)
+; CHECK-NEXT: st %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i16, ptr %src seq_cst, align 2
+ %s = sext i16 %b to i32
+ store volatile i32 %s, ptr %dst
+ ret void
+}
+
+define void @f5(ptr %src, ptr %dst) {
+; CHECK-LABEL: f5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lgh %r0, 0(%r2)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i16, ptr %src seq_cst, align 2
+ %s = sext i16 %b to i64
+ store volatile i64 %s, ptr %dst
+ ret void
+}
+
+define void @f6(ptr %src, ptr %dst) {
+; CHECK-LABEL: f6:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lgf %r0, 0(%r2)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i32, ptr %src seq_cst, align 4
+ %s = sext i32 %b to i64
+ store volatile i64 %s, ptr %dst
+ ret void
+}
+
+; Zero-extending atomic loads.
+define void @f7(ptr %src, ptr %dst) {
+; CHECK-LABEL: f7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llc %r0, 0(%r2)
+; CHECK-NEXT: sth %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %z = zext i8 %b to i16
+ store volatile i16 %z, ptr %dst
+ ret void
+}
+
+define void @f8(ptr %src, ptr %dst) {
+; CHECK-LABEL: f8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llc %r0, 0(%r2)
+; CHECK-NEXT: st %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %z = zext i8 %b to i32
+ store volatile i32 %z, ptr %dst
+ ret void
+}
+
+define void @f9(ptr %src, ptr %dst) {
+; CHECK-LABEL: f9:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llgc %r0, 0(%r2)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %z = zext i8 %b to i64
+ store volatile i64 %z, ptr %dst
+ ret void
+}
+
+define void @f10(ptr %src, ptr %dst) {
+; CHECK-LABEL: f10:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llh %r0, 0(%r2)
+; CHECK-NEXT: st %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i16, ptr %src seq_cst, align 2
+ %z = zext i16 %b to i32
+ store volatile i32 %z, ptr %dst
+ ret void
+}
+
+define void @f11(ptr %src, ptr %dst) {
+; CHECK-LABEL: f11:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llgh %r0, 0(%r2)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i16, ptr %src seq_cst, align 2
+ %z = zext i16 %b to i64
+ store volatile i64 %z, ptr %dst
+ ret void
+}
+
+define void @f12(ptr %src, ptr %dst) {
+; CHECK-LABEL: f12:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llgf %r0, 0(%r2)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i32, ptr %src seq_cst, align 4
+ %z = zext i32 %b to i64
+ store volatile i64 %z, ptr %dst
+ ret void
+}
+
+; reg/mem
+define i64 @f13(i64 %a, ptr %src) {
+; CHECK-LABEL: f13:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ag %r2, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i64, ptr %src seq_cst, align 8
+ %add = add i64 %a, %b
+ ret i64 %add
+}
+
+; reg/mem op with extension from memory.
+define i64 @f14(i64 %a, ptr %src) {
+; CHECK-LABEL: f14:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slgf %r2, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i32, ptr %src seq_cst, align 4
+ %bext = zext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+define float @f15(float %f1, ptr %ptr, float %acc) {
+; CHECK-LABEL: f15:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maeb %f2, %f0, 0(%r2)
+; CHECK-NEXT: ldr %f0, %f2
+; CHECK-NEXT: br %r14
+ %f2 = load atomic float, ptr %ptr seq_cst, align 4
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
+ ret float %res
+}
+declare float @llvm.fma.f32(float %f1, float %f2, float %f3)
+
+define double @f15_b(ptr %src) {
+; CHECK-LABEL: f15_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ldeb %f0, 0(%r2)
+; CHECK-NEXT: br %r14
+ %V = load atomic float, ptr %src seq_cst, align 4
+ %Res = fpext float %V to double
+ ret double %Res
+}
+
+define fp128 @f15_c(ptr %src) {
+; CHECK-LABEL: f15_c:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lde %f0, 0(%r3)
+; CHECK-NEXT: ldebr %f0, %f0
+; CHECK-NEXT: wflld %v0, %f0
+; CHECK-NEXT: vst %v0, 0(%r2), 3
+; CHECK-NEXT: br %r14
+ %V = load atomic float, ptr %src seq_cst, align 4
+ %Res = fpext float %V to fp128
+ ret fp128 %Res
+}
+
+define fp128 @f15_d(ptr %src) {
+; CHECK-LABEL: f15_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld %f0, 0(%r3)
+; CHECK-NEXT: wflld %v0, %f0
+; CHECK-NEXT: vst %v0, 0(%r2), 3
+; CHECK-NEXT: br %r14
+ %V = load atomic double, ptr %src seq_cst, align 8
+ %Res = fpext double %V to fp128
+ ret fp128 %Res
+}
+
+; Do it twice for good measure given the involved DAG combines.
+define void @f16(ptr %src, ptr %dst) {
+; CHECK-LABEL: f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llgc %r0, 0(%r2)
+; CHECK-NEXT: lgbr %r1, %r0
+; CHECK-NEXT: stg %r1, 0(%r3)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: llgc %r0, 0(%r2)
+; CHECK-NEXT: lgbr %r1, %r0
+; CHECK-NEXT: stg %r1, 0(%r3)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %s = sext i8 %b to i64
+ %z = zext i8 %b to i64
+ store volatile i64 %s, ptr %dst
+ store volatile i64 %z, ptr %dst
+
+ %b2 = load atomic i8, ptr %src seq_cst, align 1
+ %s2 = sext i8 %b2 to i64
+ %z2 = zext i8 %b2 to i64
+ store volatile i64 %s2, ptr %dst
+ store volatile i64 %z2, ptr %dst
+
+ ret void
+}
+
+define void @f16_b(ptr %src, ptr %dst) {
+; CHECK-LABEL: f16_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lgb %r0, 0(%r2)
+; CHECK-NEXT: sth %r0, 0(%r3)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %s = sext i8 %b to i16
+ store volatile i16 %s, ptr %dst
+
+ %s2 = sext i8 %b to i64
+ store volatile i64 %s2, ptr %dst
+
+ ret void
+}
+
+define void @f16_c(ptr %src, ptr %dst) {
+; CHECK-LABEL: f16_c:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llgc %r0, 0(%r2)
+; CHECK-NEXT: sth %r0, 0(%r3)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %z = zext i8 %b to i16
+ store volatile i16 %z, ptr %dst
+
+ %z2 = zext i8 %b to i64
+ store volatile i64 %z2, ptr %dst
+
+ ret void
+}
+
+; Check that two i8 loads use a reg/reg op.
+define i8 @f16_d(ptr %src, ptr %src2) {
+; CHECK-LABEL: f16_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lb %r2, 0(%r2)
+; CHECK-NEXT: lb %r0, 0(%r3)
+; CHECK-NEXT: ar %r2, %r0
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %b2 = load atomic i8, ptr %src2 seq_cst, align 1
+ %add = add i8 %b, %b2
+ ret i8 %add
+}
+
+; Binary operations on a byte in memory, with an atomic load.
+define void @f17(ptr %ptr) {
+; CHECK-LABEL: f17:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ni 0(%r2), 1
+; CHECK-NEXT: br %r14
+ %val = load atomic i8, ptr %ptr seq_cst, align 1
+ %xor = and i8 %val, -255
+ store i8 %xor, ptr %ptr
+ ret void
+}
+
+define void @f18(ptr %src) {
+; CHECK-LABEL: f18:
+; CHECK: # %bb.0:
+; CHECK-NEXT: oiy 4096(%r2), 1
+; CHECK-NEXT: br %r14
+ %ptr = getelementptr i8, ptr %src, i64 4096
+ %val = load atomic i8, ptr %ptr seq_cst, align 1
+ %xor = or i8 %val, -255
+ store i8 %xor, ptr %ptr
+ ret void
+}
+
+define void @f19(ptr %src) {
+; CHECK-LABEL: f19:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xi 4095(%r2), 1
+; CHECK-NEXT: br %r14
+ %ptr = getelementptr i8, ptr %src, i64 4095
+ %val = load atomic i8, ptr %ptr seq_cst, align 1
+ %xor = xor i8 %val, -255
+ store i8 %xor, ptr %ptr
+ ret void
+}
+
+; TM
+define double @f20(ptr %src, double %a, double %b) {
+; CHECK-LABEL: f20:
+; CHECK: # %bb.0:
+; CHECK-NEXT: tm 0(%r2), 1
+; CHECK-NEXT: je .LBB25_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: ldr %f2, %f0
+; CHECK-NEXT: .LBB25_2:
+; CHECK-NEXT: ldr %f0, %f2
+; CHECK-NEXT: br %r14
+ %byte = load atomic i8, ptr %src seq_cst, align 1
+ %and = and i8 %byte, 1
+ %cmp = icmp eq i8 %and, 0
+ %res = select i1 %cmp, double %b, double %a
+ ret double %res
+}
+
+; vector load and replicate
+define void @f21(ptr %src, ptr %dst) {
+; CHECK-LABEL: f21:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vlrepb %v0, 0(%r2)
+; CHECK-NEXT: vst %v0, 0(%r3), 3
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %v = insertelement <16 x i8> undef, i8 %b, i32 1
+ store volatile <16 x i8> %v, ptr %dst
+ ret void
+}
+
+define void @f22(ptr %src, ptr %dst) {
+; CHECK-LABEL: f22:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vlreph %v0, 0(%r2)
+; CHECK-NEXT: vst %v0, 0(%r3), 3
+; CHECK-NEXT: br %r14
+ %b = load atomic i16, ptr %src seq_cst, align 2
+ %v = insertelement <8 x i16> undef, i16 %b, i32 1
+ store volatile <8 x i16> %v, ptr %dst
+ ret void
+}
+
+define void @f23(ptr %src, ptr %dst) {
+; CHECK-LABEL: f23:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vlrepf %v0, 0(%r2)
+; CHECK-NEXT: vst %v0, 0(%r3), 3
+; CHECK-NEXT: br %r14
+ %b = load atomic i32, ptr %src seq_cst, align 4
+ %v = insertelement <4 x i32> undef, i32 %b, i32 2
+ store volatile <4 x i32> %v, ptr %dst
+ ret void
+}
+
+define void @f24(ptr %src, ptr %dst) {
+; CHECK-LABEL: f24:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vlrepg %v0, 0(%r2)
+; CHECK-NEXT: vst %v0, 0(%r3), 3
+; CHECK-NEXT: br %r14
+ %b = load atomic i64, ptr %src seq_cst, align 8
+ %v = insertelement <2 x i64> undef, i64 %b, i32 0
+ store volatile <2 x i64> %v, ptr %dst
+ ret void
+}
+
+define void @f25(ptr %src, ptr %dst) {
+; CHECK-LABEL: f25:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vlrepf %v0, 0(%r2)
+; CHECK-NEXT: vst %v0, 0(%r3), 3
+; CHECK-NEXT: br %r14
+ %b = load atomic float, ptr %src seq_cst, align 4
+ %v = insertelement <4 x float> undef, float %b, i32 1
+ store volatile <4 x float> %v, ptr %dst
+ ret void
+}
+
+; Do *not* use vlrep for an extending load.
+define <4 x i32> @f25_c(ptr %ptr) {
+; CHECK-LABEL: f25_c:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lb %r0, 0(%r2)
+; CHECK-NEXT: vlvgp %v0, %r0, %r0
+; CHECK-NEXT: vrepf %v24, %v0, 1
+; CHECK-NEXT: br %r14
+ %L = load atomic i8, ptr %ptr seq_cst, align 4
+ %S = sext i8 %L to i32
+ %val = insertelement <4 x i32> undef, i32 %S, i32 0
+ %ret = shufflevector <4 x i32> %val, <4 x i32> undef,
+ <4 x i32> zeroinitializer
+ ret <4 x i32> %ret
+}
+
+; Do *not* use vlrep if there is another scalar use.
+define <4 x i32> @f25_d(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: f25_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: l %r0, 0(%r2)
+; CHECK-NEXT: vlvgp %v0, %r0, %r0
+; CHECK-NEXT: vrepf %v24, %v0, 1
+; CHECK-NEXT: st %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %L = load atomic i32, ptr %ptr seq_cst, align 4
+ store i32 %L, ptr %dst, align 4
+ %val = insertelement <4 x i32> undef, i32 %L, i32 0
+ %ret = shufflevector <4 x i32> %val, <4 x i32> undef,
+ <4 x i32> zeroinitializer
+ ret <4 x i32> %ret
+}
+
+define void @f26(ptr %src, ptr %dst) {
+; CHECK-LABEL: f26:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vlrepg %v0, 0(%r2)
+; CHECK-NEXT: vst %v0, 0(%r3), 3
+; CHECK-NEXT: br %r14
+ %b = load atomic double, ptr %src seq_cst, align 8
+ %v = insertelement <2 x double> undef, double %b, i32 0
+ store volatile <2 x double> %v, ptr %dst
+ ret void
+}
+
+; Vector Load logical element and zero.
+define <16 x i8> @f27(ptr %ptr) {
+; CHECK-LABEL: f27:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vllezb %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load atomic i8, ptr %ptr seq_cst, align 1
+ %ret = insertelement <16 x i8> zeroinitializer, i8 %val, i32 7
+ ret <16 x i8> %ret
+}
+
+define <8 x i16> @f28(ptr %ptr) {
+; CHECK-LABEL: f28:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vllezh %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load atomic i16, ptr %ptr seq_cst, align 2
+ %ret = insertelement <8 x i16> zeroinitializer, i16 %val, i32 3
+ ret <8 x i16> %ret
+}
+
+define <4 x i32> @f29(ptr %ptr) {
+; CHECK-LABEL: f29:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vllezf %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load atomic i32, ptr %ptr seq_cst, align 4
+ %ret = insertelement <4 x i32> zeroinitializer, i32 %val, i32 1
+ ret <4 x i32> %ret
+}
+
+define <2 x i64> @f30(ptr %ptr) {
+; CHECK-LABEL: f30:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vllezg %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load atomic i64, ptr %ptr seq_cst, align 8
+ %ret = insertelement <2 x i64> zeroinitializer, i64 %val, i32 0
+ ret <2 x i64> %ret
+}
+
+define <4 x i32> @f31(ptr %ptr) {
+; CHECK-LABEL: f31:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vllezlf %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load atomic i32, ptr %ptr seq_cst, align 4
+ %ret = insertelement <4 x i32> zeroinitializer, i32 %val, i32 0
+ ret <4 x i32> %ret
+}
+
+define <4 x float> @f32(ptr %ptr) {
+; CHECK-LABEL: f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vllezlf %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load atomic float, ptr %ptr seq_cst, align 4
+ %ret = insertelement <4 x float> zeroinitializer, float %val, i32 0
+ ret <4 x float> %ret
+}
+
+; Vector Load element.
+define <16 x i8> @f33(<16 x i8> %val, ptr %ptr) {
+; CHECK-LABEL: f33:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vleb %v24, 0(%r2), 0
+; CHECK-NEXT: br %r14
+ %element = load atomic i8, ptr %ptr seq_cst, align 1
+ %ret = insertelement <16 x i8> %val, i8 %element, i32 0
+ ret <16 x i8> %ret
+}
+
+define <8 x i16> @f34(<8 x i16> %val, ptr %ptr) {
+; CHECK-LABEL: f34:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vleh %v24, 0(%r2), 0
+; CHECK-NEXT: br %r14
+ %element = load atomic i16, ptr %ptr seq_cst, align 2
+ %ret = insertelement <8 x i16> %val, i16 %element, i32 0
+ ret <8 x i16> %ret
+}
+
+define <4 x i32> @f35(<4 x i32> %val, ptr %ptr) {
+; CHECK-LABEL: f35:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vlef %v24, 0(%r2), 0
+; CHECK-NEXT: br %r14
+ %element = load atomic i32, ptr %ptr seq_cst, align 4
+ %ret = insertelement <4 x i32> %val, i32 %element, i32 0
+ ret <4 x i32> %ret
+}
+
+define <2 x i64> @f36(<2 x i64> %val, ptr %ptr) {
+; CHECK-LABEL: f36:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vleg %v24, 0(%r2), 0
+; CHECK-NEXT: br %r14
+ %element = load atomic i64, ptr %ptr seq_cst, align 8
+ %ret = insertelement <2 x i64> %val, i64 %element, i32 0
+ ret <2 x i64> %ret
+}
+
+; Test operation on memory involving atomic load and store.
+define void @f39(ptr %ptr) {
+; CHECK-LABEL: f39:
+; CHECK: # %bb.0:
+; CHECK-NEXT: oi 0(%r2), 1
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %val = load atomic i8, ptr %ptr seq_cst, align 1
+ %or = or i8 %val, -255
+ store atomic i8 %or, ptr %ptr seq_cst, align 1
+ ret void
+}
+
+; Some atomic stores of immediates.
+define void @f40(ptr %ptr) {
+; CHECK-LABEL: f40:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mvi 0(%r2), 128
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ store atomic i8 128, ptr %ptr seq_cst, align 1
+ ret void
+}
+
+define void @f41(ptr %ptr) {
+; CHECK-LABEL: f41:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mvhi 0(%r2), -1
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ store atomic i32 4294967295, ptr %ptr seq_cst, align 4
+ ret void
+}
+
+define void @f42(ptr %ptr) {
+; CHECK-LABEL: f42:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mvhi 0(%r2), -1
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ store atomic i32 4294967295, ptr %ptr seq_cst, align 4
+ ret void
+}
+
+define void @f43(ptr %ptr) {
+; CHECK-LABEL: f43:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llihl %r0, 255
+; CHECK-NEXT: oilf %r0, 4294967295
+; CHECK-NEXT: stg %r0, 0(%r2)
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ store atomic i64 1099511627775, ptr %ptr seq_cst, align 8
+ ret void
+}
+
+define void @f44(ptr %ptr) {
+; CHECK-LABEL: f44:
+; CHECK: # %bb.0:
+; CHECK-NEXT: larl %r1, .LCPI49_0
+; CHECK-NEXT: ld %f0, 0(%r1)
+; CHECK-NEXT: std %f0, 0(%r2)
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ store atomic double 0x3ff0000020000000, ptr %ptr seq_cst, align 8
+ ret void
+}
+
+; Vector Store Element.
+define void @f45(<16 x i8> %val, ptr %ptr) {
+; CHECK-LABEL: f45:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsteb %v24, 0(%r2), 0
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %element = extractelement <16 x i8> %val, i32 0
+ store atomic i8 %element, ptr %ptr seq_cst, align 1
+ ret void
+}
+
+define void @f46(<8 x i16> %val, ptr %base) {
+; CHECK-LABEL: f46:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsteh %v24, 4094(%r2), 5
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %ptr = getelementptr i16, ptr %base, i32 2047
+ %element = extractelement <8 x i16> %val, i32 5
+ store atomic i16 %element, ptr %ptr seq_cst, align 2
+ ret void
+}
+
+define void @f47(<4 x i32> %val, ptr %ptr) {
+; CHECK-LABEL: f47:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vstef %v24, 0(%r2), 3
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %element = extractelement <4 x i32> %val, i32 3
+ store atomic i32 %element, ptr %ptr seq_cst, align 4
+ ret void
+}
+
+define void @f48(<2 x i64> %val, ptr %ptr) {
+; CHECK-LABEL: f48:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsteg %v24, 0(%r2), 1
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %element = extractelement <2 x i64> %val, i32 1
+ store atomic i64 %element, ptr %ptr seq_cst, align 8
+ ret void
+}
+
+define void @f49(<4 x float> %val, ptr %ptr) {
+; CHECK-LABEL: f49:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vstef %v24, 0(%r2), 0
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %element = extractelement <4 x float> %val, i32 0
+ store atomic float %element, ptr %ptr seq_cst, align 4
+ ret void
+}
+
+define void @f50(<2 x double> %val, ptr %ptr) {
+; CHECK-LABEL: f50:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsteg %v24, 0(%r2), 1
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %element = extractelement <2 x double> %val, i32 1
+ store atomic double %element, ptr %ptr seq_cst, align 8
+ ret void
+}
+
+define void @f51(ptr %src, ptr %dst) {
+; CHECK-LABEL: f51:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lpq %r0, 0(%r2)
+; CHECK-NEXT: vlvgp %v0, %r0, %r1
+; CHECK-NEXT: vgmf %v1, 2, 8
+; CHECK-NEXT: aebr %f0, %f1
+; CHECK-NEXT: ste %f0, 0(%r3)
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %atomic-load = load atomic i128, ptr %src seq_cst, align 16
+ %b0 = bitcast i128 %atomic-load to <4 x float>
+ %vecext = extractelement <4 x float> %b0, i64 0
+ %add = fadd float %vecext, 1.000000e+00
+ store atomic float %add, ptr %dst seq_cst, align 4
+ ret void
+}
+
+define void @f52(ptr %src, ptr %dst) {
+; CHECK-LABEL: f52:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lpq %r0, 0(%r2)
+; CHECK-NEXT: vlvgp %v0, %r0, %r1
+; CHECK-NEXT: vgmg %v1, 2, 11
+; CHECK-NEXT: adbr %f0, %f1
+; CHECK-NEXT: std %f0, 0(%r3)
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %atomic-load = load atomic i128, ptr %src seq_cst, align 16
+ %b0 = bitcast i128 %atomic-load to <2 x double>
+ %vecext = extractelement <2 x double> %b0, i64 0
+ %add = fadd double %vecext, 1.000000e+00
+ store atomic double %add, ptr %dst seq_cst, align 8
+ ret void
+}
+
+define void @fun58(ptr %ptr, i64 %arg) {
+; CHECK-LABEL: fun58:
+; CHECK: # %bb.0:
+; CHECK-NEXT: st %r3, 0(%r2)
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %res = trunc i64 %arg to i32
+ store atomic i32 %res, ptr %ptr seq_cst, align 4
+ ret void
+}
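A recurring pattern in the stores above: a seq_cst atomic store is a plain store followed by a serialization, bcr 14, %r0 when the fast-BCR-serialization facility is available (as on the z16 tested here), bcr 15, %r0 otherwise, while atomic loads need no fence at all. Minimal sketch, mirroring fun58:

define void @store_seq_cst(ptr %dst, i32 %v) {
  store atomic i32 %v, ptr %dst seq_cst, align 4
  ret void
}
; expected codegen sketch (with fast-BCR-serialization):
;   st %r3, 0(%r2)
;   bcr 14, %r0
;   br %r14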
diff --git a/llvm/test/CodeGen/SystemZ/atomic-store-06.ll b/llvm/test/CodeGen/SystemZ/atomic-store-06.ll
index b748bfc..91e324b 100644
--- a/llvm/test/CodeGen/SystemZ/atomic-store-06.ll
+++ b/llvm/test/CodeGen/SystemZ/atomic-store-06.ll
@@ -6,10 +6,7 @@
define void @f1(ptr %src, float %val) {
; CHECK-LABEL: f1:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f0s killed $f0s def $f0d
-; CHECK-NEXT: lgdr %r0, %f0
-; CHECK-NEXT: srlg %r0, %r0, 32
-; CHECK-NEXT: st %r0, 0(%r2)
+; CHECK-NEXT: ste %f0, 0(%r2)
; CHECK-NEXT: bcr 15, %r0
; CHECK-NEXT: br %r14
store atomic float %val, ptr %src seq_cst, align 4
diff --git a/llvm/test/CodeGen/SystemZ/call-zos-01.ll b/llvm/test/CodeGen/SystemZ/call-zos-01.ll
index 7777686..fc7a85c 100644
--- a/llvm/test/CodeGen/SystemZ/call-zos-01.ll
+++ b/llvm/test/CodeGen/SystemZ/call-zos-01.ll
@@ -104,7 +104,7 @@ entry:
}
; CHECK-LABEL: call_double:
-; CHECK: larl [[GENREG:[0-9]+]], @{{CPI[0-9]+_[0-9]+}}
+; CHECK: larl [[GENREG:[0-9]+]], L#{{CPI[0-9]+_[0-9]+}}
; CHECK-NEXT: ld 0, 0([[GENREG]])
define double @call_double() {
entry:
@@ -113,7 +113,7 @@ entry:
}
; CHECK-LABEL: call_longdouble:
-; CHECK: larl [[GENREG:[0-9]+]], @{{CPI[0-9]+_[0-9]+}}
+; CHECK: larl [[GENREG:[0-9]+]], L#{{CPI[0-9]+_[0-9]+}}
; CHECK-NEXT: ld 0, 0([[GENREG]])
; CHECK-NEXT: ld 2, 8([[GENREG]])
define fp128 @call_longdouble() {
@@ -123,7 +123,7 @@ entry:
}
; CHECK-LABEL: call_floats0
-; CHECK: larl [[GENREG:[0-9]+]], @{{CPI[0-9]+_[0-9]+}}
+; CHECK: larl [[GENREG:[0-9]+]], L#{{CPI[0-9]+_[0-9]+}}
; CHECK-NEXT: ld 1, 0([[GENREG]])
; CHECK-NEXT: ld 3, 8([[GENREG]])
; CHECK: lxr 5, 0
@@ -146,7 +146,7 @@ entry:
}
; CHECK-LABEL: pass_float:
-; CHECK: larl 1, @{{CPI[0-9]+_[0-9]+}}
+; CHECK: larl 1, L#{{CPI[0-9]+_[0-9]+}}
; CHECK: aeb 0, 0(1)
define float @pass_float(float %arg) {
entry:
@@ -155,7 +155,7 @@ entry:
}
; CHECK-LABEL: pass_double:
-; CHECK: larl 1, @{{CPI[0-9]+_[0-9]+}}
+; CHECK: larl 1, L#{{CPI[0-9]+_[0-9]+}}
; CHECK: adb 0, 0(1)
define double @pass_double(double %arg) {
entry:
@@ -164,7 +164,7 @@ entry:
}
; CHECK-LABEL: pass_longdouble
-; CHECK: larl 1, @{{CPI[0-9]+_[0-9]+}}
+; CHECK: larl 1, L#{{CPI[0-9]+_[0-9]+}}
; CHECK: lxdb 1, 0(1)
; CHECK: axbr 0, 1
define fp128 @pass_longdouble(fp128 %arg) {
@@ -174,7 +174,7 @@ entry:
}
; CHECK-LABEL: pass_floats0
-; CHECK: larl 1, @{{CPI[0-9]+_[0-9]+}}
+; CHECK: larl 1, L#{{CPI[0-9]+_[0-9]+}}
; CHECK: axbr 0, 4
; CHECK: axbr 1, 0
; CHECK: cxbr 1, 5
diff --git a/llvm/test/CodeGen/SystemZ/call-zos-i128.ll b/llvm/test/CodeGen/SystemZ/call-zos-i128.ll
index ccdac16..7754833 100644
--- a/llvm/test/CodeGen/SystemZ/call-zos-i128.ll
+++ b/llvm/test/CodeGen/SystemZ/call-zos-i128.ll
@@ -3,10 +3,10 @@
; RUN: llc < %s -mtriple=s390x-ibm-zos -mcpu=z13 | FileCheck %s
; CHECK-LABEL: call_i128:
-; CHECK-DAG: larl 1, @CPI0_0
+; CHECK-DAG: larl 1, L#CPI0_0
; CHECK-DAG: vl 0, 0(1), 3
; CHECK-DAG: vst 0, 2256(4), 3
-; CHECK-DAG: larl 1, @CPI0_1
+; CHECK-DAG: larl 1, L#CPI0_1
; CHECK-DAG: vl 0, 0(1), 3
; CHECK-DAG: vst 0, 2272(4), 3
; CHECK-DAG: la 1, 2288(4)
diff --git a/llvm/test/CodeGen/SystemZ/call-zos-vararg.ll b/llvm/test/CodeGen/SystemZ/call-zos-vararg.ll
index bde59a6..81aedc1 100644
--- a/llvm/test/CodeGen/SystemZ/call-zos-vararg.ll
+++ b/llvm/test/CodeGen/SystemZ/call-zos-vararg.ll
@@ -88,13 +88,15 @@ entry:
ret i64 %retval
}
+;; TODO: The extra COPY after LGDR is unnecessary (machine-scheduler introduces the overlap).
; CHECK-LABEL: call_vararg_both0:
; CHECK: stmg 6, 7, 1872(4)
; CHECK-NEXT: aghi 4, -192
; CHECK-NEXT: lg 6, 40(5)
; CHECK-NEXT: lg 5, 32(5)
+; CHECK-NEXT: lgdr 0, 0
; CHECK-NEXT: lgr 2, 1
-; CHECK-NEXT: lgdr 1, 0
+; CHECK-NEXT: lgr 1, 0
; CHECK-NEXT: basr 7, 6
; CHECK-NEXT: bcr 0, 0
; CHECK-NEXT: lg 7, 2072(4)
@@ -108,7 +110,7 @@ define i64 @call_vararg_both0(i64 %arg0, double %arg1) {
; CHECK-LABEL: call_vararg_long_double0:
; CHECK: stmg 6, 7, 1872(4)
; CHECK-NEXT: aghi 4, -192
-; CHECK-NEXT: larl 1, @CPI5_0
+; CHECK-NEXT: larl 1, L#CPI5_0
; CHECK-NEXT: ld 0, 0(1)
; CHECK-NEXT: ld 2, 8(1)
; CHECK-NEXT: lg 6, 8(5)
@@ -202,7 +204,7 @@ define void @call_vec_vararg_test0(<2 x double> %v) {
}
; ARCH12-LABEL: call_vec_vararg_test1
-; ARCH12: larl 1, @CPI10_0
+; ARCH12: larl 1, L#CPI10_0
; ARCH12: vl 0, 0(1), 3
; ARCH12: vlgvg 3, 24, 0
; ARCH12: vrepg 2, 0, 1
@@ -294,7 +296,7 @@ entry:
; CHECK-NEXT: aghi 4, -192
; CHECK-NEXT: lg 6, 72(5)
; CHECK-NEXT: lg 5, 64(5)
-; CHECK-NEXT: larl 1, @CPI17_0
+; CHECK-NEXT: larl 1, L#CPI17_0
; CHECK-NEXT: le 0, 0(1)
; CHECK-NEXT: llihf 0, 1073692672
; CHECK-NEXT: llihh 2, 16384
diff --git a/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir b/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
index 7ff7d9b..197c3d8 100644
--- a/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
+++ b/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
@@ -157,6 +157,7 @@ registers:
- { id: 129, class: grx32bit }
- { id: 130, class: fp64bit }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0:
diff --git a/llvm/test/CodeGen/SystemZ/cond-move-04.mir b/llvm/test/CodeGen/SystemZ/cond-move-04.mir
index 97aa00f..ab4a14c 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-04.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-04.mir
@@ -53,6 +53,7 @@ registers:
- { id: 10, class: gr64bit }
- { id: 11, class: gr32bit }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0 (%ir-block.1):
@@ -64,12 +65,10 @@ body: |
CHIMux %3, 0, implicit-def $cc
%0 = LOCRMux undef %0, %5, 14, 6, implicit $cc
%0 = LOCRMux %0, %2, 14, 6, implicit killed $cc
- ADJCALLSTACKDOWN 0, 0
%7 = LGFR %0
$r3d = LGHI 0
$r4d = COPY %7
CallBRASL @foo, undef $r2d, killed $r3d, killed $r4d, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit-def dead $r2d
- ADJCALLSTACKUP 0, 0
J %bb.1
...
diff --git a/llvm/test/CodeGen/SystemZ/cond-move-08.mir b/llvm/test/CodeGen/SystemZ/cond-move-08.mir
index 93aa5626..2ea67dc 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-08.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-08.mir
@@ -116,6 +116,7 @@ registers:
- { id: 27, class: grx32bit }
- { id: 28, class: addr64bit }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.bb5:
@@ -154,9 +155,7 @@ body: |
J %bb.4
bb.4.bb33:
- ADJCALLSTACKDOWN 0, 0
CallBRASL @fun, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc
- ADJCALLSTACKUP 0, 0
STRL %4, @globvar :: (store (s32) into @globvar)
CLFIMux undef %23:grx32bit, 1, implicit-def $cc
%25:grx32bit = LHIMux 0
diff --git a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir
index 37e2980..8a7929c 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir
@@ -30,6 +30,7 @@ registers:
- { id: 11, class: gr32bit }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
@@ -44,11 +45,9 @@ body: |
%11:gr32bit = SELRMux %8, %9:grx32bit, 14, 6, implicit killed $cc
CHIMux %6, 2, implicit-def $cc
%0:gr32bit = SELRMux %11, %5, 14, 8, implicit killed $cc
- ADJCALLSTACKDOWN 0, 0
%10:gr64bit = LGFR %0
$r2d = COPY %10
CallBRASL @foo, killed $r2d, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit $fpc
- ADJCALLSTACKUP 0, 0
J %bb.1
...
diff --git a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir
index e7e1eaf..009fd6c 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir
@@ -192,6 +192,7 @@ liveins:
- { reg: '$r2d', virtual-reg: '%31' }
- { reg: '$r3d', virtual-reg: '%32' }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.bb:
@@ -199,18 +200,12 @@ body: |
%32:gr64bit = COPY $r3d
%0:gr64bit = COPY $r2d
- ADJCALLSTACKDOWN 0, 0
CallBRASL @sre_malloc, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit-def $r2d
%1:addr64bit = COPY $r2d
- ADJCALLSTACKUP 0, 0
- ADJCALLSTACKDOWN 0, 0
CallBRASL @sre_malloc, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit-def $r2d
%2:addr64bit = COPY $r2d
- ADJCALLSTACKUP 0, 0
%3:gr32bit = AHIMuxK %0.subreg_l32, -1, implicit-def dead $cc
- ADJCALLSTACKDOWN 0, 0
CallBRASL @malloc, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc
- ADJCALLSTACKUP 0, 0
%55:gr32bit = AHIMuxK %0.subreg_l32, 3, implicit-def dead $cc
%56:addr64bit = LGHI 0
%57:gr64bit = COPY %0
diff --git a/llvm/test/CodeGen/SystemZ/frame-28.mir b/llvm/test/CodeGen/SystemZ/frame-28.mir
index dd5933a..254b8a2c 100644
--- a/llvm/test/CodeGen/SystemZ/frame-28.mir
+++ b/llvm/test/CodeGen/SystemZ/frame-28.mir
@@ -162,6 +162,8 @@ body: |
---
name: fun4
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, size: 5000 }
- { id: 1, size: 2500 }
@@ -177,9 +179,7 @@ body: |
VST64 renamable $f16d, %stack.0, 0, $noreg
VST64 renamable $f16d, %stack.0, 0, $noreg
VST64 renamable $f16d, %stack.1, 0, $noreg
- ADJCALLSTACKDOWN 0, 0
CallBRASL @foo, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit $fpc, implicit-def $r2l
- ADJCALLSTACKUP 0, 0
$f17d = IMPLICIT_DEF
VST64 renamable $f17d, %stack.1, 0, $noreg
Return
diff --git a/llvm/test/CodeGen/SystemZ/frame-adjstack.ll b/llvm/test/CodeGen/SystemZ/frame-adjstack.ll
new file mode 100644
index 0000000..7edacaa
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/frame-adjstack.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -verify-machineinstrs | FileCheck %s
+;
+; Test that inserting a new MBB near a call during finalize isel custom
+; insertion does not cause all frame instructions to be missed, which would
+; result in a failure to set the AdjustsStack flag.
+
+; CHECK-LABEL: fun
+define void @fun(i1 %cc) {
+ %sel = select i1 %cc, i32 5, i32 0
+ tail call void @input_report_abs(i32 %sel)
+ %sel2 = select i1 %cc, i32 6, i32 1
+ tail call void @input_report_abs(i32 %sel2)
+ ret void
+}
+
+declare void @input_report_abs(i32)
diff --git a/llvm/test/CodeGen/SystemZ/int-cmp-56.mir b/llvm/test/CodeGen/SystemZ/int-cmp-56.mir
index e52fd44..3e00b60 100644
--- a/llvm/test/CodeGen/SystemZ/int-cmp-56.mir
+++ b/llvm/test/CodeGen/SystemZ/int-cmp-56.mir
@@ -48,6 +48,7 @@ liveins:
- { reg: '$r2d', virtual-reg: '%0' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
@@ -125,6 +126,7 @@ liveins:
- { reg: '$r2d', virtual-reg: '%0' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
@@ -202,6 +204,7 @@ liveins:
- { reg: '$r2d', virtual-reg: '%0' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
@@ -279,6 +282,7 @@ liveins:
- { reg: '$r2d', virtual-reg: '%0' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/CodeGen/SystemZ/int-usub-12.ll b/llvm/test/CodeGen/SystemZ/int-usub-12.ll
index c39a6da..147fbfd 100644
--- a/llvm/test/CodeGen/SystemZ/int-usub-12.ll
+++ b/llvm/test/CodeGen/SystemZ/int-usub-12.ll
@@ -11,6 +11,7 @@ define zeroext i1 @f1(i128 %a, i128 %b, ptr %res) {
; CHECK-NEXT: vscbiq %v2, %v1, %v0
; CHECK-NEXT: vlgvg %r2, %v2, 1
; CHECK-NEXT: vsq %v0, %v1, %v0
+; CHECK-NEXT: xilf %r2, 1
; CHECK-NEXT: vst %v0, 0(%r4), 3
; CHECK-NEXT: br %r14
%t = call {i128, i1} @llvm.usub.with.overflow.i128(i128 %a, i128 %b)
@@ -27,6 +28,7 @@ define zeroext i1 @f2(i128 %a, i128 %b) {
; CHECK-NEXT: vl %v1, 0(%r2), 3
; CHECK-NEXT: vscbiq %v0, %v1, %v0
; CHECK-NEXT: vlgvg %r2, %v0, 1
+; CHECK-NEXT: xilf %r2, 1
; CHECK-NEXT: br %r14
%t = call {i128, i1} @llvm.usub.with.overflow.i128(i128 %a, i128 %b)
%obit = extractvalue {i128, i1} %t, 1
@@ -46,5 +48,25 @@ define i128 @f3(i128 %a, i128 %b) {
ret i128 %val
}
+define i128 @f4(i128 %a, i128 %b) {
+; CHECK-LABEL: f4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vl %v0, 0(%r4), 3
+; CHECK-NEXT: vl %v1, 0(%r3), 3
+; CHECK-NEXT: vscbiq %v2, %v1, %v0
+; CHECK-NEXT: vlgvf %r0, %v2, 3
+; CHECK-NEXT: vgbm %v2, 0
+; CHECK-NEXT: xilf %r0, 1
+; CHECK-NEXT: jl .LBB3_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: vsq %v2, %v1, %v0
+; CHECK-NEXT: .LBB3_2:
+; CHECK-NEXT: vst %v2, 0(%r2), 3
+; CHECK-NEXT: br %r14
+ %val = call i128 @llvm.usub.sat.i128(i128 %a, i128 %b)
+ ret i128 %val
+}
+
declare {i128, i1} @llvm.usub.with.overflow.i128(i128, i128) nounwind readnone
+declare i128 @llvm.usub.sat.i128(i128, i128) nounwind readnone
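The added xilf %r2, 1 flips the extracted borrow indication: VSCBIQ produces 1 when no borrow occurs, per the z/Architecture carry convention, while the intrinsic's overflow bit must be 1 exactly when the subtraction wraps, that is, when a < b. The same flipped bit drives the branch in f4 to implement saturation. An illustrative IR expansion of what usub.sat computes (not taken from the patch):

define i128 @usub_sat_expanded(i128 %a, i128 %b) {
  %cmp = icmp ult i128 %a, %b        ; borrow would be taken iff a < b
  %sub = sub i128 %a, %b
  %res = select i1 %cmp, i128 0, i128 %sub
  ret i128 %res
}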
diff --git a/llvm/test/CodeGen/SystemZ/int-usub-13.ll b/llvm/test/CodeGen/SystemZ/int-usub-13.ll
index 637e1a8..794af3b 100644
--- a/llvm/test/CodeGen/SystemZ/int-usub-13.ll
+++ b/llvm/test/CodeGen/SystemZ/int-usub-13.ll
@@ -15,6 +15,7 @@ define zeroext i1 @f1(i256 %a, i256 %b, ptr %res) {
; CHECK-NEXT: vlgvg %r2, %v5, 1
; CHECK-NEXT: vsbiq %v0, %v1, %v0, %v4
; CHECK-NEXT: vsq %v1, %v3, %v2
+; CHECK-NEXT: xilf %r2, 1
; CHECK-NEXT: vst %v1, 16(%r4), 3
; CHECK-NEXT: vst %v0, 0(%r4), 3
; CHECK-NEXT: br %r14
@@ -35,6 +36,7 @@ define zeroext i1 @f2(i256 %a, i256 %b) {
; CHECK-NEXT: vscbiq %v2, %v3, %v2
; CHECK-NEXT: vsbcbiq %v0, %v1, %v0, %v2
; CHECK-NEXT: vlgvg %r2, %v0, 1
+; CHECK-NEXT: xilf %r2, 1
; CHECK-NEXT: br %r14
%t = call {i256, i1} @llvm.usub.with.overflow.i256(i256 %a, i256 %b)
%obit = extractvalue {i256, i1} %t, 1
diff --git a/llvm/test/CodeGen/SystemZ/readcyclecounter.ll b/llvm/test/CodeGen/SystemZ/readcyclecounter.ll
new file mode 100644
index 0000000..34b6d34
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/readcyclecounter.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=s390x-ibm-linux | FileCheck %s
+
+; Verify that we correctly lower ISD::READCYCLECOUNTER.
+
+define i64 @test_builtin_readcyclecounter1() {
+; CHECK-LABEL: test_builtin_readcyclecounter1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: aghi %r15, -168
+; CHECK-NEXT: .cfi_def_cfa_offset 328
+; CHECK-NEXT: stckf 160(%r15)
+; CHECK-NEXT: lg %r2, 160(%r15)
+; CHECK-NEXT: aghi %r15, 168
+; CHECK-NEXT: br %r14
+ %1 = tail call i64 @llvm.readcyclecounter()
+ ret i64 %1
+}
+
+define void @test_builtin_readcyclecounter2(ptr %ptr) {
+; CHECK-LABEL: test_builtin_readcyclecounter2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stckf 0(%r2)
+; CHECK-NEXT: br %r14
+ %1 = tail call i64 @llvm.readcyclecounter()
+ store i64 %1, ptr %ptr
+ ret void
+}
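On SystemZ, ISD::READCYCLECOUNTER maps to STCKF (store clock fast), which can only write the 64-bit TOD clock to memory. That explains both tests: when the value is wanted in a register, codegen allocates a stack temporary (the aghi/stckf/lg sequence above), and when the sole use is a store, stckf targets the destination directly. Usage sketch with a hypothetical function name:

define void @sample_clock(ptr %out) {
  %t = tail call i64 @llvm.readcyclecounter()
  store i64 %t, ptr %out
  ret void
}
declare i64 @llvm.readcyclecounter()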
diff --git a/llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir b/llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir
index f709b70..bf58550 100644
--- a/llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir
+++ b/llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir
@@ -49,6 +49,8 @@ body: |
---
name: segfault
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
liveins: []
body: |
; CHECK-LABEL: name: segfault
diff --git a/llvm/test/CodeGen/SystemZ/swifterror.ll b/llvm/test/CodeGen/SystemZ/swifterror.ll
index 3ea29f1..1b18287 100644
--- a/llvm/test/CodeGen/SystemZ/swifterror.ll
+++ b/llvm/test/CodeGen/SystemZ/swifterror.ll
@@ -30,8 +30,8 @@ entry:
define float @caller(ptr %error_ref) {
; CHECK-LABEL: caller:
; Make a copy of error_ref because r2 is getting clobbered
-; CHECK: lgr %r[[REG1:[0-9]+]], %r2
-; CHECK: lghi %r9, 0
+; CHECK-DAG: lgr %r[[REG1:[0-9]+]], %r2
+; CHECK-DAG: lghi %r9, 0
; CHECK: brasl %r14, foo
; CHECK: %r2, %r9
; CHECK: jlh
@@ -197,7 +197,7 @@ define void @foo_sret(ptr sret(%struct.S) %agg.result, i32 %val1, ptr swifterror
; CHECK-LABEL: foo_sret:
; CHECK-DAG: lgr %r[[REG1:[0-9]+]], %r2
; CHECK-DAG: lr %r[[REG2:[0-9]+]], %r3
-; CHECK: lghi %r2, 16
+; CHECK-DAG: lghi %r2, 16
; CHECK: brasl %r14, malloc
; CHECK: mvi 8(%r2), 1
; CHECK: st %r[[REG2]], 4(%r[[REG1]])
@@ -280,7 +280,7 @@ define float @caller_with_multiple_swifterror_values(ptr %error_ref, ptr %error_
; CHECK-DAG: lgr %r[[REG1:[0-9]+]], %r2
; CHECK-DAG: lgr %r[[REG2:[0-9]+]], %r3
; The first swifterror value:
-; CHECK: lghi %r9, 0
+; CHECK-DAG: lghi %r9, 0
; CHECK: brasl %r14, foo
; CHECK: ltgr %r2, %r9
; CHECK: jlh
diff --git a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
index 69e1c2f..9d77744 100644
--- a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
@@ -1649,8 +1649,8 @@ define <2 x double> @constrained_vector_powi_v2f64() #0 {
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI36_1
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: ldr %f2, %f8
@@ -1707,14 +1707,14 @@ define <3 x float> @constrained_vector_powi_v3f32() #0 {
; S390X-NEXT: brasl %r14, __powisf2@PLT
; S390X-NEXT: larl %r1, .LCPI37_1
; S390X-NEXT: le %f1, 0(%r1)
-; S390X-NEXT: ler %f8, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ler %f8, %f0
; S390X-NEXT: ler %f0, %f1
; S390X-NEXT: brasl %r14, __powisf2@PLT
; S390X-NEXT: larl %r1, .LCPI37_2
; S390X-NEXT: le %f1, 0(%r1)
-; S390X-NEXT: ler %f9, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ler %f9, %f0
; S390X-NEXT: ler %f0, %f1
; S390X-NEXT: brasl %r14, __powisf2@PLT
; S390X-NEXT: ler %f2, %f9
@@ -1784,14 +1784,14 @@ define void @constrained_vector_powi_v3f64(ptr %a) #0 {
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI38_1
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI38_2
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f9, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f9, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: std %f0, 16(%r13)
@@ -1865,20 +1865,20 @@ define <4 x double> @constrained_vector_powi_v4f64() #0 {
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI39_1
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI39_2
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f9, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f9, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI39_3
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f10, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f10, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: ldr %f2, %f10
diff --git a/llvm/test/CodeGen/SystemZ/zos-ada-relocations.ll b/llvm/test/CodeGen/SystemZ/zos-ada-relocations.ll
index e252469..db67ac5 100644
--- a/llvm/test/CodeGen/SystemZ/zos-ada-relocations.ll
+++ b/llvm/test/CodeGen/SystemZ/zos-ada-relocations.ll
@@ -56,9 +56,9 @@ entry:
declare signext i32 @callout(i32 signext)
; CHECK: .section ".ada"
-; CHECK: .set @@DoFunc@indirect0, DoFunc
-; CHECK: .indirect_symbol @@DoFunc@indirect0
-; CHECK: .quad V(@@DoFunc@indirect0) * Offset 0 pointer to function descriptor DoFunc
+; CHECK: .set L#DoFunc@indirect0, DoFunc
+; CHECK: .indirect_symbol L#DoFunc@indirect0
+; CHECK: .quad V(L#DoFunc@indirect0) * Offset 0 pointer to function descriptor DoFunc
; CHECK: .quad R(Caller) * Offset 8 function descriptor of Caller
; CHECK: .quad V(Caller)
; CHECK: .quad A(i2) * Offset 24 pointer to data symbol i2
diff --git a/llvm/test/CodeGen/SystemZ/zos-landingpad.ll b/llvm/test/CodeGen/SystemZ/zos-landingpad.ll
index 7f3214d..9db1011 100644
--- a/llvm/test/CodeGen/SystemZ/zos-landingpad.ll
+++ b/llvm/test/CodeGen/SystemZ/zos-landingpad.ll
@@ -19,7 +19,7 @@ done:
lpad:
%0 = landingpad { ptr, i32 } cleanup
; The Exception Pointer is %r1; the Exception Selector, %r2.
-; CHECK: @BB{{[^%]*}} %lpad
+; CHECK: L#BB{{[^%]*}} %lpad
; CHECK-DAG: stg 1, {{.*}}
; CHECK-DAG: st 2, {{.*}}
%1 = extractvalue { ptr, i32 } %0, 0
diff --git a/llvm/test/CodeGen/SystemZ/zos-ppa2.ll b/llvm/test/CodeGen/SystemZ/zos-ppa2.ll
index 60580ae..189b5a3 100644
--- a/llvm/test/CodeGen/SystemZ/zos-ppa2.ll
+++ b/llvm/test/CodeGen/SystemZ/zos-ppa2.ll
@@ -2,24 +2,24 @@
; REQUIRES: systemz-registered-target
; CHECK: .section ".ppa2"
-; CHECK: @@PPA2:
+; CHECK: L#PPA2:
; CHECK: .byte 3
; CHECK: .byte 231
; CHECK: .byte 34
; CHECK: .byte 4
-; CHECK: .long CELQSTRT-@@PPA2
+; CHECK: .long CELQSTRT-L#PPA2
; CHECK: .long 0
-; CHECK: .long @@DVS-@@PPA2
+; CHECK: .long L#DVS-L#PPA2
; CHECK: .long 0
; CHECK: .byte 129
; CHECK: .byte 0
; CHECK: .short 0
-; CHECK: @@DVS:
+; CHECK: L#DVS:
; CHECK: .ascii "\361\371\367\360\360\361\360\361\360\360\360\360\360\360"
; CHECK: .short 0
-; CHECK: .quad @@PPA2-CELQSTRT * A(PPA2-CELQSTRT)
-; CHECK: @@PPA1_void_test_0:
-; CHECK: .long @@PPA2-@@PPA1_void_test_0 * Offset to PPA2
+; CHECK: .quad L#PPA2-CELQSTRT * A(PPA2-CELQSTRT)
+; CHECK: L#PPA1_void_test_0:
+; CHECK: .long L#PPA2-L#PPA1_void_test_0 * Offset to PPA2
; CHECK: .section "B_IDRL"
; CHECK: .byte 0
; CHECK: .byte 3
diff --git a/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll b/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll
index 8c04116..d3e5823 100644
--- a/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll
+++ b/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll
@@ -15,7 +15,7 @@
; CHECK64: aghi 4, 192
; CHECK64: b 2(7)
-; CHECK64: @@PPA1_func0_0:
+; CHECK64: L#PPA1_func0_0:
; CHECK64: .short 0 * Length/4 of Parms
define void @func0() {
call i64 (i64) @fun(i64 10)
@@ -31,7 +31,7 @@ define void @func0() {
; CHECK64: aghi 4, 160
; CHECK64: b 2(7)
-; CHECK64: @@PPA1_func1_0:
+; CHECK64: L#PPA1_func1_0:
; CHECK64: .short 2 * Length/4 of Parms
define void @func1(ptr %ptr) {
%l01 = load volatile i64, ptr %ptr
@@ -336,16 +336,16 @@ define void @large_stack0() {
; CHECK64: lgr 0, 3
; CHECK64: llgt 3, 1208
; CHECK64: cg 4, 64(3)
-; CHECK64: jhe @BB7_2
+; CHECK64: jhe L#BB7_2
; CHECK64: %bb.1:
; CHECK64: lg 3, 72(3)
; CHECK64: basr 3, 3
; CHECK64: bcr 0, 7
-; CHECK64: @BB7_2:
+; CHECK64: L#BB7_2:
; CHECK64: stmg 6, 7, 2064(4)
; CHECK64: lgr 3, 0
-; CHECK64: @@PPA1_large_stack1_0:
+; CHECK64: L#PPA1_large_stack1_0:
; CHECK64: .short 6 * Length/4 of Parms
define void @large_stack1(i64 %n1, i64 %n2, i64 %n3) {
%arr = alloca [131072 x i64], align 8
@@ -361,12 +361,12 @@ define void @large_stack1(i64 %n1, i64 %n2, i64 %n3) {
; CHECK64: agfi 4, -1048768
; CHECK64: llgt 3, 1208
; CHECK64: cg 4, 64(3)
-; CHECK64: jhe @BB8_2
+; CHECK64: jhe L#BB8_2
; CHECK64: %bb.1:
; CHECK64: lg 3, 72(3)
; CHECK64: basr 3, 3
; CHECK64: bcr 0, 7
-; CHECK64: @BB8_2:
+; CHECK64: L#BB8_2:
; CHECK64: lgr 3, 0
; CHECK64: lg 3, 2192(3)
; CHECK64: stmg 4, 12, 2048(4)
diff --git a/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll b/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll
index 767b702..a0f8374 100644
--- a/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll
+++ b/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll
@@ -42,9 +42,8 @@ define i64 @loopif(ptr nocapture readonly %x, i32 %y, i32 %n) {
; CHECK-NEXT: cmp r2, #1
; CHECK-NEXT: blt .LBB1_4
; CHECK-NEXT: @ %bb.1: @ %for.body.lr.ph
-; CHECK-NEXT: mov lr, r2
-; CHECK-NEXT: mov r12, r0
; CHECK-NEXT: dls lr, r2
+; CHECK-NEXT: mov r12, r0
; CHECK-NEXT: movs r0, #0
; CHECK-NEXT: movs r3, #0
; CHECK-NEXT: .p2align 2
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
index 4ab5697..93cab25 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
@@ -542,9 +542,7 @@ define arm_aapcs_vfpcc void @gather_inc_v8i16_simple(ptr noalias nocapture reado
; CHECK-NEXT: .pad #28
; CHECK-NEXT: sub sp, #28
; CHECK-NEXT: cmp r2, #1
-; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT: mov r1, r2
-; CHECK-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT: strd r1, r2, [sp, #4] @ 8-byte Folded Spill
; CHECK-NEXT: blt .LBB11_5
; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader
; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload
@@ -661,9 +659,7 @@ define arm_aapcs_vfpcc void @gather_inc_v8i16_complex(ptr noalias nocapture read
; CHECK-NEXT: .pad #136
; CHECK-NEXT: sub sp, #136
; CHECK-NEXT: cmp r2, #1
-; CHECK-NEXT: str r1, [sp, #64] @ 4-byte Spill
-; CHECK-NEXT: mov r1, r2
-; CHECK-NEXT: str r2, [sp, #68] @ 4-byte Spill
+; CHECK-NEXT: strd r1, r2, [sp, #64] @ 8-byte Folded Spill
; CHECK-NEXT: blt.w .LBB12_5
; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader
; CHECK-NEXT: ldr r1, [sp, #68] @ 4-byte Reload
@@ -952,11 +948,9 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_complex(ptr noalias nocapture read
; CHECK-NEXT: vstrw.32 q1, [sp, #152] @ 16-byte Spill
; CHECK-NEXT: vldrw.u32 q1, [sp, #296] @ 16-byte Reload
; CHECK-NEXT: vstrw.32 q0, [sp, #168] @ 16-byte Spill
-; CHECK-NEXT: vmov q0, q2
-; CHECK-NEXT: vmov q3, q5
-; CHECK-NEXT: vadd.i32 q1, q1, r0
; CHECK-NEXT: vldrw.u32 q0, [sp, #248] @ 16-byte Reload
; CHECK-NEXT: vldrw.u32 q3, [sp, #216] @ 16-byte Reload
+; CHECK-NEXT: vadd.i32 q1, q1, r0
; CHECK-NEXT: vstrw.32 q5, [sp, #120] @ 16-byte Spill
; CHECK-NEXT: vadd.i32 q0, q0, r0
; CHECK-NEXT: subs.w r11, r11, #16
@@ -1243,9 +1237,7 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(ptr noalias nocapture reado
; CHECK-NEXT: .pad #64
; CHECK-NEXT: sub sp, #64
; CHECK-NEXT: cmp r2, #1
-; CHECK-NEXT: str r1, [sp, #56] @ 4-byte Spill
-; CHECK-NEXT: mov r1, r2
-; CHECK-NEXT: str r2, [sp, #60] @ 4-byte Spill
+; CHECK-NEXT: strd r1, r2, [sp, #56] @ 8-byte Folded Spill
; CHECK-NEXT: blt.w .LBB14_5
; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader
; CHECK-NEXT: adr r5, .LCPI14_3
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
index 18c8a8a..7b8b884 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
@@ -609,7 +609,6 @@ define dso_local void @arm_mat_mult_q15(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: strd r0, r2, [sp, #24] @ 8-byte Folded Spill
; CHECK-NEXT: cmp r3, #0
; CHECK-NEXT: str r3, [sp, #8] @ 4-byte Spill
-; CHECK-NEXT: mov r0, r3
; CHECK-NEXT: itt ne
; CHECK-NEXT: ldrne r0, [sp, #136]
; CHECK-NEXT: cmpne r0, #0
diff --git a/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll b/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll
index 9987ff9..77980be 100644
--- a/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll
@@ -108,9 +108,7 @@ define void @correlate(ptr nocapture noundef readonly %ID, ptr nocapture noundef
; CHECK-NEXT: .pad #12
; CHECK-NEXT: sub sp, #12
; CHECK-NEXT: cmp r3, #1
-; CHECK-NEXT: strd r0, r1, [sp] @ 8-byte Folded Spill
-; CHECK-NEXT: mov r1, r3
-; CHECK-NEXT: str r3, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT: stm.w sp, {r0, r1, r3} @ 12-byte Folded Spill
; CHECK-NEXT: blt .LBB4_12
; CHECK-NEXT: @ %bb.1: @ %for.body.lr.ph
; CHECK-NEXT: ldr r1, [sp, #48]
diff --git a/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll b/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
index 82a186b..c03339b 100644
--- a/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
@@ -1062,9 +1062,8 @@ define arm_aapcs_vfpcc void @_Z37_arm_radix4_butterfly_inverse_f32_mvePK21arm_cf
; CHECK-NEXT: .pad #40
; CHECK-NEXT: sub sp, #40
; CHECK-NEXT: cmp r2, #8
-; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill
; CHECK-NEXT: vstr s0, [sp] @ 4-byte Spill
-; CHECK-NEXT: mov r1, r2
+; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill
; CHECK-NEXT: str r2, [sp, #4] @ 4-byte Spill
; CHECK-NEXT: blo .LBB7_9
; CHECK-NEXT: @ %bb.1:
diff --git a/llvm/test/CodeGen/Thumb2/mve-vldst4.ll b/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
index 219541c..2e51e9e 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
@@ -95,14 +95,13 @@ define void @vldst4(ptr nocapture readonly %pIn, ptr nocapture %pOut, i32 %numRo
; CHECK-NEXT: vmovx.f16 s8, s27
; CHECK-NEXT: vins.f16 s12, s24
; CHECK-NEXT: vins.f16 s13, s25
+; CHECK-NEXT: vins.f16 s2, s10
; CHECK-NEXT: vins.f16 s3, s11
; CHECK-NEXT: vins.f16 s1, s9
-; CHECK-NEXT: vins.f16 s2, s10
; CHECK-NEXT: vins.f16 s22, s8
; CHECK-NEXT: vmov q2, q3
-; CHECK-NEXT: vmov.f32 s17, s0
-; CHECK-NEXT: vmov.f32 s10, s4
; CHECK-NEXT: vmov q6, q0
+; CHECK-NEXT: vmov.f32 s10, s4
; CHECK-NEXT: vmov.f32 s11, s7
; CHECK-NEXT: vmov.f32 s9, s0
; CHECK-NEXT: vmov.f32 s17, s2
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir
index f28311e..f9b175e 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode -run-pass arm-mve-vpt-opts %s -o - | FileCheck %s
+# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode -run-pass arm-mve-vpt-opts -verify-machineinstrs %s -o - | FileCheck %s
---
name: vcmp_with_opposite_cond
@@ -1021,3 +1021,26 @@ body: |
%16:mqpr = MVE_VORR %15, %15, 1, %10, $noreg, undef %16
%17:mqpr = MVE_VORR %16, %16, 1, %11, $noreg, undef %17
...
+---
+name: reuse_kill_flags
+alignment: 4
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: reuse_kill_flags
+ ; CHECK: [[t2MOVi:%[0-9]+]]:tgpreven = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vccr = COPY [[t2MOVi]]
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:mqpr = IMPLICIT_DEF
+ ; CHECK-NEXT: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR [[DEF]], [[DEF]], 1, [[COPY]], $noreg, undef [[MVE_VORR]]
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:mqpr = IMPLICIT_DEF
+ ; CHECK-NEXT: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR [[DEF1]], [[DEF1]], 1, killed [[COPY]], $noreg, undef [[MVE_VORR1]]
+ ; CHECK-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit [[DEF1]]
+ %0:tgpreven = t2MOVi 0, 14, $noreg, $noreg
+ %1:vccr = COPY %0:tgpreven
+ %2:mqpr = IMPLICIT_DEF
+ %3:mqpr = MVE_VORR %2:mqpr, %2:mqpr, 1, killed %1, $noreg, undef %3
+ %4:vccr = COPY %0:tgpreven
+ %5:mqpr = IMPLICIT_DEF
+ %6:mqpr = MVE_VORR %5:mqpr, %5:mqpr, 1, killed %4, $noreg, undef %6
+ tBX_RET 14 /* CC::al */, $noreg, implicit %5:mqpr
+
+...
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
index aa4d877..4a63c81 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
@@ -59,12 +59,12 @@ entry:
%call = call i32 @setjmp(ptr %buf) #0
call void @longjmp(ptr %buf, i32 1) #1
unreachable
-; SJLJ: call saveSetjmp
+; SJLJ: call __wasm_setjmp
; SJLJ: i32.const emscripten_longjmp
; SJLJ-NOT: i32.const emscripten_longjmp_jmpbuf
; SJLJ: call invoke_vii
; SJLJ-NOT: call "__invoke_void_ptr_i32"
-; SJLJ: call testSetjmp
+; SJLJ: call __wasm_setjmp_test
; NONE: call setjmp
; NONE: call longjmp
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll
index 7cf05cc..32942cd 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll
@@ -49,7 +49,7 @@ try.cont: ; preds = %lpad, %entry
; longjmp checking part
; CHECK: if.then1:
-; CHECK: call i32 @testSetjmp
+; CHECK: call i32 @__wasm_setjmp_test
}
; @foo can either throw an exception or longjmp. Because this function doesn't
@@ -117,7 +117,6 @@ if.end: ; preds = %entry
; CHECK: rethrow.exn:
; CHECK-NEXT: %exn = call ptr @__cxa_find_matching_catch_2()
-; CHECK-NEXT: call void @free(ptr %setjmpTable{{.*}})
; CHECK-NEXT: call void @__resumeException(ptr %exn)
; CHECK-NEXT: unreachable
@@ -147,7 +146,6 @@ throw: ; preds = %if.end, %entry
unreachable
; CHECK: throw:
-; CHECK-NEXT: call void @free(ptr %setjmpTable{{.*}})
; CHECK-NEXT: call void @__cxa_throw(ptr null, ptr null, ptr null)
; CHECK-NEXT: unreachable
}
@@ -208,7 +206,6 @@ return: ; preds = %entry, %if.end
; CHECK: rethrow.exn:
; CHECK-NEXT: %exn = call ptr @__cxa_find_matching_catch_2()
-; CHECK-NEXT: tail call void @free(ptr %setjmpTable{{.*}})
; CHECK-NEXT: call void @__resumeException(ptr %exn)
; CHECK-NEXT: unreachable
}
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll
index 1a85a63..79ae161 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll
@@ -12,7 +12,7 @@ target triple = "wasm32-unknown-emscripten"
; CHECK-LABEL: @malloc_test
define void @malloc_test() {
entry:
- ; CHECK: call ptr @malloc
+ ; CHECK: alloca i32
%retval = alloca i32, align 4
%jmp = alloca [1 x %struct.__jmp_buf_tag], align 16
store i32 0, ptr %retval, align 4
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll
index 4f69415..fec9836 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll
@@ -16,25 +16,22 @@ entry:
call void @foo(), !dbg !7
ret void, !dbg !8
; CHECK: entry:
- ; CHECK-NEXT: call ptr @malloc(i32 40), !dbg ![[DL0:.*]]
+ ; CHECK-NEXT: %functionInvocationId = alloca i32, align 4, !dbg ![[DL0:.*]]
; CHECK: entry.split:
; CHECK: alloca {{.*}}, !dbg ![[DL0]]
- ; CHECK: call ptr @saveSetjmp{{.*}}, !dbg ![[DL1:.*]]
- ; CHECK-NEXT: call i32 @getTempRet0{{.*}}, !dbg ![[DL1]]
+ ; CHECK: call void @__wasm_setjmp{{.*}}, !dbg ![[DL1:.*]]
; CHECK-NEXT: br {{.*}}, !dbg ![[DL2:.*]]
; CHECK: entry.split.split:
; CHECK: call {{.*}} void @__invoke_void{{.*}}, !dbg ![[DL2]]
; CHECK: entry.split.split.split:
- ; CHECK-NEXT: call void @free{{.*}}, !dbg ![[DL3:.*]]
; CHECK: if.then1:
- ; CHECK: call i32 @testSetjmp{{.*}}, !dbg ![[DL2]]
+ ; CHECK: call i32 @__wasm_setjmp_test{{.*}}, !dbg ![[DL2]]
; CHECK: if.end:
- ; CHECK: call i32 @getTempRet0{{.*}}, !dbg ![[DL2]]
; CHECK: call.em.longjmp:
; CHECK: call void @emscripten_longjmp{{.*}}, !dbg ![[DL2]]
@@ -43,26 +40,6 @@ entry:
; CHECK: call void @setTempRet0{{.*}}, !dbg ![[DL2]]
}
-; No instruction has debug info but the current function (setjmp_debug_info2)
-; and the called function (malloc / free) have DISubprograms, so the newly
-; generated calls should have debug info attached. We don't have an instruction
-; to take debug info from, so we create dummy debug info.
-define void @setjmp_debug_info1() !dbg !9 {
-; CHECK-LABEL: @setjmp_debug_info1
-entry:
- %buf = alloca [1 x %struct.__jmp_buf_tag], align 16
- %arraydecay = getelementptr inbounds [1 x %struct.__jmp_buf_tag], ptr %buf, i32 0, i32 0
- %call = call i32 @setjmp(ptr %arraydecay) #0
- call void @foo()
- ret void
- ; CHECK: call ptr @malloc(i32 40), !dbg ![[DL_DUMMY:.*]]
- ; CHECK: call void @free{{.*}}, !dbg ![[DL_DUMMY]]
-}
-
-; Note that these functions have DISubprograms.
-declare !dbg !10 ptr @malloc(i32)
-declare !dbg !11 void @free(ptr)
-
declare void @foo()
; Function Attrs: returns_twice
declare i32 @setjmp(ptr) #0
@@ -79,9 +56,3 @@ declare i32 @setjmp(ptr) #0
!6 = !DILocation(line:4, scope: !3)
!7 = !DILocation(line:5, scope: !3)
!8 = !DILocation(line:6, scope: !3)
-!9 = distinct !DISubprogram(name: "setjmp_debug_info1", unit:!2, file: !1, line: 50)
-!10 = !DISubprogram(name: "malloc", file: !1, line: 10, isDefinition: false)
-!11 = !DISubprogram(name: "free", file: !1, line: 20, isDefinition: false)
-
-; Dummy debug info generated
-; CHECK: ![[DL_DUMMY]] = !DILocation(line: 50, column: 1, scope: !9)
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll
index 7115b01..27ec95a 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll
@@ -22,15 +22,12 @@ entry:
call void @longjmp(ptr %buf, i32 1) #1
unreachable
; CHECK: entry:
-; CHECK-NEXT: %[[MALLOCCALL:.*]] = tail call ptr @malloc([[PTR]] 40)
-; CHECK-NEXT: store i32 0, ptr %[[MALLOCCALL]]
-; CHECK-NEXT: %[[SETJMP_TABLE_SIZE:.*]] = add i32 4, 0
+; CHECK-NEXT: %functionInvocationId = alloca i32, align 4
; CHECK-NEXT: br label %entry.split
; CHECK: entry.split
; CHECK-NEXT: %[[BUF:.*]] = alloca [1 x %struct.__jmp_buf_tag]
-; CHECK-NEXT: %[[SETJMP_TABLE1:.*]] = call ptr @saveSetjmp(ptr %[[BUF]], i32 1, ptr %[[MALLOCCALL]], i32 %[[SETJMP_TABLE_SIZE]])
-; CHECK-NEXT: %[[SETJMP_TABLE_SIZE1:.*]] = call i32 @getTempRet0()
+; CHECK-NEXT: call void @__wasm_setjmp(ptr %[[BUF]], i32 1, ptr %functionInvocationId)
; CHECK-NEXT: br label %entry.split.split
; CHECK: entry.split.split:
@@ -51,8 +48,7 @@ entry:
; CHECK: if.then1:
; CHECK-NEXT: %[[__THREW__VAL_P:.*]] = inttoptr [[PTR]] %[[__THREW__VAL]] to ptr
-; CHECK-NEXT: %[[__THREW__VAL_P_LOADED:.*]] = load [[PTR]], ptr %[[__THREW__VAL_P]]
-; CHECK-NEXT: %[[LABEL:.*]] = call i32 @testSetjmp([[PTR]] %[[__THREW__VAL_P_LOADED]], ptr %[[SETJMP_TABLE1]], i32 %[[SETJMP_TABLE_SIZE1]])
+; CHECK-NEXT: %[[LABEL:.*]] = call i32 @__wasm_setjmp_test(ptr %[[__THREW__VAL_P]], ptr %functionInvocationId)
; CHECK-NEXT: %[[CMP:.*]] = icmp eq i32 %[[LABEL]], 0
; CHECK-NEXT: br i1 %[[CMP]], label %call.em.longjmp, label %if.end2
@@ -69,7 +65,6 @@ entry:
; CHECK: call.em.longjmp:
; CHECK-NEXT: %threw.phi = phi [[PTR]] [ %[[__THREW__VAL]], %if.then1 ]
; CHECK-NEXT: %threwvalue.phi = phi i32 [ %[[THREWVALUE_VAL]], %if.then1 ]
-; CHECK-NEXT: tail call void @free(ptr %[[SETJMP_TABLE1]])
; CHECK-NEXT: call void @emscripten_longjmp([[PTR]] %threw.phi, i32 %threwvalue.phi)
; CHECK-NEXT: unreachable
@@ -87,13 +82,12 @@ entry:
call void @foo()
ret void
; CHECK: entry:
-; CHECK: %[[SETJMP_TABLE:.*]] = call ptr @saveSetjmp(
+; CHECK: call void @__wasm_setjmp(
; CHECK: entry.split.split:
; CHECK: @__invoke_void(ptr @foo)
; CHECK: entry.split.split.split:
-; CHECK-NEXT: tail call void @free(ptr %[[SETJMP_TABLE]])
; CHECK-NEXT: ret void
}
@@ -110,9 +104,8 @@ entry:
call void @foo()
ret void
; CHECK: call.em.longjmp:
-; CHECK-NEXT: %threw.phi = phi [[PTR]] [ %__THREW__.val, %if.then1 ], [ %__THREW__.val4, %if.then15 ]
-; CHECK-NEXT: %threwvalue.phi = phi i32 [ %__threwValue.val, %if.then1 ], [ %__threwValue.val8, %if.then15 ]
-; CHECK-NEXT: tail call void @free(ptr %[[SETJMP_TABLE1]])
+; CHECK-NEXT: %threw.phi = phi [[PTR]] [ %__THREW__.val, %if.then1 ], [ %__THREW__.val2, %if.then13 ]
+; CHECK-NEXT: %threwvalue.phi = phi i32 [ %__threwValue.val, %if.then1 ], [ %__threwValue.val6, %if.then13 ]
; CHECK-NEXT: call void @emscripten_longjmp([[PTR]] %threw.phi, i32 %threwvalue.phi)
; CHECK-NEXT: unreachable
}
@@ -145,7 +138,6 @@ entry:
%cmp = icmp sgt i32 %n, 5
br i1 %cmp, label %if.then, label %if.end
; CHECK: entry:
-; CHECK: %[[SETJMP_TABLE_SIZE0:.*]] = add i32 4, 0
if.then: ; preds = %entry
%0 = load i32, ptr @global_var, align 4
@@ -154,13 +146,10 @@ if.then: ; preds = %entry
br label %if.end
; CHECK: if.then:
; CHECK: %[[VAR0:.*]] = load i32, ptr @global_var, align 4
-; CHECK: %[[SETJMP_TABLE1:.*]] = call ptr @saveSetjmp(
-; CHECK-NEXT: %[[SETJMP_TABLE_SIZE1:.*]] = call i32 @getTempRet0()
+; CHECK: call void @__wasm_setjmp(
; CHECK: if.then.split:
-; CHECK: %[[VAR1:.*]] = phi i32 [ %[[VAR2:.*]], %if.end3 ], [ %[[VAR0]], %if.then ]
-; CHECK: %[[SETJMP_TABLE_SIZE2:.*]] = phi i32 [ %[[SETJMP_TABLE_SIZE1]], %if.then ], [ %[[SETJMP_TABLE_SIZE3:.*]], %if.end3 ]
-; CHECK: %[[SETJMP_TABLE2:.*]] = phi ptr [ %[[SETJMP_TABLE1]], %if.then ], [ %[[SETJMP_TABLE3:.*]], %if.end3 ]
+; CHECK: %[[VAR1:.*]] = phi i32 [ %[[VAR2:.*]], %if.end1 ], [ %[[VAR0]], %if.then ]
; CHECK: store i32 %[[VAR1]], ptr @global_var, align 4
if.end: ; preds = %if.then, %entry
@@ -168,8 +157,6 @@ if.end: ; preds = %if.then, %entry
unreachable
; CHECK: if.end:
; CHECK: %[[VAR2]] = phi i32 [ %[[VAR1]], %if.then.split ], [ undef, %entry.split ]
-; CHECK: %[[SETJMP_TABLE_SIZE3]] = phi i32 [ %[[SETJMP_TABLE_SIZE2]], %if.then.split ], [ %[[SETJMP_TABLE_SIZE0]], %entry.split ]
-; CHECK: %[[SETJMP_TABLE3]] = phi ptr [ %[[SETJMP_TABLE2]], %if.then.split ], [ %setjmpTable, %entry.split ]
}
; Test a case when a function only calls other functions that are neither setjmp nor longjmp
@@ -296,8 +283,8 @@ declare void @free(ptr)
; JS glue functions and invoke wrappers declaration
; CHECK-DAG: declare i32 @getTempRet0()
; CHECK-DAG: declare void @setTempRet0(i32)
-; CHECK-DAG: declare ptr @saveSetjmp(ptr, i32, ptr, i32)
-; CHECK-DAG: declare i32 @testSetjmp([[PTR]], ptr, i32)
+; CHECK-DAG: declare void @__wasm_setjmp(ptr, i32, ptr)
+; CHECK-DAG: declare i32 @__wasm_setjmp_test(ptr, ptr)
; CHECK-DAG: declare void @emscripten_longjmp([[PTR]], i32)
; CHECK-DAG: declare void @__invoke_void(ptr)
@@ -308,8 +295,8 @@ attributes #3 = { allocsize(0) }
; CHECK-DAG: attributes #{{[0-9]+}} = { nounwind "wasm-import-module"="env" "wasm-import-name"="getTempRet0" }
; CHECK-DAG: attributes #{{[0-9]+}} = { nounwind "wasm-import-module"="env" "wasm-import-name"="setTempRet0" }
; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="__invoke_void" }
-; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="saveSetjmp" }
-; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="testSetjmp" }
+; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="__wasm_setjmp" }
+; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="__wasm_setjmp_test" }
; CHECK-DAG: attributes #{{[0-9]+}} = { noreturn "wasm-import-module"="env" "wasm-import-name"="emscripten_longjmp" }
; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="__invoke_ptr_i32_ptr" }
; CHECK-DAG: attributes #[[ALLOCSIZE_ATTR]] = { allocsize(1) }
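
Taken together, the updated checks in this file document the new runtime contract: the per-function malloc'd setjmp table and its matching free calls are gone, replaced by a stack-allocated %functionInvocationId, and the glue pair saveSetjmp/testSetjmp becomes __wasm_setjmp/__wasm_setjmp_test with the signatures checked above. A condensed sketch of the transformed shape (the dispatch control flow and invoke wrappers are elided, and the function name is illustrative):

declare void @__wasm_setjmp(ptr, i32, ptr)
declare i32 @__wasm_setjmp_test(ptr, ptr)

define i32 @transformed_shape(ptr %buf, ptr %env) {
entry:
  ; One invocation id per function activation, on the stack.
  %functionInvocationId = alloca i32, align 4
  call void @__wasm_setjmp(ptr %buf, i32 1, ptr %functionInvocationId)
  ; On an incoming longjmp, test whether it targets this activation.
  %label = call i32 @__wasm_setjmp_test(ptr %env, ptr %functionInvocationId)
  ret i32 %label
}
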
diff --git a/llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll b/llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll
index 25471eb..bd8db83 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll
@@ -108,8 +108,8 @@ catch: ; preds = %catch.start
call void @__cxa_end_catch() [ "funclet"(token %2) ]
catchret from %2 to label %catchret.dest
; CHECK: catch: ; preds = %catch.start
-; CHECK-NEXT: %exn = load ptr, ptr %exn.slot15, align 4
-; CHECK-NEXT: %5 = call ptr @__cxa_begin_catch(ptr %exn) #7 [ "funclet"(token %2) ]
+; CHECK-NEXT: %exn = load ptr, ptr %exn.slot6, align 4
+; CHECK-NEXT: %5 = call ptr @__cxa_begin_catch(ptr %exn) #6 [ "funclet"(token %2) ]
; CHECK-NEXT: invoke void @__cxa_end_catch() [ "funclet"(token %2) ]
; CHECK-NEXT: to label %.noexc unwind label %catch.dispatch.longjmp
@@ -265,7 +265,7 @@ ehcleanup: ; preds = %entry
; (cleanuppad), whose parent is 'none', so we should unwind directly to
; %catch.dispatch.longjmp.
%call2 = call noundef ptr @_ZN4TempD2Ev(ptr noundef %t) #2 [ "funclet"(token %0) ]
-; CHECK: %call13 = invoke {{.*}} ptr @_ZN4TempD2Ev(ptr
+; CHECK: %call11 = invoke {{.*}} ptr @_ZN4TempD2Ev(ptr
; CHECK-NEXT: to label {{.*}} unwind label %catch.dispatch.longjmp
cleanupret from %0 unwind to caller
}
diff --git a/llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll b/llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll
index b8d2230..82c04e2 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll
@@ -25,16 +25,12 @@ entry:
unreachable
; CHECK: entry:
-; CHECK-NEXT: %setjmpTable = tail call ptr @malloc([[PTR]] 40)
-; CHECK-NEXT: store i32 0, ptr %setjmpTable, align 4
-; CHECK-NEXT: %setjmpTableSize = add i32 4, 0
+; CHECK-NEXT: %functionInvocationId = alloca i32, align 4
; CHECK-NEXT: br label %setjmp.dispatch
; CHECK: setjmp.dispatch:
; CHECK-NEXT: %[[VAL2:.*]] = phi i32 [ %val, %if.end ], [ undef, %entry ]
; CHECK-NEXT: %[[BUF:.*]] = phi ptr [ %[[BUF2:.*]], %if.end ], [ undef, %entry ]
-; CHECK-NEXT: %[[SETJMPTABLESIZE2:.*]] = phi i32 [ %[[SETJMPTABLESIZE3:.*]], %if.end ], [ %setjmpTableSize, %entry ]
-; CHECK-NEXT: %[[SETJMPTABLE2:.*]] = phi ptr [ %[[SETJMPTABLE3:.*]], %if.end ], [ %setjmpTable, %entry ]
; CHECK-NEXT: %label.phi = phi i32 [ %label, %if.end ], [ -1, %entry ]
; CHECK-NEXT: switch i32 %label.phi, label %entry.split [
; CHECK-NEXT: i32 1, label %entry.split.split
@@ -42,14 +38,11 @@ entry:
; CHECK: entry.split:
; CHECK-NEXT: %buf = alloca [1 x %struct.__jmp_buf_tag], align 16
-; CHECK-NEXT: %[[SETJMPTABLE4:.*]] = call ptr @saveSetjmp(ptr %buf, i32 1, ptr %[[SETJMPTABLE2]], i32 %[[SETJMPTABLESIZE2]])
-; CHECK-NEXT: %[[SETJMPTABLESIZE4:.*]] = call i32 @getTempRet0()
+; CHECK-NEXT: call void @__wasm_setjmp(ptr %buf, i32 1, ptr %functionInvocationId)
; CHECK-NEXT: br label %entry.split.split
; CHECK: entry.split.split:
; CHECK-NEXT: %[[BUF2]] = phi ptr [ %[[BUF]], %setjmp.dispatch ], [ %buf, %entry.split ]
-; CHECK-NEXT: %[[SETJMPTABLESIZE3]] = phi i32 [ %[[SETJMPTABLESIZE4]], %entry.split ], [ %[[SETJMPTABLESIZE2]], %setjmp.dispatch ]
-; CHECK-NEXT: %[[SETJMPTABLE3]] = phi ptr [ %[[SETJMPTABLE4]], %entry.split ], [ %[[SETJMPTABLE2]], %setjmp.dispatch ]
; CHECK-NEXT: %setjmp.ret = phi i32 [ 0, %entry.split ], [ %[[VAL2]], %setjmp.dispatch ]
; CHECK-NEXT: invoke void @__wasm_longjmp(ptr %[[BUF2]], i32 1)
; CHECK-NEXT: to label %.noexc unwind label %catch.dispatch.longjmp
@@ -67,13 +60,11 @@ entry:
; CHECK-NEXT: %val_gep = getelementptr { ptr, i32 }, ptr %thrown, i32 0, i32 1
; CHECK-NEXT: %env = load ptr, ptr %env_gep, align {{.*}}
; CHECK-NEXT: %val = load i32, ptr %val_gep, align 4
-; CHECK-NEXT: %setjmp.id = load [[PTR]], ptr %env, align {{.*}}
-; CHECK-NEXT: %label = call i32 @testSetjmp([[PTR]] %setjmp.id, ptr %[[SETJMPTABLE3]], i32 %[[SETJMPTABLESIZE3]]) [ "funclet"(token %1) ]
+; CHECK-NEXT: %label = call i32 @__wasm_setjmp_test(ptr %env, ptr %functionInvocationId) [ "funclet"(token %1) ]
; CHECK-NEXT: %2 = icmp eq i32 %label, 0
; CHECK-NEXT: br i1 %2, label %if.then, label %if.end
; CHECK: if.then:
-; CHECK-NEXT: tail call void @free(ptr %[[SETJMPTABLE3]]) [ "funclet"(token %1) ]
; CHECK-NEXT: call void @__wasm_longjmp(ptr %env, i32 %val) [ "funclet"(token %1) ]
; CHECK-NEXT: unreachable
@@ -142,10 +133,9 @@ declare ptr @__cxa_begin_catch(ptr)
declare void @__cxa_end_catch()
declare void @free(ptr)
-; JS glue function declarations
-; CHECK-DAG: declare i32 @getTempRet0()
-; CHECK-DAG: declare ptr @saveSetjmp(ptr, i32, ptr, i32)
-; CHECK-DAG: declare i32 @testSetjmp([[PTR]], ptr, i32)
+; Runtime glue function declarations
+; CHECK-DAG: declare void @__wasm_setjmp(ptr, i32, ptr)
+; CHECK-DAG: declare i32 @__wasm_setjmp_test(ptr, ptr)
; CHECK-DAG: declare void @__wasm_longjmp(ptr, i32)
attributes #0 = { returns_twice }
diff --git a/llvm/test/CodeGen/WebAssembly/pr63817.ll b/llvm/test/CodeGen/WebAssembly/pr63817.ll
new file mode 100644
index 0000000..252768d
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/pr63817.ll
@@ -0,0 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=wasm32 -mattr=+simd128 | FileCheck %s
+
+;; Regression test for a bug in which BUILD_VECTOR nodes with large unsigned
+;; lane constants were not properly selected.
+define <4 x i8> @test(<4 x i8> %0) {
+; CHECK-LABEL: test:
+; CHECK: .functype test (v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: v128.const 255, 17, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK-NEXT: # fallthrough-return
+ %V1 = or <4 x i8> <i8 255, i8 255, i8 255, i8 255>, %0
+ %V2 = insertelement <4 x i8> %V1, i8 17, i32 1
+ ret <4 x i8> %V2
+}
diff --git a/llvm/test/CodeGen/WinCFGuard/cfguard-mingw.ll b/llvm/test/CodeGen/WinCFGuard/cfguard-mingw.ll
index 085cde8..7a5baa0 100644
--- a/llvm/test/CodeGen/WinCFGuard/cfguard-mingw.ll
+++ b/llvm/test/CodeGen/WinCFGuard/cfguard-mingw.ll
@@ -97,7 +97,7 @@ $_ZTI7Derived = comdat any
; Function Attrs: nounwind uwtable
define weak_odr dso_local dllexport void @_ZN4BaseC2Ev(ptr noundef nonnull align 8 dereferenceable(12) %0) unnamed_addr #0 comdat align 2 {
- store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, inrange i32 0, i64 2), ptr %0, align 8, !tbaa !5
+ store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, i32 0, i64 2), ptr %0, align 8, !tbaa !5
%2 = getelementptr inbounds %class.Base, ptr %0, i64 0, i32 1
store i32 0, ptr %2, align 8, !tbaa !8
ret void
@@ -105,7 +105,7 @@ define weak_odr dso_local dllexport void @_ZN4BaseC2Ev(ptr noundef nonnull align
; Function Attrs: nounwind uwtable
define weak_odr dso_local dllexport void @_ZN4BaseC1Ev(ptr noundef nonnull align 8 dereferenceable(12) %0) unnamed_addr #0 comdat align 2 {
- store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, inrange i32 0, i64 2), ptr %0, align 8, !tbaa !5
+ store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, i32 0, i64 2), ptr %0, align 8, !tbaa !5
%2 = getelementptr inbounds %class.Base, ptr %0, i64 0, i32 1
store i32 0, ptr %2, align 8, !tbaa !8
ret void
@@ -140,10 +140,10 @@ declare dso_local void @_ZdlPv(ptr noundef) local_unnamed_addr #2
; Function Attrs: nounwind uwtable
define weak_odr dso_local dllexport void @_ZN7DerivedC2Ev(ptr noundef nonnull align 8 dereferenceable(16) %0) unnamed_addr #0 comdat align 2 {
- store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, inrange i32 0, i64 2), ptr %0, align 8, !tbaa !5
+ store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, i32 0, i64 2), ptr %0, align 8, !tbaa !5
%2 = getelementptr inbounds %class.Base, ptr %0, i64 0, i32 1
store i32 0, ptr %2, align 8, !tbaa !8
- store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV7Derived, i64 0, inrange i32 0, i64 2), ptr %0, align 8, !tbaa !5
+ store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV7Derived, i64 0, i32 0, i64 2), ptr %0, align 8, !tbaa !5
%3 = getelementptr inbounds %class.Derived, ptr %0, i64 0, i32 1
store i32 0, ptr %3, align 4, !tbaa !12
ret void
@@ -151,10 +151,10 @@ define weak_odr dso_local dllexport void @_ZN7DerivedC2Ev(ptr noundef nonnull al
; Function Attrs: nounwind uwtable
define weak_odr dso_local dllexport void @_ZN7DerivedC1Ev(ptr noundef nonnull align 8 dereferenceable(16) %0) unnamed_addr #0 comdat align 2 {
- store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, inrange i32 0, i64 2), ptr %0, align 8, !tbaa !5
+ store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, i32 0, i64 2), ptr %0, align 8, !tbaa !5
%2 = getelementptr inbounds %class.Base, ptr %0, i64 0, i32 1
store i32 0, ptr %2, align 8, !tbaa !8
- store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV7Derived, i64 0, inrange i32 0, i64 2), ptr %0, align 8, !tbaa !5
+ store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV7Derived, i64 0, i32 0, i64 2), ptr %0, align 8, !tbaa !5
%3 = getelementptr inbounds %class.Derived, ptr %0, i64 0, i32 1
store i32 0, ptr %3, align 4, !tbaa !12
ret void
diff --git a/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll b/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll
index 535450a..695a2d0 100644
--- a/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll
+++ b/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll
@@ -9,11 +9,11 @@ define <2 x i64> @_mm_insert_epi16(<2 x i64> %a, i32 %b, i32 %imm) nounwind read
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-16, %esp
; X86-NEXT: subl $32, %esp
-; X86-NEXT: movzwl 8(%ebp), %eax
-; X86-NEXT: movl 12(%ebp), %ecx
-; X86-NEXT: andl $7, %ecx
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movzwl 8(%ebp), %ecx
+; X86-NEXT: andl $7, %eax
; X86-NEXT: movaps %xmm0, (%esp)
-; X86-NEXT: movw %ax, (%esp,%ecx,2)
+; X86-NEXT: movw %cx, (%esp,%eax,2)
; X86-NEXT: movaps (%esp), %xmm0
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-icmp-vec.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-icmp-vec.mir
new file mode 100644
index 0000000..e0fb0fc
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-icmp-vec.mir
@@ -0,0 +1,25 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*' %s -o - 2>%t | FileCheck %s
+# RUN: FileCheck -check-prefix=ILLEGAL %s < %t
+
+# ILLEGAL: remark: <unknown>:0:0: unable to legalize instruction: %2:_(<4 x s1>) = G_ICMP intpred(sle), %0:_(<4 x s64>), %1:_ (in function: test_icmp_v4i64)
+
+# PR86203
+---
+name: test_icmp_v4i64
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: test_icmp_v4i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<4 x s1>) = G_ICMP intpred(sle), [[DEF]](<4 x s64>), [[DEF1]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[ICMP]](<4 x s1>)
+ ; CHECK-NEXT: $xmm0 = COPY [[ANYEXT]](<4 x s32>)
+ ; CHECK-NEXT: RET 0, implicit $xmm0
+ %0:_(<4 x s64>) = G_IMPLICIT_DEF
+ %1:_(<4 x s64>) = G_IMPLICIT_DEF
+ %3:_(<4 x s1>) = G_ICMP intpred(sle), %0(<4 x s64>), %1
+ %4:_(<4 x s32>) = G_ANYEXT %3(<4 x s1>)
+ $xmm0 = COPY %4(<4 x s32>)
+ RET 0, implicit $xmm0
+...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir
index ea548c2..20b8b67 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir
@@ -23,6 +23,6 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: trap
; CHECK: TRAP
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
...
diff --git a/llvm/test/CodeGen/X86/addcarry.ll b/llvm/test/CodeGen/X86/addcarry.ll
index 3fc4ed9..f8d32fc 100644
--- a/llvm/test/CodeGen/X86/addcarry.ll
+++ b/llvm/test/CodeGen/X86/addcarry.ll
@@ -1490,3 +1490,26 @@ define { i64, i64 } @addcarry_commutative_2(i64 %x0, i64 %x1, i64 %y0, i64 %y1)
%r1 = insertvalue { i64, i64 } %r0, i64 %b1s, 1
ret { i64, i64 } %r1
}
+
+define i1 @pr84831(i64 %arg) {
+; CHECK-LABEL: pr84831:
+; CHECK: # %bb.0:
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: addb $-1, %al
+; CHECK-NEXT: adcq $1, %rcx
+; CHECK-NEXT: setb %al
+; CHECK-NEXT: retq
+ %a = icmp ult i64 0, %arg
+ %add1 = add i64 0, 1
+ %carryout1 = icmp ult i64 %add1, 0
+ %b = zext i1 %a to i64
+ %add2 = add i64 %add1, %b
+ %carryout2 = icmp ult i64 %add2, %add1
+ %zc1 = zext i1 %carryout1 to i63
+ %zc2 = zext i1 %carryout2 to i63
+ %or = or i63 %zc1, %zc2
+ %trunc = trunc i63 %or to i1
+ ret i1 %trunc
+}
diff --git a/llvm/test/CodeGen/X86/allow-check.ll b/llvm/test/CodeGen/X86/allow-check.ll
new file mode 100644
index 0000000..602e5a9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/allow-check.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=x86_64 -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64 -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64 -global-isel=0 -fast-isel=1 | FileCheck %s
+
+define i1 @test_runtime() local_unnamed_addr {
+; CHECK-LABEL: test_runtime:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movb $1, %al
+; CHECK-NEXT: retq
+entry:
+ %allow = call i1 @llvm.allow.runtime.check(metadata !"test_check")
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.runtime.check(metadata) nounwind
+
+define i1 @test_ubsan() local_unnamed_addr {
+; CHECK-LABEL: test_ubsan:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movb $1, %al
+; CHECK-NEXT: retq
+entry:
+ %allow = call i1 @llvm.allow.ubsan.check(i8 7)
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.ubsan.check(i8) nounwind
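
A hedged sketch of intended use (the guarded division, the kind value 7, and the function name are illustrative, not from the test): the intrinsic gates a single sanitizer check, so when it folds to false the check is skipped entirely, and the new test shows it currently lowers to a constant true:

declare i1 @llvm.allow.ubsan.check(i8)
declare void @llvm.trap()

define i32 @guarded_sdiv(i32 %a, i32 %b) {
entry:
  %allow = call i1 @llvm.allow.ubsan.check(i8 7)
  %is.zero = icmp eq i32 %b, 0
  ; Trap only if the check is both allowed and would fire.
  %do.trap = and i1 %allow, %is.zero
  br i1 %do.trap, label %trap, label %cont

trap:
  call void @llvm.trap()
  unreachable

cont:
  %q = sdiv i32 %a, %b
  ret i32 %q
}
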
diff --git a/llvm/test/CodeGen/X86/apx/add.ll b/llvm/test/CodeGen/X86/apx/add.ll
index cdb29a7..d3301ec 100644
--- a/llvm/test/CodeGen/X86/apx/add.ll
+++ b/llvm/test/CodeGen/X86/apx/add.ll
@@ -298,9 +298,9 @@ define i8 @addflag8rr(i8 noundef %a, i8 noundef %b) {
; CHECK-LABEL: addflag8rr:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addb %sil, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x00,0xf7]
-; CHECK-NEXT: movzbl %al, %ecx # encoding: [0x0f,0xb6,0xc8]
-; CHECK-NEXT: movl $255, %eax # encoding: [0xb8,0xff,0x00,0x00,0x00]
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: movzbl %al, %eax # encoding: [0x0f,0xb6,0xc0]
+; CHECK-NEXT: movl $255, %ecx # encoding: [0xb9,0xff,0x00,0x00,0x00]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -311,10 +311,10 @@ entry:
define i16 @addflag16rr(i16 noundef %a, i16 noundef %b) {
; CHECK-LABEL: addflag16rr:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addw %si, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x01,0xf7]
-; CHECK-NEXT: movl $65535, %eax # encoding: [0xb8,0xff,0xff,0x00,0x00]
+; CHECK-NEXT: addw %si, %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x01,0xf7]
+; CHECK-NEXT: movl $65535, %ecx # encoding: [0xb9,0xff,0xff,0x00,0x00]
; CHECK-NEXT: # imm = 0xFFFF
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -325,9 +325,9 @@ entry:
define i32 @addflag32rr(i32 noundef %a, i32 noundef %b) {
; CHECK-LABEL: addflag32rr:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addl %esi, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x01,0xf7]
-; CHECK-NEXT: movl $-1, %eax # encoding: [0xb8,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: addl %esi, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x01,0xf7]
+; CHECK-NEXT: movl $-1, %ecx # encoding: [0xb9,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%add = call i32 @llvm.uadd.sat.i32(i32 %a, i32 %b)
@@ -337,9 +337,9 @@ entry:
define i64 @addflag64rr(i64 noundef %a, i64 noundef %b) {
; CHECK-LABEL: addflag64rr:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addq %rsi, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x01,0xf7]
-; CHECK-NEXT: movq $-1, %rax # encoding: [0x48,0xc7,0xc0,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: addq %rsi, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x01,0xf7]
+; CHECK-NEXT: movq $-1, %rcx # encoding: [0x48,0xc7,0xc1,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%add = call i64 @llvm.uadd.sat.i64(i64 %a, i64 %b)
@@ -350,9 +350,9 @@ define i8 @addflag8rm(i8 noundef %a, ptr %b) {
; CHECK-LABEL: addflag8rm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addb (%rsi), %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x02,0x3e]
-; CHECK-NEXT: movzbl %al, %ecx # encoding: [0x0f,0xb6,0xc8]
-; CHECK-NEXT: movl $255, %eax # encoding: [0xb8,0xff,0x00,0x00,0x00]
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: movzbl %al, %eax # encoding: [0x0f,0xb6,0xc0]
+; CHECK-NEXT: movl $255, %ecx # encoding: [0xb9,0xff,0x00,0x00,0x00]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -364,10 +364,10 @@ entry:
define i16 @addflag16rm(i16 noundef %a, ptr %b) {
; CHECK-LABEL: addflag16rm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addw (%rsi), %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x03,0x3e]
-; CHECK-NEXT: movl $65535, %eax # encoding: [0xb8,0xff,0xff,0x00,0x00]
+; CHECK-NEXT: addw (%rsi), %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x03,0x3e]
+; CHECK-NEXT: movl $65535, %ecx # encoding: [0xb9,0xff,0xff,0x00,0x00]
; CHECK-NEXT: # imm = 0xFFFF
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -379,9 +379,9 @@ entry:
define i32 @addflag32rm(i32 noundef %a, ptr %b) {
; CHECK-LABEL: addflag32rm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addl (%rsi), %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x03,0x3e]
-; CHECK-NEXT: movl $-1, %eax # encoding: [0xb8,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: addl (%rsi), %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x03,0x3e]
+; CHECK-NEXT: movl $-1, %ecx # encoding: [0xb9,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%t = load i32, ptr %b
@@ -392,9 +392,9 @@ entry:
define i64 @addflag64rm(i64 noundef %a, ptr %b) {
; CHECK-LABEL: addflag64rm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addq (%rsi), %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x03,0x3e]
-; CHECK-NEXT: movq $-1, %rax # encoding: [0x48,0xc7,0xc0,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: addq (%rsi), %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x03,0x3e]
+; CHECK-NEXT: movq $-1, %rcx # encoding: [0x48,0xc7,0xc1,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%t = load i64, ptr %b
@@ -405,10 +405,10 @@ entry:
define i16 @addflag16ri8(i16 noundef %a) {
; CHECK-LABEL: addflag16ri8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addw $123, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x83,0xc7,0x7b]
-; CHECK-NEXT: movl $65535, %eax # encoding: [0xb8,0xff,0xff,0x00,0x00]
+; CHECK-NEXT: addw $123, %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x83,0xc7,0x7b]
+; CHECK-NEXT: movl $65535, %ecx # encoding: [0xb9,0xff,0xff,0x00,0x00]
; CHECK-NEXT: # imm = 0xFFFF
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -419,9 +419,9 @@ entry:
define i32 @addflag32ri8(i32 noundef %a) {
; CHECK-LABEL: addflag32ri8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addl $123, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x83,0xc7,0x7b]
-; CHECK-NEXT: movl $-1, %eax # encoding: [0xb8,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: addl $123, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x83,0xc7,0x7b]
+; CHECK-NEXT: movl $-1, %ecx # encoding: [0xb9,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%add = call i32 @llvm.uadd.sat.i32(i32 %a, i32 123)
@@ -431,9 +431,9 @@ entry:
define i64 @addflag64ri8(i64 noundef %a) {
; CHECK-LABEL: addflag64ri8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addq $123, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x83,0xc7,0x7b]
-; CHECK-NEXT: movq $-1, %rax # encoding: [0x48,0xc7,0xc0,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: addq $123, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x83,0xc7,0x7b]
+; CHECK-NEXT: movq $-1, %rcx # encoding: [0x48,0xc7,0xc1,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%add = call i64 @llvm.uadd.sat.i64(i64 %a, i64 123)
@@ -444,9 +444,9 @@ define i8 @addflag8ri(i8 noundef %a) {
; CHECK-LABEL: addflag8ri:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addb $123, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x80,0xc7,0x7b]
-; CHECK-NEXT: movzbl %al, %ecx # encoding: [0x0f,0xb6,0xc8]
-; CHECK-NEXT: movl $255, %eax # encoding: [0xb8,0xff,0x00,0x00,0x00]
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: movzbl %al, %eax # encoding: [0x0f,0xb6,0xc0]
+; CHECK-NEXT: movl $255, %ecx # encoding: [0xb9,0xff,0x00,0x00,0x00]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -457,11 +457,11 @@ entry:
define i16 @addflag16ri(i16 noundef %a) {
; CHECK-LABEL: addflag16ri:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addw $1234, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x81,0xc7,0xd2,0x04]
+; CHECK-NEXT: addw $1234, %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x81,0xc7,0xd2,0x04]
; CHECK-NEXT: # imm = 0x4D2
-; CHECK-NEXT: movl $65535, %eax # encoding: [0xb8,0xff,0xff,0x00,0x00]
+; CHECK-NEXT: movl $65535, %ecx # encoding: [0xb9,0xff,0xff,0x00,0x00]
; CHECK-NEXT: # imm = 0xFFFF
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -472,10 +472,10 @@ entry:
define i32 @addflag32ri(i32 noundef %a) {
; CHECK-LABEL: addflag32ri:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addl $123456, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x81,0xc7,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: addl $123456, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x81,0xc7,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
-; CHECK-NEXT: movl $-1, %eax # encoding: [0xb8,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: movl $-1, %ecx # encoding: [0xb9,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%add = call i32 @llvm.uadd.sat.i32(i32 %a, i32 123456)
@@ -485,10 +485,10 @@ entry:
define i64 @addflag64ri(i64 noundef %a) {
; CHECK-LABEL: addflag64ri:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addq $123456, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x81,0xc7,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: addq $123456, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x81,0xc7,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
-; CHECK-NEXT: movq $-1, %rax # encoding: [0x48,0xc7,0xc0,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: movq $-1, %rcx # encoding: [0x48,0xc7,0xc1,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%add = call i64 @llvm.uadd.sat.i64(i64 %a, i64 123456)
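
For context, the addflag tests exercise @llvm.uadd.sat, which is the add-then-clamp idiom sketched below; the APX lowering above folds it into a three-operand add plus a cmovb of the all-ones constant. The function name is illustrative:

declare i32 @llvm.uadd.sat.i32(i32, i32)

; Open-coded equivalent of uadd.sat: clamp to all ones on carry.
define i32 @uadd_sat_expanded(i32 %a, i32 %b) {
  %s = add i32 %a, %b
  %carry = icmp ult i32 %s, %a   ; unsigned overflow check
  %r = select i1 %carry, i32 -1, i32 %s
  ret i32 %r
}
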
diff --git a/llvm/test/CodeGen/X86/apx/cfcmov.ll b/llvm/test/CodeGen/X86/apx/cfcmov.ll
new file mode 100644
index 0000000..f643120
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/cfcmov.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+cf -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+cf -x86-cmov-converter=false -verify-machineinstrs | FileCheck %s
+
+define i8 @cfcmov8rr(i8 %0) {
+; CHECK-LABEL: cfcmov8rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cmpb $1, %dil
+; CHECK-NEXT: cfcmovel %edi, %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: retq
+ %2 = icmp eq i8 %0, 1
+ %3 = select i1 %2, i8 %0, i8 0
+ ret i8 %3
+}
+
+define i16 @cfcmov16rr(i16 %0) {
+; CHECK-LABEL: cfcmov16rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cmpw $1, %di
+; CHECK-NEXT: cfcmovnel %edi, %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
+; CHECK-NEXT: retq
+ %2 = icmp ne i16 %0, 1
+ %3 = select i1 %2, i16 %0, i16 0
+ ret i16 %3
+}
+
+define i32 @cfcmov32rr(i32 %0) {
+; CHECK-LABEL: cfcmov32rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cmpl $2, %edi
+; CHECK-NEXT: cfcmovael %edi, %eax
+; CHECK-NEXT: retq
+ %2 = icmp ugt i32 %0, 1
+ %3 = select i1 %2, i32 %0, i32 0
+ ret i32 %3
+}
+
+define i64 @cfcmov64rr(i64 %0) {
+; CHECK-LABEL: cfcmov64rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: cfcmoveq %rdi, %rax
+; CHECK-NEXT: retq
+ %2 = icmp ult i64 %0, 1
+ %3 = select i1 %2, i64 %0, i64 0
+ ret i64 %3
+}
+
+define i8 @cfcmov8rr_inv(i8 %0) {
+; CHECK-LABEL: cfcmov8rr_inv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cmpb $1, %dil
+; CHECK-NEXT: cfcmovnel %edi, %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: retq
+ %2 = icmp eq i8 %0, 1
+ %3 = select i1 %2, i8 0, i8 %0
+ ret i8 %3
+}
+
+define i16 @cfcmov16rr_inv(i16 %0) {
+; CHECK-LABEL: cfcmov16rr_inv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cmpw $1, %di
+; CHECK-NEXT: cfcmovel %edi, %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
+; CHECK-NEXT: retq
+ %2 = icmp ne i16 %0, 1
+ %3 = select i1 %2, i16 0, i16 %0
+ ret i16 %3
+}
+
+define i32 @cfcmov32rr_inv(i32 %0) {
+; CHECK-LABEL: cfcmov32rr_inv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cmpl $2, %edi
+; CHECK-NEXT: cfcmovbl %edi, %eax
+; CHECK-NEXT: retq
+ %2 = icmp ugt i32 %0, 1
+ %3 = select i1 %2, i32 0, i32 %0
+ ret i32 %3
+}
+
+define i64 @cfcmov64rr_inv(i64 %0) {
+; CHECK-LABEL: cfcmov64rr_inv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cmpq $2, %rdi
+; CHECK-NEXT: cfcmovaeq %rdi, %rax
+; CHECK-NEXT: retq
+ %2 = icmp ule i64 %0, 1
+ %3 = select i1 %2, i64 0, i64 %0
+ ret i64 %3
+}
diff --git a/llvm/test/CodeGen/X86/apx/domain-reassignment.mir b/llvm/test/CodeGen/X86/apx/domain-reassignment.mir
new file mode 100644
index 0000000..7352aa2
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/domain-reassignment.mir
@@ -0,0 +1,929 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass x86-domain-reassignment -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+ndd -o - %s | FileCheck %s
+--- |
+ ; ModuleID = '../test/CodeGen/X86/gpr-to-mask.ll'
+ source_filename = "../test/CodeGen/X86/gpr-to-mask.ll"
+ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64-unknown-unknown"
+
+ define void @test_fcmp_storefloat(i1 %cond, ptr %fptr, float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) #0 {
+ entry:
+ br i1 %cond, label %if, label %else
+
+ if: ; preds = %entry
+ %cmp1 = fcmp oeq float %f3, %f4
+ br label %exit
+
+ else: ; preds = %entry
+ %cmp2 = fcmp oeq float %f5, %f6
+ br label %exit
+
+ exit: ; preds = %else, %if
+ %val = phi i1 [ %cmp1, %if ], [ %cmp2, %else ]
+ %selected = select i1 %val, float %f1, float %f2
+ store float %selected, ptr %fptr
+ ret void
+ }
+
+ define void @test_8bitops() #0 {
+ ret void
+ }
+ define void @test_16bitops() #0 {
+ ret void
+ }
+ define void @test_32bitops() #0 {
+ ret void
+ }
+ define void @test_64bitops() #0 {
+ ret void
+ }
+ define void @test_16bitext() #0 {
+ ret void
+ }
+ define void @test_32bitext() #0 {
+ ret void
+ }
+ define void @test_64bitext() #0 {
+ ret void
+ }
+  ; Note that this function needs to be compiled with -global-isel
+  ; to obtain testable MIR.
+ define void @test_unused(i64 %0) #0 {
+ %unused = lshr i64 %0, 7
+ ret void
+ }
+...
+---
+name: test_fcmp_storefloat
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr8, preferred-register: '' }
+ - { id: 1, class: gr8, preferred-register: '' }
+ - { id: 2, class: gr8, preferred-register: '' }
+ - { id: 3, class: gr32, preferred-register: '' }
+ - { id: 4, class: gr64, preferred-register: '' }
+ - { id: 5, class: vr128x, preferred-register: '' }
+ - { id: 6, class: fr32x, preferred-register: '' }
+ - { id: 7, class: fr32x, preferred-register: '' }
+ - { id: 8, class: fr32x, preferred-register: '' }
+ - { id: 9, class: fr32x, preferred-register: '' }
+ - { id: 10, class: fr32x, preferred-register: '' }
+ - { id: 11, class: gr8, preferred-register: '' }
+ - { id: 12, class: vk1, preferred-register: '' }
+ - { id: 13, class: gr32, preferred-register: '' }
+ - { id: 14, class: vk1, preferred-register: '' }
+ - { id: 15, class: gr32, preferred-register: '' }
+ - { id: 16, class: gr32, preferred-register: '' }
+ - { id: 17, class: gr32, preferred-register: '' }
+ - { id: 18, class: vk1wm, preferred-register: '' }
+ - { id: 19, class: vr128x, preferred-register: '' }
+ - { id: 20, class: vr128, preferred-register: '' }
+ - { id: 21, class: vr128, preferred-register: '' }
+ - { id: 22, class: fr32x, preferred-register: '' }
+liveins:
+ - { reg: '$edi', virtual-reg: '%3' }
+ - { reg: '$rsi', virtual-reg: '%4' }
+ - { reg: '$xmm0', virtual-reg: '%5' }
+ - { reg: '$xmm1', virtual-reg: '%6' }
+ - { reg: '$xmm2', virtual-reg: '%7' }
+ - { reg: '$xmm3', virtual-reg: '%8' }
+ - { reg: '$xmm4', virtual-reg: '%9' }
+ - { reg: '$xmm5', virtual-reg: '%10' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ ; CHECK-LABEL: name: test_fcmp_storefloat
+ ; CHECK: bb.0.entry:
+ ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK: liveins: $edi, $rsi, $xmm0, $xmm1, $xmm2, $xmm3, $xmm4, $xmm5
+ ; CHECK: [[COPY:%[0-9]+]]:fr32x = COPY $xmm5
+ ; CHECK: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm4
+ ; CHECK: [[COPY2:%[0-9]+]]:fr32x = COPY $xmm3
+ ; CHECK: [[COPY3:%[0-9]+]]:fr32x = COPY $xmm2
+ ; CHECK: [[COPY4:%[0-9]+]]:fr32x = COPY $xmm1
+ ; CHECK: [[COPY5:%[0-9]+]]:vr128x = COPY $xmm0
+ ; CHECK: [[COPY6:%[0-9]+]]:gr64 = COPY $rsi
+ ; CHECK: [[COPY7:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY8:%[0-9]+]]:gr8 = COPY [[COPY7]].sub_8bit
+ ; CHECK: TEST8ri killed [[COPY8]], 1, implicit-def $eflags
+ ; CHECK: JCC_1 %bb.2, 4, implicit $eflags
+ ; CHECK: JMP_1 %bb.1
+ ; CHECK: bb.1.if:
+ ; CHECK: successors: %bb.3(0x80000000)
+ ; CHECK: [[VCMPSSZrri:%[0-9]+]]:vk1 = VCMPSSZrri [[COPY3]], [[COPY2]], 0
+ ; CHECK: [[COPY9:%[0-9]+]]:vk32 = COPY [[VCMPSSZrri]]
+ ; CHECK: [[COPY10:%[0-9]+]]:vk8 = COPY [[COPY9]]
+ ; CHECK: JMP_1 %bb.3
+ ; CHECK: bb.2.else:
+ ; CHECK: successors: %bb.3(0x80000000)
+ ; CHECK: [[VCMPSSZrri1:%[0-9]+]]:vk1 = VCMPSSZrri [[COPY1]], [[COPY]], 0
+ ; CHECK: [[COPY11:%[0-9]+]]:vk32 = COPY [[VCMPSSZrri1]]
+ ; CHECK: [[COPY12:%[0-9]+]]:vk8 = COPY [[COPY11]]
+ ; CHECK: bb.3.exit:
+ ; CHECK: [[PHI:%[0-9]+]]:vk8 = PHI [[COPY12]], %bb.2, [[COPY10]], %bb.1
+ ; CHECK: [[DEF:%[0-9]+]]:vk32 = IMPLICIT_DEF
+ ; CHECK: [[COPY13:%[0-9]+]]:vk32 = COPY [[PHI]]
+ ; CHECK: [[COPY14:%[0-9]+]]:vk1wm = COPY [[COPY13]]
+ ; CHECK: [[COPY15:%[0-9]+]]:vr128x = COPY [[COPY4]]
+ ; CHECK: [[DEF1:%[0-9]+]]:vr128 = IMPLICIT_DEF
+ ; CHECK: [[VMOVSSZrrk:%[0-9]+]]:vr128 = VMOVSSZrrk [[COPY15]], killed [[COPY14]], killed [[DEF1]], [[COPY5]]
+ ; CHECK: [[COPY16:%[0-9]+]]:fr32x = COPY [[VMOVSSZrrk]]
+ ; CHECK: VMOVSSZmr [[COPY6]], 1, $noreg, 0, $noreg, killed [[COPY16]] :: (store (s32) into %ir.fptr)
+ ; CHECK: RET 0
+ bb.0.entry:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ liveins: $edi, $rsi, $xmm0, $xmm1, $xmm2, $xmm3, $xmm4, $xmm5
+
+ %10 = COPY $xmm5
+ %9 = COPY $xmm4
+ %8 = COPY $xmm3
+ %7 = COPY $xmm2
+ %6 = COPY $xmm1
+ %5 = COPY $xmm0
+ %4 = COPY $rsi
+ %3 = COPY $edi
+ %11 = COPY %3.sub_8bit
+ TEST8ri killed %11, 1, implicit-def $eflags
+ JCC_1 %bb.2, 4, implicit $eflags
+ JMP_1 %bb.1
+
+ bb.1.if:
+ successors: %bb.3(0x80000000)
+
+ %14 = VCMPSSZrri %7, %8, 0, implicit $mxcsr
+
+    ; Check that cross-domain copies are replaced with same-domain copies.
+
+ %15 = COPY %14
+ %0 = COPY %15.sub_8bit
+ JMP_1 %bb.3
+
+ bb.2.else:
+ successors: %bb.3(0x80000000)
+ %12 = VCMPSSZrri %9, %10, 0, implicit $mxcsr
+
+    ; Check that cross-domain copies are replaced with same-domain copies.
+
+ %13 = COPY %12
+ %1 = COPY %13.sub_8bit
+
+ bb.3.exit:
+
+    ; Check PHI, IMPLICIT_DEF, and INSERT_SUBREG replacers.
+
+ %2 = PHI %1, %bb.2, %0, %bb.1
+ %17 = IMPLICIT_DEF
+ %16 = INSERT_SUBREG %17, %2, %subreg.sub_8bit_hi
+ %18 = COPY %16
+ %19 = COPY %6
+ %21 = IMPLICIT_DEF
+ %20 = VMOVSSZrrk %19, killed %18, killed %21, %5
+ %22 = COPY %20
+ VMOVSSZmr %4, 1, $noreg, 0, $noreg, killed %22 :: (store (s32) into %ir.fptr)
+ RET 0
+
+...
+---
+name: test_8bitops
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64, preferred-register: '' }
+ - { id: 1, class: vr512, preferred-register: '' }
+ - { id: 2, class: vr512, preferred-register: '' }
+ - { id: 3, class: vr512, preferred-register: '' }
+ - { id: 4, class: vr512, preferred-register: '' }
+ - { id: 5, class: vk8, preferred-register: '' }
+ - { id: 6, class: gr32, preferred-register: '' }
+ - { id: 7, class: gr8, preferred-register: '' }
+ - { id: 8, class: gr32, preferred-register: '' }
+ - { id: 9, class: gr32, preferred-register: '' }
+ - { id: 10, class: vk8wm, preferred-register: '' }
+ - { id: 11, class: vr512, preferred-register: '' }
+ - { id: 12, class: gr8, preferred-register: '' }
+ - { id: 13, class: gr8, preferred-register: '' }
+ - { id: 14, class: gr8, preferred-register: '' }
+ - { id: 15, class: gr8, preferred-register: '' }
+ - { id: 16, class: gr8, preferred-register: '' }
+ - { id: 17, class: gr8, preferred-register: '' }
+ - { id: 18, class: gr8, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+ - { reg: '$zmm2', virtual-reg: '%3' }
+ - { reg: '$zmm3', virtual-reg: '%4' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ ; CHECK-LABEL: name: test_8bitops
+ ; CHECK: bb.0:
+ ; CHECK: successors: %bb.1(0x80000000)
+ ; CHECK: liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+ ; CHECK: [[COPY3:%[0-9]+]]:vr512 = COPY $zmm2
+ ; CHECK: [[COPY4:%[0-9]+]]:vr512 = COPY $zmm3
+ ; CHECK: [[VCMPPDZrri:%[0-9]+]]:vk8 = VCMPPDZrri [[COPY3]], [[COPY4]], 0
+ ; CHECK: [[COPY5:%[0-9]+]]:vk32 = COPY [[VCMPPDZrri]]
+ ; CHECK: [[COPY6:%[0-9]+]]:vk8 = COPY [[COPY5]]
+ ; CHECK: [[KSHIFTRBri:%[0-9]+]]:vk8 = KSHIFTRBri [[COPY6]], 2
+ ; CHECK: [[KSHIFTLBri:%[0-9]+]]:vk8 = KSHIFTLBri [[KSHIFTRBri]], 1
+ ; CHECK: [[KNOTBrr:%[0-9]+]]:vk8 = KNOTBrr [[KSHIFTLBri]]
+ ; CHECK: [[KORBrr:%[0-9]+]]:vk8 = KORBrr [[KNOTBrr]], [[KSHIFTRBri]]
+ ; CHECK: [[KANDBrr:%[0-9]+]]:vk8 = KANDBrr [[KORBrr]], [[KSHIFTLBri]]
+ ; CHECK: [[KXORBrr:%[0-9]+]]:vk8 = KXORBrr [[KANDBrr]], [[KSHIFTRBri]]
+ ; CHECK: [[KADDBrr:%[0-9]+]]:vk8 = KADDBrr [[KXORBrr]], [[KNOTBrr]]
+ ; CHECK: [[DEF:%[0-9]+]]:vk32 = IMPLICIT_DEF
+ ; CHECK: [[COPY7:%[0-9]+]]:vk32 = COPY [[KADDBrr]]
+ ; CHECK: [[COPY8:%[0-9]+]]:vk8wm = COPY [[COPY7]]
+ ; CHECK: [[VMOVAPDZrrk:%[0-9]+]]:vr512 = VMOVAPDZrrk [[COPY2]], killed [[COPY8]], [[COPY1]]
+ ; CHECK: VMOVAPDZmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVAPDZrrk]]
+ ; CHECK: bb.1:
+ ; CHECK: successors: %bb.2(0x80000000)
+ ; CHECK: bb.2:
+ ; CHECK: RET 0
+ bb.0:
+ liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3
+
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+ %3 = COPY $zmm2
+ %4 = COPY $zmm3
+
+ %5 = VCMPPDZrri %3, %4, 0, implicit $mxcsr
+ %6 = COPY %5
+ %7 = COPY %6.sub_8bit
+
+ %12 = SHR8ri_ND %7, 2, implicit-def dead $eflags
+ %13 = SHL8ri_ND %12, 1, implicit-def dead $eflags
+ %14 = NOT8r_ND %13
+ %15 = OR8rr_ND %14, %12, implicit-def dead $eflags
+ %16 = AND8rr_ND %15, %13, implicit-def dead $eflags
+ %17 = XOR8rr_ND %16, %12, implicit-def dead $eflags
+ %18 = ADD8rr_ND %17, %14, implicit-def dead $eflags
+
+ %8 = IMPLICIT_DEF
+ %9 = INSERT_SUBREG %8, %18, %subreg.sub_8bit_hi
+ %10 = COPY %9
+ %11 = VMOVAPDZrrk %2, killed %10, %1
+ VMOVAPDZmr %0, 1, $noreg, 0, $noreg, killed %11
+
+    ; FIXME: We can't replace TEST with KTEST due to flag differences.
+ ; TEST8rr %18, %18, implicit-def $eflags
+ ; JCC_1 %bb.1, 4, implicit $eflags
+ ; JMP_1 %bb.2
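+    ; (KTEST defines only ZF and CF, with CF testing a different predicate,
+    ; and zeroes the remaining status flags, while TEST also produces SF and
+    ; PF; presumably these are the flag differences the FIXME refers to.)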
+
+ bb.1:
+
+ bb.2:
+ RET 0
+
+...
+---
+name: test_16bitops
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64, preferred-register: '' }
+ - { id: 1, class: vr512, preferred-register: '' }
+ - { id: 2, class: vr512, preferred-register: '' }
+ - { id: 3, class: vr512, preferred-register: '' }
+ - { id: 4, class: vr512, preferred-register: '' }
+ - { id: 5, class: vk16, preferred-register: '' }
+ - { id: 6, class: gr32, preferred-register: '' }
+ - { id: 7, class: gr16, preferred-register: '' }
+ - { id: 8, class: gr32, preferred-register: '' }
+ - { id: 9, class: gr32, preferred-register: '' }
+ - { id: 10, class: vk16wm, preferred-register: '' }
+ - { id: 11, class: vr512, preferred-register: '' }
+ - { id: 12, class: gr16, preferred-register: '' }
+ - { id: 13, class: gr16, preferred-register: '' }
+ - { id: 14, class: gr16, preferred-register: '' }
+ - { id: 15, class: gr16, preferred-register: '' }
+ - { id: 16, class: gr16, preferred-register: '' }
+ - { id: 17, class: gr16, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+ - { reg: '$zmm2', virtual-reg: '%3' }
+ - { reg: '$zmm3', virtual-reg: '%4' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ ; CHECK-LABEL: name: test_16bitops
+ ; CHECK: bb.0:
+ ; CHECK: successors: %bb.1(0x80000000)
+ ; CHECK: liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+ ; CHECK: [[COPY3:%[0-9]+]]:vr512 = COPY $zmm2
+ ; CHECK: [[COPY4:%[0-9]+]]:vr512 = COPY $zmm3
+ ; CHECK: [[VCMPPSZrri:%[0-9]+]]:vk16 = VCMPPSZrri [[COPY3]], [[COPY4]], 0
+ ; CHECK: [[COPY5:%[0-9]+]]:vk32 = COPY [[VCMPPSZrri]]
+ ; CHECK: [[COPY6:%[0-9]+]]:vk16 = COPY [[COPY5]]
+ ; CHECK: [[KSHIFTRWri:%[0-9]+]]:vk16 = KSHIFTRWri [[COPY6]], 2
+ ; CHECK: [[KSHIFTLWri:%[0-9]+]]:vk16 = KSHIFTLWri [[KSHIFTRWri]], 1
+ ; CHECK: [[KNOTWrr:%[0-9]+]]:vk16 = KNOTWrr [[KSHIFTLWri]]
+ ; CHECK: [[KORWrr:%[0-9]+]]:vk16 = KORWrr [[KNOTWrr]], [[KSHIFTRWri]]
+ ; CHECK: [[KANDWrr:%[0-9]+]]:vk16 = KANDWrr [[KORWrr]], [[KSHIFTLWri]]
+ ; CHECK: [[KXORWrr:%[0-9]+]]:vk16 = KXORWrr [[KANDWrr]], [[KSHIFTRWri]]
+ ; CHECK: [[DEF:%[0-9]+]]:vk32 = IMPLICIT_DEF
+ ; CHECK: [[COPY7:%[0-9]+]]:vk32 = COPY [[KXORWrr]]
+ ; CHECK: [[COPY8:%[0-9]+]]:vk16wm = COPY [[COPY7]]
+ ; CHECK: [[VMOVAPSZrrk:%[0-9]+]]:vr512 = VMOVAPSZrrk [[COPY2]], killed [[COPY8]], [[COPY1]]
+ ; CHECK: VMOVAPSZmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVAPSZrrk]]
+ ; CHECK: bb.1:
+ ; CHECK: successors: %bb.2(0x80000000)
+ ; CHECK: bb.2:
+ ; CHECK: RET 0
+ bb.0:
+ liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3
+
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+ %3 = COPY $zmm2
+ %4 = COPY $zmm3
+
+ %5 = VCMPPSZrri %3, %4, 0, implicit $mxcsr
+ %6 = COPY %5
+ %7 = COPY %6.sub_16bit
+
+ %12 = SHR16ri_ND %7, 2, implicit-def dead $eflags
+ %13 = SHL16ri_ND %12, 1, implicit-def dead $eflags
+ %14 = NOT16r_ND %13
+ %15 = OR16rr_ND %14, %12, implicit-def dead $eflags
+ %16 = AND16rr_ND %15, %13, implicit-def dead $eflags
+ %17 = XOR16rr_ND %16, %12, implicit-def dead $eflags
+
+ %8 = IMPLICIT_DEF
+ %9 = INSERT_SUBREG %8, %17, %subreg.sub_16bit
+ %10 = COPY %9
+ %11 = VMOVAPSZrrk %2, killed %10, %1
+ VMOVAPSZmr %0, 1, $noreg, 0, $noreg, killed %11
+
+    ; FIXME: We can't replace TEST with KTEST due to flag differences.
+ ; FIXME TEST16rr %17, %17, implicit-def $eflags
+ ; FIXME JCC_1 %bb.1, 4, implicit $eflags
+ ; FIXME JMP_1 %bb.2
+
+ bb.1:
+
+ bb.2:
+ RET 0
+
+...
+---
+name: test_32bitops
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64, preferred-register: '' }
+ - { id: 1, class: vr512, preferred-register: '' }
+ - { id: 2, class: vr512, preferred-register: '' }
+ - { id: 3, class: vk32wm, preferred-register: '' }
+ - { id: 4, class: vr512, preferred-register: '' }
+ - { id: 5, class: gr32, preferred-register: '' }
+ - { id: 6, class: gr32, preferred-register: '' }
+ - { id: 7, class: gr32, preferred-register: '' }
+ - { id: 8, class: gr32, preferred-register: '' }
+ - { id: 9, class: gr32, preferred-register: '' }
+ - { id: 10, class: gr32, preferred-register: '' }
+ - { id: 11, class: gr32, preferred-register: '' }
+ - { id: 12, class: gr32, preferred-register: '' }
+ - { id: 13, class: gr32, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ ; CHECK-LABEL: name: test_32bitops
+ ; CHECK: bb.0:
+ ; CHECK: successors: %bb.1(0x80000000)
+ ; CHECK: liveins: $rdi, $zmm0, $zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+ ; CHECK: [[KMOVDkm:%[0-9]+]]:vk32 = KMOVDkm [[COPY]], 1, $noreg, 0, $noreg
+ ; CHECK: [[KSHIFTRDri:%[0-9]+]]:vk32 = KSHIFTRDri [[KMOVDkm]], 2
+ ; CHECK: [[KSHIFTLDri:%[0-9]+]]:vk32 = KSHIFTLDri [[KSHIFTRDri]], 1
+ ; CHECK: [[KNOTDrr:%[0-9]+]]:vk32 = KNOTDrr [[KSHIFTLDri]]
+ ; CHECK: [[KORDrr:%[0-9]+]]:vk32 = KORDrr [[KNOTDrr]], [[KSHIFTRDri]]
+ ; CHECK: [[KANDDrr:%[0-9]+]]:vk32 = KANDDrr [[KORDrr]], [[KSHIFTLDri]]
+ ; CHECK: [[KXORDrr:%[0-9]+]]:vk32 = KXORDrr [[KANDDrr]], [[KSHIFTRDri]]
+ ; CHECK: [[KANDNDrr:%[0-9]+]]:vk32 = KANDNDrr [[KXORDrr]], [[KORDrr]]
+ ; CHECK: [[KADDDrr:%[0-9]+]]:vk32 = KADDDrr [[KANDNDrr]], [[KXORDrr]]
+ ; CHECK: [[COPY3:%[0-9]+]]:vk32wm = COPY [[KADDDrr]]
+ ; CHECK: [[VMOVDQU16Zrrk:%[0-9]+]]:vr512 = VMOVDQU16Zrrk [[COPY2]], killed [[COPY3]], [[COPY1]]
+ ; CHECK: VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU16Zrrk]]
+ ; CHECK: bb.1:
+ ; CHECK: successors: %bb.2(0x80000000)
+ ; CHECK: bb.2:
+ ; CHECK: RET 0
+ bb.0:
+ liveins: $rdi, $zmm0, $zmm1
+
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+
+ %5 = MOV32rm %0, 1, $noreg, 0, $noreg
+ %6 = SHR32ri_ND %5, 2, implicit-def dead $eflags
+ %7 = SHL32ri_ND %6, 1, implicit-def dead $eflags
+ %8 = NOT32r_ND %7
+ %9 = OR32rr_ND %8, %6, implicit-def dead $eflags
+ %10 = AND32rr_ND %9, %7, implicit-def dead $eflags
+ %11 = XOR32rr_ND %10, %6, implicit-def dead $eflags
+ %12 = ANDN32rr %11, %9, implicit-def dead $eflags
+ %13 = ADD32rr_ND %12, %11, implicit-def dead $eflags
+
+ %3 = COPY %13
+ %4 = VMOVDQU16Zrrk %2, killed %3, %1
+ VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
+
+    ; FIXME: We can't replace TEST with KTEST due to flag differences.
+ ; FIXME TEST32rr %13, %13, implicit-def $eflags
+ ; FIXME JCC_1 %bb.1, 4, implicit $eflags
+ ; FIXME JMP_1 %bb.2
+
+ bb.1:
+
+ bb.2:
+ RET 0
+
+...
+---
+name: test_64bitops
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64, preferred-register: '' }
+ - { id: 1, class: vr512, preferred-register: '' }
+ - { id: 2, class: vr512, preferred-register: '' }
+ - { id: 3, class: vk64wm, preferred-register: '' }
+ - { id: 4, class: vr512, preferred-register: '' }
+ - { id: 5, class: gr64, preferred-register: '' }
+ - { id: 6, class: gr64, preferred-register: '' }
+ - { id: 7, class: gr64, preferred-register: '' }
+ - { id: 8, class: gr64, preferred-register: '' }
+ - { id: 9, class: gr64, preferred-register: '' }
+ - { id: 10, class: gr64, preferred-register: '' }
+ - { id: 11, class: gr64, preferred-register: '' }
+ - { id: 12, class: gr64, preferred-register: '' }
+ - { id: 13, class: gr64, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ ; CHECK-LABEL: name: test_64bitops
+ ; CHECK: bb.0:
+ ; CHECK: successors: %bb.1(0x80000000)
+ ; CHECK: liveins: $rdi, $zmm0, $zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+ ; CHECK: [[KMOVQkm:%[0-9]+]]:vk64 = KMOVQkm [[COPY]], 1, $noreg, 0, $noreg
+ ; CHECK: [[KSHIFTRQri:%[0-9]+]]:vk64 = KSHIFTRQri [[KMOVQkm]], 2
+ ; CHECK: [[KSHIFTLQri:%[0-9]+]]:vk64 = KSHIFTLQri [[KSHIFTRQri]], 1
+ ; CHECK: [[KNOTQrr:%[0-9]+]]:vk64 = KNOTQrr [[KSHIFTLQri]]
+ ; CHECK: [[KORQrr:%[0-9]+]]:vk64 = KORQrr [[KNOTQrr]], [[KSHIFTRQri]]
+ ; CHECK: [[KANDQrr:%[0-9]+]]:vk64 = KANDQrr [[KORQrr]], [[KSHIFTLQri]]
+ ; CHECK: [[KXORQrr:%[0-9]+]]:vk64 = KXORQrr [[KANDQrr]], [[KSHIFTRQri]]
+ ; CHECK: [[KANDNQrr:%[0-9]+]]:vk64 = KANDNQrr [[KXORQrr]], [[KORQrr]]
+ ; CHECK: [[KADDQrr:%[0-9]+]]:vk64 = KADDQrr [[KANDNQrr]], [[KXORQrr]]
+ ; CHECK: [[COPY3:%[0-9]+]]:vk64wm = COPY [[KADDQrr]]
+ ; CHECK: [[VMOVDQU8Zrrk:%[0-9]+]]:vr512 = VMOVDQU8Zrrk [[COPY2]], killed [[COPY3]], [[COPY1]]
+ ; CHECK: VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU8Zrrk]]
+ ; CHECK: bb.1:
+ ; CHECK: successors: %bb.2(0x80000000)
+ ; CHECK: bb.2:
+ ; CHECK: RET 0
+ bb.0:
+ liveins: $rdi, $zmm0, $zmm1
+
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+
+ %5 = MOV64rm %0, 1, $noreg, 0, $noreg
+ %6 = SHR64ri_ND %5, 2, implicit-def dead $eflags
+ %7 = SHL64ri_ND %6, 1, implicit-def dead $eflags
+ %8 = NOT64r_ND %7
+ %9 = OR64rr_ND %8, %6, implicit-def dead $eflags
+ %10 = AND64rr_ND %9, %7, implicit-def dead $eflags
+ %11 = XOR64rr_ND %10, %6, implicit-def dead $eflags
+ %12 = ANDN64rr %11, %9, implicit-def dead $eflags
+ %13 = ADD64rr_ND %12, %11, implicit-def dead $eflags
+
+ %3 = COPY %13
+ %4 = VMOVDQU8Zrrk %2, killed %3, %1
+ VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
+
+    ; FIXME: We can't replace TEST with KTEST due to flag differences.
+ ; FIXME TEST64rr %13, %13, implicit-def $eflags
+ ; FIXME JCC_1 %bb.1, 4, implicit $eflags
+ ; FIXME JMP_1 %bb.2
+
+ bb.1:
+
+ bb.2:
+ RET 0
+
+...
+---
+name: test_16bitext
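+# The *ext tests check that a zero-extending load feeding a mask user is
+# reassigned to a KMOV*km of the narrow width plus a COPY into the wider
+# mask class (e.g. MOVZX16rm8 below is expected to become KMOVBkm + a vk16
+# COPY).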
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64, preferred-register: '' }
+ - { id: 1, class: vr512, preferred-register: '' }
+ - { id: 2, class: vr512, preferred-register: '' }
+ - { id: 3, class: vk16wm, preferred-register: '' }
+ - { id: 4, class: vr512, preferred-register: '' }
+ - { id: 5, class: gr16, preferred-register: '' }
+ - { id: 6, class: gr16, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0:
+ liveins: $rdi, $zmm0, $zmm1
+
+ ; CHECK-LABEL: name: test_16bitext
+ ; CHECK: liveins: $rdi, $zmm0, $zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+ ; CHECK: [[KMOVBkm:%[0-9]+]]:vk8 = KMOVBkm [[COPY]], 1, $noreg, 0, $noreg
+ ; CHECK: [[COPY3:%[0-9]+]]:vk16 = COPY [[KMOVBkm]]
+ ; CHECK: [[KNOTWrr:%[0-9]+]]:vk16 = KNOTWrr [[COPY3]]
+ ; CHECK: [[COPY4:%[0-9]+]]:vk16wm = COPY [[KNOTWrr]]
+ ; CHECK: [[VMOVAPSZrrk:%[0-9]+]]:vr512 = VMOVAPSZrrk [[COPY2]], killed [[COPY4]], [[COPY1]]
+ ; CHECK: VMOVAPSZmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVAPSZrrk]]
+ ; CHECK: RET 0
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+
+ %5 = MOVZX16rm8 %0, 1, $noreg, 0, $noreg
+ %6 = NOT16r_ND %5
+
+ %3 = COPY %6
+ %4 = VMOVAPSZrrk %2, killed %3, %1
+ VMOVAPSZmr %0, 1, $noreg, 0, $noreg, killed %4
+ RET 0
+
+...
+---
+name: test_32bitext
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64, preferred-register: '' }
+ - { id: 1, class: vr512, preferred-register: '' }
+ - { id: 2, class: vr512, preferred-register: '' }
+ - { id: 3, class: vk64wm, preferred-register: '' }
+ - { id: 4, class: vr512, preferred-register: '' }
+ - { id: 5, class: gr32, preferred-register: '' }
+ - { id: 6, class: gr32, preferred-register: '' }
+ - { id: 7, class: gr32, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0:
+ liveins: $rdi, $zmm0, $zmm1
+
+ ; CHECK-LABEL: name: test_32bitext
+ ; CHECK: liveins: $rdi, $zmm0, $zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+ ; CHECK: [[KMOVBkm:%[0-9]+]]:vk8 = KMOVBkm [[COPY]], 1, $noreg, 0, $noreg
+ ; CHECK: [[COPY3:%[0-9]+]]:vk32 = COPY [[KMOVBkm]]
+ ; CHECK: [[KMOVWkm:%[0-9]+]]:vk16 = KMOVWkm [[COPY]], 1, $noreg, 0, $noreg
+ ; CHECK: [[COPY4:%[0-9]+]]:vk32 = COPY [[KMOVWkm]]
+ ; CHECK: [[KADDDrr:%[0-9]+]]:vk32 = KADDDrr [[COPY3]], [[COPY4]]
+ ; CHECK: [[COPY5:%[0-9]+]]:vk64wm = COPY [[KADDDrr]]
+ ; CHECK: [[VMOVDQU16Zrrk:%[0-9]+]]:vr512 = VMOVDQU16Zrrk [[COPY2]], killed [[COPY5]], [[COPY1]]
+ ; CHECK: VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU16Zrrk]]
+ ; CHECK: RET 0
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+
+ %5 = MOVZX32rm8 %0, 1, $noreg, 0, $noreg
+ %6 = MOVZX32rm16 %0, 1, $noreg, 0, $noreg
+ %7 = ADD32rr_ND %5, %6, implicit-def dead $eflags
+
+ %3 = COPY %7
+ %4 = VMOVDQU16Zrrk %2, killed %3, %1
+ VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
+ RET 0
+
+...
+---
+name: test_64bitext
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64, preferred-register: '' }
+ - { id: 1, class: vr512, preferred-register: '' }
+ - { id: 2, class: vr512, preferred-register: '' }
+ - { id: 3, class: vk64wm, preferred-register: '' }
+ - { id: 4, class: vr512, preferred-register: '' }
+ - { id: 5, class: gr64, preferred-register: '' }
+ - { id: 6, class: gr64, preferred-register: '' }
+ - { id: 7, class: gr64, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0:
+ liveins: $rdi, $zmm0, $zmm1
+
+ ; CHECK-LABEL: name: test_64bitext
+ ; CHECK: liveins: $rdi, $zmm0, $zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+ ; CHECK: [[KMOVBkm:%[0-9]+]]:vk8 = KMOVBkm [[COPY]], 1, $noreg, 0, $noreg
+ ; CHECK: [[COPY3:%[0-9]+]]:vk64 = COPY [[KMOVBkm]]
+ ; CHECK: [[KMOVWkm:%[0-9]+]]:vk16 = KMOVWkm [[COPY]], 1, $noreg, 0, $noreg
+ ; CHECK: [[COPY4:%[0-9]+]]:vk64 = COPY [[KMOVWkm]]
+ ; CHECK: [[KADDQrr:%[0-9]+]]:vk64 = KADDQrr [[COPY3]], [[COPY4]]
+ ; CHECK: [[COPY5:%[0-9]+]]:vk64wm = COPY [[KADDQrr]]
+ ; CHECK: [[VMOVDQU8Zrrk:%[0-9]+]]:vr512 = VMOVDQU8Zrrk [[COPY2]], killed [[COPY5]], [[COPY1]]
+ ; CHECK: VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU8Zrrk]]
+ ; CHECK: RET 0
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+
+ %5 = MOVZX64rm8 %0, 1, $noreg, 0, $noreg
+ %6 = MOVZX64rm16 %0, 1, $noreg, 0, $noreg
+ %7 = ADD64rr_ND %5, %6, implicit-def dead $eflags
+
+ %3 = COPY %7
+ %4 = VMOVDQU8Zrrk %2, killed %3, %1
+ VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
+ RET 0
+
+...
+---
+name: test_unused
+alignment: 16
+exposesReturnsTwice: false
+legalized: true
+regBankSelected: true
+selected: true
+failedISel: false
+tracksRegLiveness: true
+hasWinCFI: false
+callsEHReturn: false
+callsUnwindInit: false
+hasEHCatchret: false
+hasEHScopes: false
+hasEHFunclets: false
+isOutlined: false
+debugInstrRef: false
+failsVerification: false
+tracksDebugUserValues: false
+registers:
+# Note that this test is supposed to have registers without classes
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 1
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ functionContext: ''
+ maxCallFrameSize: 4294967295
+ cvBytesOfCalleeSavedRegisters: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ hasTailCall: false
+ localFrameSize: 0
+ savePoint: ''
+ restorePoint: ''
+fixedStack: []
+stack: []
+entry_values: []
+callSites: []
+debugValueSubstitutions: []
+constants: []
+machineFunctionInfo: {}
+body: |
+ bb.1 (%ir-block.1):
+ liveins: $rdi
+
+ RET 0
+
+...
diff --git a/llvm/test/CodeGen/X86/apx/flags-copy-lowering.mir b/llvm/test/CodeGen/X86/apx/flags-copy-lowering.mir
index d6a9cda..e81a448 100644
--- a/llvm/test/CodeGen/X86/apx/flags-copy-lowering.mir
+++ b/llvm/test/CodeGen/X86/apx/flags-copy-lowering.mir
@@ -29,6 +29,18 @@
call void @foo()
ret void
}
+
+ define void @test_cmov(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret void
+ }
+
+ define void @test_cfcmov(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret void
+ }
...
---
name: test_adc
@@ -166,3 +178,93 @@ body: |
RET 0
...
+---
+name: test_cmov
+# CHECK-LABEL: name: test_cmov
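+# Flag-copy lowering cannot keep $eflags live across the call below, so the
+# pass materializes each used condition into a GR8 via SETCCr before the
+# call and rebuilds the flags with TEST8rr ahead of each CMOV, retargeting
+# the CMOV to NE (or the inverse E) on that byte; test_cfcmov checks the
+# same rewrite for the APX CFCMOV forms.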
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ liveins: $rdi, $rsi
+
+ %0:gr64 = COPY $rdi
+ %1:gr64 = COPY $rsi
+ CMP64rr %0, %1, implicit-def $eflags
+ %2:gr64 = COPY $eflags
+ ; CHECK-NOT: COPY{{( killed)?}} $eflags
+ ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
+ ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
+ ; CHECK-NOT: COPY{{( killed)?}} $eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ CALL64pcrel32 @foo, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def $eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+
+ $eflags = COPY %2
+ %3:gr64 = CMOV64rr_ND %0, %1, 7, implicit $eflags
+ %4:gr64 = CMOV64rr_ND %0, %1, 2, implicit $eflags
+ %5:gr64 = CMOV64rr_ND %0, %1, 4, implicit $eflags
+ %6:gr64 = CMOV64rr_ND %0, %1, 5, implicit killed $eflags
+ ; CHECK-NOT: $eflags =
+ ; CHECK: TEST8rr %[[A_REG]], %[[A_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %3:gr64 = CMOV64rr_ND %0, %1, 5, implicit killed $eflags
+ ; CHECK-NEXT: TEST8rr %[[B_REG]], %[[B_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %4:gr64 = CMOV64rr_ND %0, %1, 5, implicit killed $eflags
+ ; CHECK-NEXT: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %5:gr64 = CMOV64rr_ND %0, %1, 5, implicit killed $eflags
+ ; CHECK-NEXT: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %6:gr64 = CMOV64rr_ND %0, %1, 4, implicit killed $eflags
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %3
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %4
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %5
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %6
+
+ RET 0
+...
+---
+name: test_cfcmov
+# CHECK-LABEL: name: test_cfcmov
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ liveins: $rdi, $rsi
+
+ %0:gr64 = COPY $rdi
+ %1:gr64 = COPY $rsi
+ CMP64rr %0, %1, implicit-def $eflags
+ %2:gr64 = COPY $eflags
+ ; CHECK-NOT: COPY{{( killed)?}} $eflags
+ ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
+ ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
+ ; CHECK-NOT: COPY{{( killed)?}} $eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ CALL64pcrel32 @foo, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def $eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+
+ $eflags = COPY %2
+ %3:gr64 = CFCMOV64rr %1, 7, implicit $eflags
+ %4:gr64 = CFCMOV64rr %1, 2, implicit $eflags
+ %5:gr64 = CFCMOV64rr_ND %0, %1, 4, implicit $eflags
+ %6:gr64 = CFCMOV64rr_ND %0, %1, 5, implicit killed $eflags
+ ; CHECK-NOT: $eflags =
+ ; CHECK: TEST8rr %[[A_REG]], %[[A_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %3:gr64 = CFCMOV64rr %1, 5, implicit killed $eflags
+ ; CHECK-NEXT: TEST8rr %[[B_REG]], %[[B_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %4:gr64 = CFCMOV64rr %1, 5, implicit killed $eflags
+ ; CHECK-NEXT: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %5:gr64 = CFCMOV64rr_ND %0, %1, 5, implicit killed $eflags
+ ; CHECK-NEXT: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %6:gr64 = CFCMOV64rr_ND %0, %1, 4, implicit killed $eflags
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %3
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %4
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %5
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %6
+
+ RET 0
+...
diff --git a/llvm/test/CodeGen/X86/apx/foldimmediate.mir b/llvm/test/CodeGen/X86/apx/foldimmediate.mir
new file mode 100644
index 0000000..310fc64
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/foldimmediate.mir
@@ -0,0 +1,70 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+# RUN: llc -mtriple=x86_64-- -run-pass=peephole-opt %s -o - | FileCheck %s
+--- |
+ define void @foldImmediate() { ret void }
+...
+---
+# Check that immediates can be folded into ALU instructions.
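+# Here peephole-opt recognizes that %0 is MOV32ri 81 and rewrites the
+# register-register CCMP/CTEST forms into their immediate forms, roughly:
+#   CTEST32rr %0, %1, 2, 10, ...  ->  CTEST32ri %1, 81, 2, 10, ...
+# and, via the SUBREG_TO_REG, into CCMP64ri32/CTEST64ri32; the final
+# CCMP64rr keeps its rr form because the constant is its first operand.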
+name: foldImmediate
+registers:
+ - { id: 0, class: gr32 }
+ - { id: 1, class: gr32 }
+ - { id: 2, class: gr32 }
+ - { id: 3, class: gr32 }
+ - { id: 4, class: gr32 }
+ - { id: 5, class: gr32 }
+ - { id: 6, class: gr32 }
+ - { id: 7, class: gr64 }
+ - { id: 8, class: gr64 }
+ - { id: 9, class: gr64 }
+ - { id: 10, class: gr64 }
+ - { id: 11, class: gr64 }
+ - { id: 12, class: gr64 }
+ - { id: 13, class: gr64 }
+ - { id: 14, class: gr64 }
+ - { id: 15, class: gr64 }
+ - { id: 16, class: gr32 }
+ - { id: 17, class: gr64 }
+ - { id: 18, class: gr32 }
+
+body: |
+ bb.0:
+ liveins: $rdi, $rsi
+
+ ; CHECK-LABEL: name: foldImmediate
+ ; CHECK: liveins: $rdi, $rsi
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 81
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK-NEXT: CTEST32ri [[COPY]], 81, 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ ; CHECK-NEXT: CCMP32ri [[COPY]], 81, 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, killed [[MOV32ri]], %subreg.sub_32bit
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; CHECK-NEXT: CTEST64ri32 [[COPY1]], 81, 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ ; CHECK-NEXT: CCMP64ri32 [[COPY1]], 81, 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ ; CHECK-NEXT: CCMP64rr [[SUBREG_TO_REG]], [[COPY1]], 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ %0 = MOV32ri 81
+ %1 = COPY $edi
+
+ CTEST32rr %0, %1, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+
+ CCMP32rr %1, %0, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+
+ %7 = SUBREG_TO_REG 0, killed %0:gr32, %subreg.sub_32bit
+ %8 = COPY $rsi
+
+ CTEST64rr %8, %7, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+
+ CCMP64rr %8, %7, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+ CCMP64rr %7, %8, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+...
diff --git a/llvm/test/CodeGen/X86/apx/inc.ll b/llvm/test/CodeGen/X86/apx/inc.ll
index 613f786..a9c6d74 100644
--- a/llvm/test/CodeGen/X86/apx/inc.ll
+++ b/llvm/test/CodeGen/X86/apx/inc.ll
@@ -92,9 +92,9 @@ define i8 @uinc8r(i8 noundef %a) {
; CHECK-LABEL: uinc8r:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: incb %dil, %al
-; CHECK-NEXT: movzbl %al, %ecx
-; CHECK-NEXT: movl $255, %eax
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: movl $255, %ecx
+; CHECK-NEXT: cmovel %ecx, %eax
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
entry:
@@ -105,9 +105,9 @@ entry:
define i16 @uinc16r(i16 noundef %a) {
; CHECK-LABEL: uinc16r:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: incw %di, %cx
-; CHECK-NEXT: movl $65535, %eax # imm = 0xFFFF
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: incw %di, %ax
+; CHECK-NEXT: movl $65535, %ecx # imm = 0xFFFF
+; CHECK-NEXT: cmovel %ecx, %eax
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
entry:
@@ -118,9 +118,9 @@ entry:
define i32 @uinc32r(i32 noundef %a) {
; CHECK-LABEL: uinc32r:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: incl %edi, %ecx
-; CHECK-NEXT: movl $-1, %eax
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: incl %edi, %eax
+; CHECK-NEXT: movl $-1, %ecx
+; CHECK-NEXT: cmovel %ecx, %eax
; CHECK-NEXT: retq
entry:
%inc = call i32 @llvm.uadd.sat.i32(i32 %a, i32 1)
@@ -130,9 +130,9 @@ entry:
define i64 @uinc64r(i64 noundef %a) {
; CHECK-LABEL: uinc64r:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: incq %rdi, %rcx
-; CHECK-NEXT: movq $-1, %rax
-; CHECK-NEXT: cmovneq %rcx, %rax
+; CHECK-NEXT: incq %rdi, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: cmoveq %rcx, %rax
; CHECK-NEXT: retq
entry:
%inc = call i64 @llvm.uadd.sat.i64(i64 %a, i64 1)
diff --git a/llvm/test/CodeGen/X86/apx/shift-eflags.ll b/llvm/test/CodeGen/X86/apx/shift-eflags.ll
index f34dc6c..932cdc1 100644
--- a/llvm/test/CodeGen/X86/apx/shift-eflags.ll
+++ b/llvm/test/CodeGen/X86/apx/shift-eflags.ll
@@ -7,9 +7,8 @@
define i32 @ashr_const(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: ashr_const:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edx, %eax
-; CHECK-NEXT: sarl $14, %edi, %edx
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: sarl $14, %edi, %eax
+; CHECK-NEXT: cmovel %edx, %ecx, %eax
; CHECK-NEXT: retq
%s = ashr i32 %a0, 14
%c = icmp eq i32 %s, 0
@@ -21,9 +20,8 @@ define i32 @ashr_const(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
define i32 @lshr_const(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: lshr_const:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: testl $-16384, %edi # imm = 0xC000
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: cmovel %edx, %ecx, %eax
; CHECK-NEXT: retq
%s = lshr i32 %a0, 14
%c = icmp eq i32 %s, 0
@@ -35,9 +33,8 @@ define i32 @lshr_const(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
define i32 @shl_const(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: shl_const:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: testl $262143, %edi # imm = 0x3FFFF
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: cmovel %edx, %ecx, %eax
; CHECK-NEXT: retq
%s = shl i32 %a0, 14
%c = icmp eq i32 %s, 0
@@ -88,9 +85,8 @@ define i32 @shl_const_self_select(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
define i32 @ashr_const1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: ashr_const1:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edx, %eax
-; CHECK-NEXT: sarl %edi, %edx
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: sarl %edi, %eax
+; CHECK-NEXT: cmovel %edx, %ecx, %eax
; CHECK-NEXT: retq
%s = ashr i32 %a0, 1
%c = icmp eq i32 %s, 0
@@ -102,9 +98,8 @@ define i32 @ashr_const1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
define i32 @lshr_const1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: lshr_const1:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: testl $-2, %edi
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: cmovel %edx, %ecx, %eax
; CHECK-NEXT: retq
%s = lshr i32 %a0, 1
%c = icmp eq i32 %s, 0
@@ -116,9 +111,8 @@ define i32 @lshr_const1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
define i32 @shl_const1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: shl_const1:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: testl $2147483647, %edi # imm = 0x7FFFFFFF
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: cmovel %edx, %ecx, %eax
; CHECK-NEXT: retq
%s = shl i32 %a0, 1
%c = icmp eq i32 %s, 0
diff --git a/llvm/test/CodeGen/X86/apx/sub.ll b/llvm/test/CodeGen/X86/apx/sub.ll
index 4b0bd14..be0914c 100644
--- a/llvm/test/CodeGen/X86/apx/sub.ll
+++ b/llvm/test/CodeGen/X86/apx/sub.ll
@@ -299,10 +299,10 @@ declare i64 @llvm.usub.sat.i64(i64, i64)
define i8 @subflag8rr(i8 noundef %a, i8 noundef %b) {
; CHECK-LABEL: subflag8rr:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subb %sil, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x28,0xf7]
-; CHECK-NEXT: movzbl %al, %eax # encoding: [0x0f,0xb6,0xc0]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subb %sil, %dil, %cl # encoding: [0x62,0xf4,0x74,0x18,0x28,0xf7]
+; CHECK-NEXT: movzbl %cl, %ecx # encoding: [0x0f,0xb6,0xc9]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -313,9 +313,9 @@ entry:
define i16 @subflag16rr(i16 noundef %a, i16 noundef %b) {
; CHECK-LABEL: subflag16rr:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subw %si, %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x29,0xf7]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subw %si, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x29,0xf7]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -326,9 +326,9 @@ entry:
define i32 @subflag32rr(i32 noundef %a, i32 noundef %b) {
; CHECK-LABEL: subflag32rr:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subl %esi, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x29,0xf7]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subl %esi, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x29,0xf7]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i32 @llvm.usub.sat.i32(i32 %a, i32 %b)
@@ -340,7 +340,7 @@ define i64 @subflag64rr(i64 noundef %a, i64 noundef %b) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: subq %rsi, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x29,0xf7]
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i64 @llvm.usub.sat.i64(i64 %a, i64 %b)
@@ -350,10 +350,10 @@ entry:
define i8 @subflag8rm(i8 noundef %a, ptr %b) {
; CHECK-LABEL: subflag8rm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subb (%rsi), %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x2a,0x3e]
-; CHECK-NEXT: movzbl %al, %eax # encoding: [0x0f,0xb6,0xc0]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subb (%rsi), %dil, %cl # encoding: [0x62,0xf4,0x74,0x18,0x2a,0x3e]
+; CHECK-NEXT: movzbl %cl, %ecx # encoding: [0x0f,0xb6,0xc9]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -365,9 +365,9 @@ entry:
define i16 @subflag16rm(i16 noundef %a, ptr %b) {
; CHECK-LABEL: subflag16rm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subw (%rsi), %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x2b,0x3e]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subw (%rsi), %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x2b,0x3e]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -379,9 +379,9 @@ entry:
define i32 @subflag32rm(i32 noundef %a, ptr %b) {
; CHECK-LABEL: subflag32rm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subl (%rsi), %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x2b,0x3e]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subl (%rsi), %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x2b,0x3e]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%t = load i32, ptr %b
@@ -394,7 +394,7 @@ define i64 @subflag64rm(i64 noundef %a, ptr %b) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: subq (%rsi), %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x2b,0x3e]
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%t = load i64, ptr %b
@@ -405,9 +405,9 @@ entry:
define i16 @subflag16ri8(i16 noundef %a) {
; CHECK-LABEL: subflag16ri8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subw $123, %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x83,0xef,0x7b]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subw $123, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x83,0xef,0x7b]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -418,9 +418,9 @@ entry:
define i32 @subflag32ri8(i32 noundef %a) {
; CHECK-LABEL: subflag32ri8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subl $123, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x83,0xef,0x7b]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subl $123, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x83,0xef,0x7b]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i32 @llvm.usub.sat.i32(i32 %a, i32 123)
@@ -432,7 +432,7 @@ define i64 @subflag64ri8(i64 noundef %a) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: subq $123, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x83,0xef,0x7b]
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i64 @llvm.usub.sat.i64(i64 %a, i64 123)
@@ -442,10 +442,10 @@ entry:
define i8 @subflag8ri(i8 noundef %a) {
; CHECK-LABEL: subflag8ri:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subb $123, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x80,0xef,0x7b]
-; CHECK-NEXT: movzbl %al, %eax # encoding: [0x0f,0xb6,0xc0]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subb $123, %dil, %cl # encoding: [0x62,0xf4,0x74,0x18,0x80,0xef,0x7b]
+; CHECK-NEXT: movzbl %cl, %ecx # encoding: [0x0f,0xb6,0xc9]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -456,10 +456,10 @@ entry:
define i16 @subflag16ri(i16 noundef %a) {
; CHECK-LABEL: subflag16ri:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subw $1234, %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x81,0xef,0xd2,0x04]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subw $1234, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x81,0xef,0xd2,0x04]
; CHECK-NEXT: # imm = 0x4D2
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -470,10 +470,10 @@ entry:
define i32 @subflag32ri(i32 noundef %a) {
; CHECK-LABEL: subflag32ri:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subl $123456, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x81,0xef,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subl $123456, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x81,0xef,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i32 @llvm.usub.sat.i32(i32 %a, i32 123456)
@@ -486,7 +486,7 @@ define i64 @subflag64ri(i64 noundef %a) {
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: subq $123456, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x81,0xef,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i64 @llvm.usub.sat.i64(i64 %a, i64 123456)
diff --git a/llvm/test/CodeGen/X86/asm-dialect-module.ll b/llvm/test/CodeGen/X86/asm-dialect-module.ll
new file mode 100644
index 0000000..2c00a44
--- /dev/null
+++ b/llvm/test/CodeGen/X86/asm-dialect-module.ll
@@ -0,0 +1,10 @@
+;; Test that we respect the assembler dialect when parsing module-level inline asm.
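+;; Under the default AT&T dialect "mov eax, eax" has no size suffix and fails
+;; to parse; with -x86-asm-syntax=intel the same module asm is accepted and
+;; re-emitted under .intel_syntax noprefix.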
+; RUN: not llc < %s -mtriple=x86_64 2>&1 | FileCheck %s --check-prefix=ERR
+; RUN: llc < %s -mtriple=x86_64 -x86-asm-syntax=intel | FileCheck %s
+
+; ERR: <inline asm>:1:1: error: unknown use of instruction mnemonic without a size suffix
+
+; CHECK: .intel_syntax noprefix
+; CHECK: mov eax, eax
+
+module asm "mov eax, eax"
diff --git a/llvm/test/CodeGen/X86/avgceils.ll b/llvm/test/CodeGen/X86/avgceils.ll
new file mode 100644
index 0000000..4529ea2
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avgceils.ll
@@ -0,0 +1,3821 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE4
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
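+; Signed ceiling-average tests. The "fixed" variants use the identity
+; avgceils(a, b) = (a | b) - ((a ^ b) >>s 1); the "ext" variants sign-extend,
+; compute (a + b + 1) >> 1, and truncate back.
+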
+;
+; 128-bit vectors
+;
+
+define <16 x i8> @test_fixed_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: test_fixed_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psubb %xmm0, %xmm2
+; SSE-NEXT: paddb %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsubb %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512-NEXT: vpsubb %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %or = or <16 x i8> %a0, %a1
+ %xor = xor <16 x i8> %a0, %a1
+ %shift = ashr <16 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = sub <16 x i8> %or, %shift
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_ext_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE2-LABEL: test_ext_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
+; SSE2-NEXT: psraw $8, %xmm4
+; SSE2-NEXT: paddw %xmm2, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: paddw %xmm3, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: psubw %xmm1, %xmm4
+; SSE2-NEXT: psubw %xmm1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pand %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm3
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm0
+; SSE4-NEXT: paddw %xmm2, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm1
+; SSE4-NEXT: paddw %xmm3, %xmm1
+; SSE4-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE4-NEXT: psubw %xmm2, %xmm0
+; SSE4-NEXT: psubw %xmm2, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm0
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE4-NEXT: pand %xmm2, %xmm0
+; SSE4-NEXT: pand %xmm2, %xmm1
+; SSE4-NEXT: packuswb %xmm1, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX512-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX512-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovwb %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = sext <16 x i8> %a0 to <16 x i16>
+ %x1 = sext <16 x i8> %a1 to <16 x i16>
+ %sum = add <16 x i16> %x0, %x1
+ %inc = add <16 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shift = ashr <16 x i16> %inc, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <16 x i16> %shift to <16 x i8>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @test_fixed_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: test_fixed_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm0, %xmm1
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: psubw %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX-NEXT: vpsubw %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %or = or <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a1, %a0
+ %shift = ashr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <8 x i16> %or, %shift
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_ext_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE2-LABEL: test_ext_v8i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: paddd %xmm3, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT: psubd %xmm2, %xmm0
+; SSE2-NEXT: psubd %xmm2, %xmm1
+; SSE2-NEXT: pslld $15, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm3
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm0
+; SSE4-NEXT: paddd %xmm2, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm1
+; SSE4-NEXT: paddd %xmm3, %xmm1
+; SSE4-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE4-NEXT: psubd %xmm2, %xmm0
+; SSE4-NEXT: psubd %xmm2, %xmm1
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: psrld $1, %xmm0
+; SSE4-NEXT: pxor %xmm2, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; SSE4-NEXT: packusdw %xmm1, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX512-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = sext <8 x i16> %a0 to <8 x i32>
+ %x1 = sext <8 x i16> %a1 to <8 x i32>
+ %sum = add <8 x i32> %x0, %x1
+ %inc = add <8 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shift = ashr <8 x i32> %inc, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <8 x i32> %shift to <8 x i16>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: test_fixed_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm0, %xmm1
+; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: psubd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX-NEXT: vpsubd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %or = or <4 x i32> %a0, %a1
+ %xor = xor <4 x i32> %a1, %a0
+ %shift = ashr <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>
+ %res = sub <4 x i32> %or, %shift
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @test_ext_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE2-LABEL: test_ext_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: paddq %xmm2, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT: paddq %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: psubq %xmm1, %xmm4
+; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm2
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm4
+; SSE4-NEXT: paddq %xmm2, %xmm4
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm0
+; SSE4-NEXT: paddq %xmm3, %xmm0
+; SSE4-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE4-NEXT: psubq %xmm1, %xmm4
+; SSE4-NEXT: psubq %xmm1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX512-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = sext <4 x i32> %a0 to <4 x i64>
+ %x1 = sext <4 x i32> %a1 to <4 x i64>
+ %sum = add <4 x i64> %x0, %x1
+ %inc = add <4 x i64> %sum, <i64 1, i64 1, i64 1, i64 1>
+ %shift = ashr <4 x i64> %inc, <i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <4 x i64> %shift to <4 x i32>
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE2-LABEL: test_fixed_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: psubq %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_fixed_v2i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm0, %xmm2
+; SSE4-NEXT: por %xmm1, %xmm2
+; SSE4-NEXT: pxor %xmm0, %xmm1
+; SSE4-NEXT: movdqa %xmm1, %xmm0
+; SSE4-NEXT: psrad $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
+; SSE4-NEXT: psubq %xmm1, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX2-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpsraq $1, %xmm0, %xmm0
+; AVX512-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: retq
+ %or = or <2 x i64> %a0, %a1
+ %xor = xor <2 x i64> %a1, %a0
+ %shift = ashr <2 x i64> %xor, <i64 1, i64 1>
+ %res = sub <2 x i64> %or, %shift
+ ret <2 x i64> %res
+}
+
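+; For <2 x i64>, the "ext" form sign-extends to <2 x i128>; there is no vector
+; support at that width, so the checks below expect full scalarization into
+; 64-bit add/adc/shld chains on GPRs.
+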
+define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE2-LABEL: test_ext_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdx
+; SSE2-NEXT: movq %rdx, %rsi
+; SSE2-NEXT: sarq $63, %rsi
+; SSE2-NEXT: movq %xmm1, %rdi
+; SSE2-NEXT: movq %rdi, %r8
+; SSE2-NEXT: sarq $63, %r8
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r9
+; SSE2-NEXT: movq %r9, %r10
+; SSE2-NEXT: sarq $63, %r10
+; SSE2-NEXT: addq %r9, %rdx
+; SSE2-NEXT: adcq %rsi, %r10
+; SSE2-NEXT: addq %rdi, %rax
+; SSE2-NEXT: adcq %rcx, %r8
+; SSE2-NEXT: addq $1, %rax
+; SSE2-NEXT: adcq $0, %r8
+; SSE2-NEXT: addq $1, %rdx
+; SSE2-NEXT: adcq $0, %r10
+; SSE2-NEXT: shldq $63, %rdx, %r10
+; SSE2-NEXT: shldq $63, %rax, %r8
+; SSE2-NEXT: movq %r8, %xmm0
+; SSE2-NEXT: movq %r10, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v2i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pextrq $1, %xmm0, %rax
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %xmm0, %rdx
+; SSE4-NEXT: movq %rdx, %rsi
+; SSE4-NEXT: sarq $63, %rsi
+; SSE4-NEXT: pextrq $1, %xmm1, %rdi
+; SSE4-NEXT: movq %rdi, %r8
+; SSE4-NEXT: sarq $63, %r8
+; SSE4-NEXT: movq %xmm1, %r9
+; SSE4-NEXT: movq %r9, %r10
+; SSE4-NEXT: sarq $63, %r10
+; SSE4-NEXT: addq %r9, %rdx
+; SSE4-NEXT: adcq %rsi, %r10
+; SSE4-NEXT: addq %rdi, %rax
+; SSE4-NEXT: adcq %rcx, %r8
+; SSE4-NEXT: addq $1, %rax
+; SSE4-NEXT: adcq $0, %r8
+; SSE4-NEXT: addq $1, %rdx
+; SSE4-NEXT: adcq $0, %r10
+; SSE4-NEXT: shldq $63, %rdx, %r10
+; SSE4-NEXT: shldq $63, %rax, %r8
+; SSE4-NEXT: movq %r8, %xmm1
+; SSE4-NEXT: movq %r10, %xmm0
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE4-NEXT: retq
+;
+; AVX-LABEL: test_ext_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vpextrq $1, %xmm0, %rax
+; AVX-NEXT: movq %rax, %rcx
+; AVX-NEXT: sarq $63, %rcx
+; AVX-NEXT: vmovq %xmm0, %rdx
+; AVX-NEXT: movq %rdx, %rsi
+; AVX-NEXT: sarq $63, %rsi
+; AVX-NEXT: vpextrq $1, %xmm1, %rdi
+; AVX-NEXT: movq %rdi, %r8
+; AVX-NEXT: sarq $63, %r8
+; AVX-NEXT: vmovq %xmm1, %r9
+; AVX-NEXT: movq %r9, %r10
+; AVX-NEXT: sarq $63, %r10
+; AVX-NEXT: addq %r9, %rdx
+; AVX-NEXT: adcq %rsi, %r10
+; AVX-NEXT: addq %rdi, %rax
+; AVX-NEXT: adcq %rcx, %r8
+; AVX-NEXT: addq $1, %rax
+; AVX-NEXT: adcq $0, %r8
+; AVX-NEXT: addq $1, %rdx
+; AVX-NEXT: adcq $0, %r10
+; AVX-NEXT: shldq $63, %rdx, %r10
+; AVX-NEXT: shldq $63, %rax, %r8
+; AVX-NEXT: vmovq %r8, %xmm0
+; AVX-NEXT: vmovq %r10, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: retq
+ %x0 = sext <2 x i64> %a0 to <2 x i128>
+ %x1 = sext <2 x i64> %a1 to <2 x i128>
+ %sum = add <2 x i128> %x0, %x1
+ %inc = add <2 x i128> %sum, <i128 1, i128 1>
+ %shift = ashr <2 x i128> %inc, <i128 1, i128 1>
+ %res = trunc <2 x i128> %shift to <2 x i64>
+ ret <2 x i64> %res
+}
+
+;
+; 256-bit vectors
+;
+
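+; The 256-bit tests repeat the same two patterns. AVX1 lacks 256-bit integer
+; arithmetic, so its checks expect the work to be split into 128-bit halves
+; via vextractf128/vinsertf128.
+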
+define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE-LABEL: test_fixed_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: por %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psubb %xmm1, %xmm4
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm0
+; SSE-NEXT: psubb %xmm0, %xmm5
+; SSE-NEXT: paddb %xmm3, %xmm5
+; SSE-NEXT: paddb %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsubb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpaddb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} ymm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm0
+; AVX512-NEXT: vpsubb %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %or = or <32 x i8> %a0, %a1
+ %xor = xor <32 x i8> %a0, %a1
+ %shift = ashr <32 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = sub <32 x i8> %or, %shift
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_ext_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE2-LABEL: test_ext_v32i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; SSE2-NEXT: psraw $8, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: psraw $8, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm1[8],xmm7[9],xmm1[9],xmm7[10],xmm1[10],xmm7[11],xmm1[11],xmm7[12],xmm1[12],xmm7[13],xmm1[13],xmm7[14],xmm1[14],xmm7[15],xmm1[15]
+; SSE2-NEXT: psraw $8, %xmm7
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3],xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
+; SSE2-NEXT: psraw $8, %xmm8
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; SSE2-NEXT: psraw $8, %xmm4
+; SSE2-NEXT: paddw %xmm5, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: paddw %xmm6, %xmm0
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: paddw %xmm7, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: paddw %xmm8, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: psubw %xmm3, %xmm4
+; SSE2-NEXT: psubw %xmm3, %xmm0
+; SSE2-NEXT: psubw %xmm3, %xmm2
+; SSE2-NEXT: psubw %xmm3, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm2
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: packuswb %xmm4, %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v32i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm4
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm5
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm6
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm7
+; SSE4-NEXT: pmovsxbw %xmm2, %xmm0
+; SSE4-NEXT: paddw %xmm4, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm2
+; SSE4-NEXT: paddw %xmm5, %xmm2
+; SSE4-NEXT: pmovsxbw %xmm3, %xmm1
+; SSE4-NEXT: paddw %xmm6, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm3, %xmm3
+; SSE4-NEXT: paddw %xmm7, %xmm3
+; SSE4-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE4-NEXT: psubw %xmm4, %xmm0
+; SSE4-NEXT: psubw %xmm4, %xmm2
+; SSE4-NEXT: psubw %xmm4, %xmm1
+; SSE4-NEXT: psubw %xmm4, %xmm3
+; SSE4-NEXT: psrlw $1, %xmm3
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm2
+; SSE4-NEXT: psrlw $1, %xmm0
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE4-NEXT: pand %xmm4, %xmm0
+; SSE4-NEXT: pand %xmm4, %xmm2
+; SSE4-NEXT: packuswb %xmm2, %xmm0
+; SSE4-NEXT: pand %xmm4, %xmm1
+; SSE4-NEXT: pand %xmm4, %xmm3
+; SSE4-NEXT: packuswb %xmm3, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpmovsxbw %xmm5, %xmm6
+; AVX1-NEXT: vpaddw %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm5, %xmm5
+; AVX1-NEXT: vpaddw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm5
+; AVX1-NEXT: vpaddw %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubw %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3
+; AVX2-NEXT: vpaddw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubw %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm1
+; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxbw %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxbw %ymm1, %zmm1
+; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = sext <32 x i8> %a0 to <32 x i16>
+ %x1 = sext <32 x i8> %a1 to <32 x i16>
+ %sum = add <32 x i16> %x0, %x1
+ %inc = add <32 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shift = ashr <32 x i16> %inc, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <32 x i16> %shift to <32 x i8>
+ ret <32 x i8> %res
+}
+
+define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE-LABEL: test_fixed_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: por %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm0, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm3
+; SSE-NEXT: psraw $1, %xmm3
+; SSE-NEXT: psubw %xmm3, %xmm4
+; SSE-NEXT: psraw $1, %xmm2
+; SSE-NEXT: psubw %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsubw %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubw %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubw %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %or = or <16 x i16> %a0, %a1
+ %xor = xor <16 x i16> %a1, %a0
+ %shift = ashr <16 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <16 x i16> %or, %shift
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE2-LABEL: test_ext_v16i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
+; SSE2-NEXT: psrad $16, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; SSE2-NEXT: psrad $16, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: paddd %xmm5, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: paddd %xmm6, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: paddd %xmm7, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT: psubd %xmm4, %xmm1
+; SSE2-NEXT: psubd %xmm4, %xmm3
+; SSE2-NEXT: psubd %xmm4, %xmm0
+; SSE2-NEXT: psubd %xmm4, %xmm2
+; SSE2-NEXT: pslld $15, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm2, %xmm0
+; SSE2-NEXT: pslld $15, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: pslld $15, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm3, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm4
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm5
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm6
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm7
+; SSE4-NEXT: pmovsxwd %xmm2, %xmm0
+; SSE4-NEXT: paddd %xmm4, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm2
+; SSE4-NEXT: paddd %xmm5, %xmm2
+; SSE4-NEXT: pmovsxwd %xmm3, %xmm1
+; SSE4-NEXT: paddd %xmm6, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm3, %xmm3
+; SSE4-NEXT: paddd %xmm7, %xmm3
+; SSE4-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE4-NEXT: psubd %xmm4, %xmm0
+; SSE4-NEXT: psubd %xmm4, %xmm2
+; SSE4-NEXT: psubd %xmm4, %xmm1
+; SSE4-NEXT: psubd %xmm4, %xmm3
+; SSE4-NEXT: psrld $1, %xmm3
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: psrld $1, %xmm2
+; SSE4-NEXT: psrld $1, %xmm0
+; SSE4-NEXT: pxor %xmm4, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
+; SSE4-NEXT: packusdw %xmm2, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
+; SSE4-NEXT: packusdw %xmm3, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpmovsxwd %xmm5, %xmm6
+; AVX1-NEXT: vpaddd %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm5, %xmm5
+; AVX1-NEXT: vpaddd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm5
+; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
+; AVX2-NEXT: vpaddd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm2, %ymm1
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7],ymm1[8],ymm2[9],ymm1[10],ymm2[11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = sext <16 x i16> %a0 to <16 x i32>
+ %x1 = sext <16 x i16> %a1 to <16 x i32>
+ %sum = add <16 x i32> %x0, %x1
+ %inc = add <16 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shift = ashr <16 x i32> %inc, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <16 x i32> %shift to <16 x i16>
+ ret <16 x i16> %res
+}
+
+define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE-LABEL: test_fixed_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: por %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm0, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm3
+; SSE-NEXT: psrad $1, %xmm3
+; SSE-NEXT: psubd %xmm3, %xmm4
+; SSE-NEXT: psrad $1, %xmm2
+; SSE-NEXT: psubd %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubd %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %or = or <8 x i32> %a0, %a1
+ %xor = xor <8 x i32> %a1, %a0
+ %shift = ashr <8 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = sub <8 x i32> %or, %shift
+ ret <8 x i32> %res
+}
+
+define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE2-LABEL: test_ext_v8i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm0[2,3,2,3]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,3,2,3]
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
+; SSE2-NEXT: paddq %xmm6, %xmm4
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm6
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
+; SSE2-NEXT: paddq %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm6
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
+; SSE2-NEXT: paddq %xmm7, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; SSE2-NEXT: paddq %xmm3, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: psubq %xmm3, %xmm4
+; SSE2-NEXT: psubq %xmm3, %xmm0
+; SSE2-NEXT: psubq %xmm3, %xmm2
+; SSE2-NEXT: psubq %xmm3, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm4, %xmm5
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm6
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm7
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm8
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm4
+; SSE4-NEXT: paddq %xmm5, %xmm4
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm0
+; SSE4-NEXT: paddq %xmm6, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm2
+; SSE4-NEXT: paddq %xmm7, %xmm2
+; SSE4-NEXT: pmovsxdq %xmm3, %xmm1
+; SSE4-NEXT: paddq %xmm8, %xmm1
+; SSE4-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE4-NEXT: psubq %xmm3, %xmm4
+; SSE4-NEXT: psubq %xmm3, %xmm0
+; SSE4-NEXT: psubq %xmm3, %xmm2
+; SSE4-NEXT: psubq %xmm3, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm5
+; AVX1-NEXT: vpaddq %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpmovsxdq %xmm5, %xmm6
+; AVX1-NEXT: vpaddq %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpsubq %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,2],ymm0[0,2],ymm2[4,6],ymm0[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm3
+; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm0[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = sext <8 x i32> %a0 to <8 x i64>
+ %x1 = sext <8 x i32> %a1 to <8 x i64>
+ %sum = add <8 x i64> %x0, %x1
+ %inc = add <8 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %shift = ashr <8 x i64> %inc, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <8 x i64> %shift to <8 x i32>
+ ret <8 x i32> %res
+}
+
+define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE2-LABEL: test_fixed_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: por %xmm2, %xmm5
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: psubq %xmm1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: psubq %xmm1, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm0
+; SSE2-NEXT: movdqa %xmm4, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_fixed_v4i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm1, %xmm4
+; SSE4-NEXT: por %xmm3, %xmm4
+; SSE4-NEXT: movdqa %xmm0, %xmm5
+; SSE4-NEXT: por %xmm2, %xmm5
+; SSE4-NEXT: pxor %xmm0, %xmm2
+; SSE4-NEXT: pxor %xmm1, %xmm3
+; SSE4-NEXT: movdqa %xmm3, %xmm0
+; SSE4-NEXT: psrad $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
+; SSE4-NEXT: psubq %xmm3, %xmm4
+; SSE4-NEXT: movdqa %xmm2, %xmm0
+; SSE4-NEXT: psrad $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
+; SSE4-NEXT: psubq %xmm2, %xmm5
+; SSE4-NEXT: movdqa %xmm5, %xmm0
+; SSE4-NEXT: movdqa %xmm4, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3],xmm3[4,5],xmm1[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsraq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubq %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %or = or <4 x i64> %a0, %a1
+ %xor = xor <4 x i64> %a1, %a0
+ %shift = ashr <4 x i64> %xor, <i64 1, i64 1, i64 1, i64 1>
+ %res = sub <4 x i64> %or, %shift
+ ret <4 x i64> %res
+}
+
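+; As in test_ext_v2i64, the i128 lanes scalarize; with four lanes this also
+; forces saves of all six callee-saved GPRs plus an 8-byte stack spill,
+; visible in the prologue/epilogue checks below.
+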
+define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE2-LABEL: test_ext_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: .cfi_offset %rbx, -56
+; SSE2-NEXT: .cfi_offset %r12, -48
+; SSE2-NEXT: .cfi_offset %r13, -40
+; SSE2-NEXT: .cfi_offset %r14, -32
+; SSE2-NEXT: .cfi_offset %r15, -24
+; SSE2-NEXT: .cfi_offset %rbp, -16
+; SSE2-NEXT: movq %xmm0, %r11
+; SSE2-NEXT: movq %r11, %r12
+; SSE2-NEXT: sarq $63, %r12
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rcx
+; SSE2-NEXT: movq %rcx, %rbx
+; SSE2-NEXT: sarq $63, %rbx
+; SSE2-NEXT: movq %xmm1, %rdx
+; SSE2-NEXT: movq %rdx, %r14
+; SSE2-NEXT: sarq $63, %r14
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r9
+; SSE2-NEXT: movq %r9, %r15
+; SSE2-NEXT: sarq $63, %r15
+; SSE2-NEXT: movq %xmm2, %rsi
+; SSE2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %rsi
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r13
+; SSE2-NEXT: movq %r13, %r8
+; SSE2-NEXT: sarq $63, %r8
+; SSE2-NEXT: movq %xmm3, %rbp
+; SSE2-NEXT: movq %rbp, %rdi
+; SSE2-NEXT: sarq $63, %rdi
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %rax, %r10
+; SSE2-NEXT: sarq $63, %r10
+; SSE2-NEXT: addq %rax, %r9
+; SSE2-NEXT: adcq %r15, %r10
+; SSE2-NEXT: addq %rbp, %rdx
+; SSE2-NEXT: adcq %r14, %rdi
+; SSE2-NEXT: addq %r13, %rcx
+; SSE2-NEXT: adcq %rbx, %r8
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; SSE2-NEXT: adcq %r12, %rsi
+; SSE2-NEXT: addq $1, %r11
+; SSE2-NEXT: adcq $0, %rsi
+; SSE2-NEXT: addq $1, %rcx
+; SSE2-NEXT: adcq $0, %r8
+; SSE2-NEXT: addq $1, %rdx
+; SSE2-NEXT: adcq $0, %rdi
+; SSE2-NEXT: addq $1, %r9
+; SSE2-NEXT: adcq $0, %r10
+; SSE2-NEXT: shldq $63, %r9, %r10
+; SSE2-NEXT: shldq $63, %rdx, %rdi
+; SSE2-NEXT: shldq $63, %rcx, %r8
+; SSE2-NEXT: shldq $63, %r11, %rsi
+; SSE2-NEXT: movq %rsi, %xmm0
+; SSE2-NEXT: movq %r8, %xmm2
+; SSE2-NEXT: movq %rdi, %xmm1
+; SSE2-NEXT: movq %r10, %xmm3
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pushq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: pushq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: pushq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: pushq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: pushq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: pushq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: .cfi_offset %rbx, -56
+; SSE4-NEXT: .cfi_offset %r12, -48
+; SSE4-NEXT: .cfi_offset %r13, -40
+; SSE4-NEXT: .cfi_offset %r14, -32
+; SSE4-NEXT: .cfi_offset %r15, -24
+; SSE4-NEXT: .cfi_offset %rbp, -16
+; SSE4-NEXT: pextrq $1, %xmm0, %r11
+; SSE4-NEXT: movq %r11, %r12
+; SSE4-NEXT: sarq $63, %r12
+; SSE4-NEXT: movq %xmm0, %rcx
+; SSE4-NEXT: movq %rcx, %rbx
+; SSE4-NEXT: sarq $63, %rbx
+; SSE4-NEXT: pextrq $1, %xmm1, %rdx
+; SSE4-NEXT: movq %rdx, %r14
+; SSE4-NEXT: sarq $63, %r14
+; SSE4-NEXT: movq %xmm1, %r9
+; SSE4-NEXT: movq %r9, %r15
+; SSE4-NEXT: sarq $63, %r15
+; SSE4-NEXT: pextrq $1, %xmm2, %rsi
+; SSE4-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %rsi
+; SSE4-NEXT: movq %xmm2, %r13
+; SSE4-NEXT: movq %r13, %r8
+; SSE4-NEXT: sarq $63, %r8
+; SSE4-NEXT: pextrq $1, %xmm3, %rbp
+; SSE4-NEXT: movq %rbp, %rdi
+; SSE4-NEXT: sarq $63, %rdi
+; SSE4-NEXT: movq %xmm3, %rax
+; SSE4-NEXT: movq %rax, %r10
+; SSE4-NEXT: sarq $63, %r10
+; SSE4-NEXT: addq %rax, %r9
+; SSE4-NEXT: adcq %r15, %r10
+; SSE4-NEXT: addq %rbp, %rdx
+; SSE4-NEXT: adcq %r14, %rdi
+; SSE4-NEXT: addq %r13, %rcx
+; SSE4-NEXT: adcq %rbx, %r8
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; SSE4-NEXT: adcq %r12, %rsi
+; SSE4-NEXT: addq $1, %r11
+; SSE4-NEXT: adcq $0, %rsi
+; SSE4-NEXT: addq $1, %rcx
+; SSE4-NEXT: adcq $0, %r8
+; SSE4-NEXT: addq $1, %rdx
+; SSE4-NEXT: adcq $0, %rdi
+; SSE4-NEXT: addq $1, %r9
+; SSE4-NEXT: adcq $0, %r10
+; SSE4-NEXT: shldq $63, %r9, %r10
+; SSE4-NEXT: shldq $63, %rdx, %rdi
+; SSE4-NEXT: shldq $63, %rcx, %r8
+; SSE4-NEXT: shldq $63, %r11, %rsi
+; SSE4-NEXT: movq %rsi, %xmm2
+; SSE4-NEXT: movq %r8, %xmm0
+; SSE4-NEXT: movq %rdi, %xmm3
+; SSE4-NEXT: movq %r10, %xmm1
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE4-NEXT: popq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: popq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: popq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: popq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: popq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 8
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpextrq $1, %xmm2, %r11
+; AVX1-NEXT: movq %r11, %r12
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vmovq %xmm2, %rcx
+; AVX1-NEXT: movq %rcx, %rbx
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX1-NEXT: movq %rdx, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vmovq %xmm0, %r8
+; AVX1-NEXT: movq %r8, %r15
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
+; AVX1-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX1-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %rsi
+; AVX1-NEXT: vmovq %xmm0, %r13
+; AVX1-NEXT: movq %r13, %rdi
+; AVX1-NEXT: sarq $63, %rdi
+; AVX1-NEXT: vpextrq $1, %xmm1, %rbp
+; AVX1-NEXT: movq %rbp, %r9
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: movq %rax, %r10
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: addq %rax, %r8
+; AVX1-NEXT: adcq %r15, %r10
+; AVX1-NEXT: addq %rbp, %rdx
+; AVX1-NEXT: adcq %r14, %r9
+; AVX1-NEXT: addq %r13, %rcx
+; AVX1-NEXT: adcq %rbx, %rdi
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX1-NEXT: adcq %r12, %rsi
+; AVX1-NEXT: addq $1, %r11
+; AVX1-NEXT: adcq $0, %rsi
+; AVX1-NEXT: addq $1, %rcx
+; AVX1-NEXT: adcq $0, %rdi
+; AVX1-NEXT: addq $1, %rdx
+; AVX1-NEXT: adcq $0, %r9
+; AVX1-NEXT: addq $1, %r8
+; AVX1-NEXT: adcq $0, %r10
+; AVX1-NEXT: shldq $63, %r8, %r10
+; AVX1-NEXT: shldq $63, %rdx, %r9
+; AVX1-NEXT: shldq $63, %rcx, %rdi
+; AVX1-NEXT: shldq $63, %r11, %rsi
+; AVX1-NEXT: vmovq %rsi, %xmm0
+; AVX1-NEXT: vmovq %rdi, %xmm1
+; AVX1-NEXT: vmovq %r9, %xmm2
+; AVX1-NEXT: vmovq %r10, %xmm3
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 8
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpextrq $1, %xmm2, %r11
+; AVX2-NEXT: movq %r11, %r12
+; AVX2-NEXT: sarq $63, %r12
+; AVX2-NEXT: vmovq %xmm2, %rcx
+; AVX2-NEXT: movq %rcx, %rbx
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX2-NEXT: movq %rdx, %r14
+; AVX2-NEXT: sarq $63, %r14
+; AVX2-NEXT: vmovq %xmm0, %r8
+; AVX2-NEXT: movq %r8, %r15
+; AVX2-NEXT: sarq $63, %r15
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %rsi
+; AVX2-NEXT: vmovq %xmm0, %r13
+; AVX2-NEXT: movq %r13, %rdi
+; AVX2-NEXT: sarq $63, %rdi
+; AVX2-NEXT: vpextrq $1, %xmm1, %rbp
+; AVX2-NEXT: movq %rbp, %r9
+; AVX2-NEXT: sarq $63, %r9
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: movq %rax, %r10
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: addq %rax, %r8
+; AVX2-NEXT: adcq %r15, %r10
+; AVX2-NEXT: addq %rbp, %rdx
+; AVX2-NEXT: adcq %r14, %r9
+; AVX2-NEXT: addq %r13, %rcx
+; AVX2-NEXT: adcq %rbx, %rdi
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX2-NEXT: adcq %r12, %rsi
+; AVX2-NEXT: addq $1, %r11
+; AVX2-NEXT: adcq $0, %rsi
+; AVX2-NEXT: addq $1, %rcx
+; AVX2-NEXT: adcq $0, %rdi
+; AVX2-NEXT: addq $1, %rdx
+; AVX2-NEXT: adcq $0, %r9
+; AVX2-NEXT: addq $1, %r8
+; AVX2-NEXT: adcq $0, %r10
+; AVX2-NEXT: shldq $63, %r8, %r10
+; AVX2-NEXT: shldq $63, %rdx, %r9
+; AVX2-NEXT: shldq $63, %rcx, %rdi
+; AVX2-NEXT: shldq $63, %r11, %rsi
+; AVX2-NEXT: vmovq %rsi, %xmm0
+; AVX2-NEXT: vmovq %rdi, %xmm1
+; AVX2-NEXT: vmovq %r9, %xmm2
+; AVX2-NEXT: vmovq %r10, %xmm3
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 8
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: .cfi_offset %rbx, -56
+; AVX512-NEXT: .cfi_offset %r12, -48
+; AVX512-NEXT: .cfi_offset %r13, -40
+; AVX512-NEXT: .cfi_offset %r14, -32
+; AVX512-NEXT: .cfi_offset %r15, -24
+; AVX512-NEXT: .cfi_offset %rbp, -16
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512-NEXT: vpextrq $1, %xmm2, %r11
+; AVX512-NEXT: movq %r11, %r12
+; AVX512-NEXT: sarq $63, %r12
+; AVX512-NEXT: vmovq %xmm2, %rcx
+; AVX512-NEXT: movq %rcx, %rbx
+; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512-NEXT: sarq $63, %rbx
+; AVX512-NEXT: movq %rdx, %r14
+; AVX512-NEXT: sarq $63, %r14
+; AVX512-NEXT: vmovq %xmm0, %rdi
+; AVX512-NEXT: movq %rdi, %r15
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX512-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r15
+; AVX512-NEXT: sarq $63, %rsi
+; AVX512-NEXT: vmovq %xmm0, %r13
+; AVX512-NEXT: movq %r13, %r8
+; AVX512-NEXT: sarq $63, %r8
+; AVX512-NEXT: vpextrq $1, %xmm1, %rbp
+; AVX512-NEXT: movq %rbp, %r9
+; AVX512-NEXT: sarq $63, %r9
+; AVX512-NEXT: vmovq %xmm1, %rax
+; AVX512-NEXT: movq %rax, %r10
+; AVX512-NEXT: sarq $63, %r10
+; AVX512-NEXT: addq %rax, %rdi
+; AVX512-NEXT: adcq %r15, %r10
+; AVX512-NEXT: addq %rbp, %rdx
+; AVX512-NEXT: adcq %r14, %r9
+; AVX512-NEXT: addq %r13, %rcx
+; AVX512-NEXT: adcq %rbx, %r8
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX512-NEXT: adcq %r12, %rsi
+; AVX512-NEXT: addq $1, %r11
+; AVX512-NEXT: adcq $0, %rsi
+; AVX512-NEXT: addq $1, %rcx
+; AVX512-NEXT: adcq $0, %r8
+; AVX512-NEXT: addq $1, %rdx
+; AVX512-NEXT: adcq $0, %r9
+; AVX512-NEXT: addq $1, %rdi
+; AVX512-NEXT: adcq $0, %r10
+; AVX512-NEXT: shldq $63, %rdi, %r10
+; AVX512-NEXT: shldq $63, %rdx, %r9
+; AVX512-NEXT: shldq $63, %rcx, %r8
+; AVX512-NEXT: shldq $63, %r11, %rsi
+; AVX512-NEXT: vmovq %rsi, %xmm0
+; AVX512-NEXT: vmovq %r8, %xmm1
+; AVX512-NEXT: vmovq %r9, %xmm2
+; AVX512-NEXT: vmovq %r10, %xmm3
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 8
+; AVX512-NEXT: retq
+ %x0 = sext <4 x i64> %a0 to <4 x i128>
+ %x1 = sext <4 x i64> %a1 to <4 x i128>
+ %sum = add <4 x i128> %x0, %x1
+ %inc = add <4 x i128> %sum, <i128 1, i128 1, i128 1, i128 1>
+ %shift = ashr <4 x i128> %inc, <i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <4 x i128> %shift to <4 x i64>
+ ret <4 x i64> %res
+}
+
+;
+; 512-bit vectors
+;
+
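+; Fixed-width form of the signed ceiling average of <64 x i8>: (a0 | a1) - ashr(a0 ^ a1, 1).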
+define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE-LABEL: test_fixed_v64i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm11
+; SSE-NEXT: movdqa %xmm2, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm9
+; SSE-NEXT: movdqa %xmm0, %xmm10
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: por %xmm6, %xmm2
+; SSE-NEXT: por %xmm5, %xmm1
+; SSE-NEXT: por %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm4, %xmm10
+; SSE-NEXT: pxor %xmm5, %xmm9
+; SSE-NEXT: pxor %xmm6, %xmm8
+; SSE-NEXT: pxor %xmm7, %xmm11
+; SSE-NEXT: psrlw $1, %xmm11
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm5, %xmm11
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm4, %xmm11
+; SSE-NEXT: psubb %xmm11, %xmm3
+; SSE-NEXT: psrlw $1, %xmm8
+; SSE-NEXT: pand %xmm5, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm8
+; SSE-NEXT: psubb %xmm8, %xmm2
+; SSE-NEXT: psrlw $1, %xmm9
+; SSE-NEXT: pand %xmm5, %xmm9
+; SSE-NEXT: pxor %xmm4, %xmm9
+; SSE-NEXT: psubb %xmm9, %xmm1
+; SSE-NEXT: psrlw $1, %xmm10
+; SSE-NEXT: pand %xmm5, %xmm10
+; SSE-NEXT: pxor %xmm4, %xmm10
+; SSE-NEXT: psubb %xmm10, %xmm0
+; SSE-NEXT: paddb %xmm4, %xmm0
+; SSE-NEXT: paddb %xmm4, %xmm1
+; SSE-NEXT: paddb %xmm4, %xmm2
+; SSE-NEXT: paddb %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm7
+; AVX1-NEXT: vpand %xmm3, %xmm7, %xmm7
+; AVX1-NEXT: vpxor %xmm6, %xmm7, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
+; AVX1-NEXT: vpsubb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm7, %xmm5, %xmm3
+; AVX1-NEXT: vpaddb %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddb %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpaddb %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm3 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsubb %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: vpaddb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} zmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm0
+; AVX512-NEXT: vpsubb %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %or = or <64 x i8> %a0, %a1
+ %xor = xor <64 x i8> %a0, %a1
+ %shift = ashr <64 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = sub <64 x i8> %or, %shift
+ ret <64 x i8> %res
+}
+
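+; Widened form for <64 x i8>: sign-extend to <64 x i16>, compute ashr(x0 + x1 + 1, 1), truncate back.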
+define <64 x i8> @test_ext_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE2-LABEL: test_ext_v64i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm3, %xmm8
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm0[8],xmm14[9],xmm0[9],xmm14[10],xmm0[10],xmm14[11],xmm0[11],xmm14[12],xmm0[12],xmm14[13],xmm0[13],xmm14[14],xmm0[14],xmm14[15],xmm0[15]
+; SSE2-NEXT: psraw $8, %xmm14
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: psraw $8, %xmm15
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1],xmm13[2],xmm1[2],xmm13[3],xmm1[3],xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7]
+; SSE2-NEXT: psraw $8, %xmm13
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm2[8],xmm12[9],xmm2[9],xmm12[10],xmm2[10],xmm12[11],xmm2[11],xmm12[12],xmm2[12],xmm12[13],xmm2[13],xmm12[14],xmm2[14],xmm12[15],xmm2[15]
+; SSE2-NEXT: psraw $8, %xmm12
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm2[0],xmm11[1],xmm2[1],xmm11[2],xmm2[2],xmm11[3],xmm2[3],xmm11[4],xmm2[4],xmm11[5],xmm2[5],xmm11[6],xmm2[6],xmm11[7],xmm2[7]
+; SSE2-NEXT: psraw $8, %xmm11
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
+; SSE2-NEXT: psraw $8, %xmm10
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; SSE2-NEXT: psraw $8, %xmm9
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm4[8],xmm8[9],xmm4[9],xmm8[10],xmm4[10],xmm8[11],xmm4[11],xmm8[12],xmm4[12],xmm8[13],xmm4[13],xmm8[14],xmm4[14],xmm8[15],xmm4[15]
+; SSE2-NEXT: psraw $8, %xmm8
+; SSE2-NEXT: paddw %xmm14, %xmm8
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: paddw %xmm15, %xmm0
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; SSE2-NEXT: psraw $8, %xmm4
+; SSE2-NEXT: paddw %xmm3, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: paddw %xmm13, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
+; SSE2-NEXT: psraw $8, %xmm5
+; SSE2-NEXT: paddw %xmm12, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: paddw %xmm11, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
+; SSE2-NEXT: psraw $8, %xmm6
+; SSE2-NEXT: paddw %xmm10, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3],xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: paddw %xmm9, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
+; SSE2-NEXT: psubw %xmm7, %xmm8
+; SSE2-NEXT: psubw %xmm7, %xmm0
+; SSE2-NEXT: psubw %xmm7, %xmm4
+; SSE2-NEXT: psubw %xmm7, %xmm1
+; SSE2-NEXT: psubw %xmm7, %xmm5
+; SSE2-NEXT: psubw %xmm7, %xmm2
+; SSE2-NEXT: psubw %xmm7, %xmm6
+; SSE2-NEXT: psubw %xmm7, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm6
+; SSE2-NEXT: psrlw $1, %xmm2
+; SSE2-NEXT: psrlw $1, %xmm5
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm8
+; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pand %xmm7, %xmm8
+; SSE2-NEXT: pand %xmm7, %xmm0
+; SSE2-NEXT: packuswb %xmm8, %xmm0
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: pand %xmm7, %xmm1
+; SSE2-NEXT: packuswb %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm7, %xmm5
+; SSE2-NEXT: pand %xmm7, %xmm2
+; SSE2-NEXT: packuswb %xmm5, %xmm2
+; SSE2-NEXT: pand %xmm7, %xmm6
+; SSE2-NEXT: pand %xmm7, %xmm3
+; SSE2-NEXT: packuswb %xmm6, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v64i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm8
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm9
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm10
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm11
+; SSE4-NEXT: pmovsxbw %xmm2, %xmm12
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm13
+; SSE4-NEXT: pmovsxbw %xmm3, %xmm14
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm15
+; SSE4-NEXT: pmovsxbw %xmm4, %xmm0
+; SSE4-NEXT: paddw %xmm8, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm4
+; SSE4-NEXT: paddw %xmm9, %xmm4
+; SSE4-NEXT: pmovsxbw %xmm5, %xmm1
+; SSE4-NEXT: paddw %xmm10, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm2, %xmm5
+; SSE4-NEXT: paddw %xmm11, %xmm5
+; SSE4-NEXT: pmovsxbw %xmm6, %xmm2
+; SSE4-NEXT: paddw %xmm12, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm3, %xmm6
+; SSE4-NEXT: paddw %xmm13, %xmm6
+; SSE4-NEXT: pmovsxbw %xmm7, %xmm3
+; SSE4-NEXT: paddw %xmm14, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm7, %xmm7
+; SSE4-NEXT: paddw %xmm15, %xmm7
+; SSE4-NEXT: pcmpeqd %xmm8, %xmm8
+; SSE4-NEXT: psubw %xmm8, %xmm0
+; SSE4-NEXT: psubw %xmm8, %xmm4
+; SSE4-NEXT: psubw %xmm8, %xmm1
+; SSE4-NEXT: psubw %xmm8, %xmm5
+; SSE4-NEXT: psubw %xmm8, %xmm2
+; SSE4-NEXT: psubw %xmm8, %xmm6
+; SSE4-NEXT: psubw %xmm8, %xmm3
+; SSE4-NEXT: psubw %xmm8, %xmm7
+; SSE4-NEXT: psrlw $1, %xmm7
+; SSE4-NEXT: psrlw $1, %xmm3
+; SSE4-NEXT: psrlw $1, %xmm6
+; SSE4-NEXT: psrlw $1, %xmm2
+; SSE4-NEXT: psrlw $1, %xmm5
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm4
+; SSE4-NEXT: psrlw $1, %xmm0
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
+; SSE4-NEXT: pand %xmm8, %xmm0
+; SSE4-NEXT: pand %xmm8, %xmm4
+; SSE4-NEXT: packuswb %xmm4, %xmm0
+; SSE4-NEXT: pand %xmm8, %xmm1
+; SSE4-NEXT: pand %xmm8, %xmm5
+; SSE4-NEXT: packuswb %xmm5, %xmm1
+; SSE4-NEXT: pand %xmm8, %xmm2
+; SSE4-NEXT: pand %xmm8, %xmm6
+; SSE4-NEXT: packuswb %xmm6, %xmm2
+; SSE4-NEXT: pand %xmm8, %xmm3
+; SSE4-NEXT: pand %xmm8, %xmm7
+; SSE4-NEXT: packuswb %xmm7, %xmm3
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm4, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT: vpmovsxbw %xmm7, %xmm8
+; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm7, %xmm7
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm9
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm10
+; AVX1-NEXT: vpmovsxbw %xmm10, %xmm11
+; AVX1-NEXT: vpaddw %xmm5, %xmm11, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm10, %xmm10
+; AVX1-NEXT: vpaddw %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm10
+; AVX1-NEXT: vpaddw %xmm6, %xmm10, %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm10
+; AVX1-NEXT: vpaddw %xmm10, %xmm8, %xmm8
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm2, %xmm7, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm7
+; AVX1-NEXT: vpaddw %xmm7, %xmm9, %xmm7
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpsubw %xmm3, %xmm5, %xmm5
+; AVX1-NEXT: vpsubw %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpsubw %xmm3, %xmm6, %xmm6
+; AVX1-NEXT: vpsubw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubw %xmm3, %xmm8, %xmm8
+; AVX1-NEXT: vpsubw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsubw %xmm3, %xmm7, %xmm7
+; AVX1-NEXT: vpsubw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm7, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm8, %xmm7
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm6, %xmm6
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm5
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm5, %xmm8, %xmm5
+; AVX1-NEXT: vpand %xmm4, %xmm8, %xmm4
+; AVX1-NEXT: vpackuswb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpand %xmm6, %xmm8, %xmm5
+; AVX1-NEXT: vpand %xmm0, %xmm8, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm5, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm4
+; AVX1-NEXT: vpand %xmm2, %xmm8, %xmm2
+; AVX1-NEXT: vpackuswb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm8, %xmm3
+; AVX1-NEXT: vpand %xmm1, %xmm8, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; AVX2-NEXT: vpmovsxbw %xmm4, %ymm4
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5
+; AVX2-NEXT: vpmovsxbw %xmm5, %ymm5
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm6
+; AVX2-NEXT: vpmovsxbw %xmm6, %ymm6
+; AVX2-NEXT: vpaddw %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
+; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2-NEXT: vpaddw %ymm2, %ymm5, %ymm2
+; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3
+; AVX2-NEXT: vpaddw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX2-NEXT: vpsubw %ymm3, %ymm4, %ymm4
+; AVX2-NEXT: vpsubw %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpsubw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm4, %ymm3
+; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxbw %ymm0, %zmm2
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT: vpmovsxbw %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxbw %ymm1, %zmm3
+; AVX512-NEXT: vpaddw %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; AVX512-NEXT: vpmovsxbw %ymm1, %zmm1
+; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubw %zmm1, %zmm2, %zmm2
+; AVX512-NEXT: vpsubw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm2, %zmm1
+; AVX512-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: retq
+ %x0 = sext <64 x i8> %a0 to <64 x i16>
+ %x1 = sext <64 x i8> %a1 to <64 x i16>
+ %sum = add <64 x i16> %x0, %x1
+ %inc = add <64 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shift = ashr <64 x i16> %inc, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <64 x i16> %shift to <64 x i8>
+ ret <64 x i8> %res
+}
+
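+; Fixed-width form for <32 x i16>: (a0 | a1) - ashr(a0 ^ a1, 1), lowered directly with psraw/vpsraw.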
+define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE-LABEL: test_fixed_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: por %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: por %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm0, %xmm4
+; SSE-NEXT: pxor %xmm1, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm6
+; SSE-NEXT: pxor %xmm8, %xmm7
+; SSE-NEXT: psraw $1, %xmm7
+; SSE-NEXT: psubw %xmm7, %xmm3
+; SSE-NEXT: psraw $1, %xmm6
+; SSE-NEXT: psubw %xmm6, %xmm9
+; SSE-NEXT: psraw $1, %xmm5
+; SSE-NEXT: psubw %xmm5, %xmm10
+; SSE-NEXT: psraw $1, %xmm4
+; SSE-NEXT: psubw %xmm4, %xmm11
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpsubw %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpsubw %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsraw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubw %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubw %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsraw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubw %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %or = or <32 x i16> %a0, %a1
+ %xor = xor <32 x i16> %a1, %a0
+ %shift = ashr <32 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <32 x i16> %or, %shift
+ ret <32 x i16> %res
+}
+
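+; Widened form for <32 x i16>: sign-extend to <32 x i32>, average, truncate back to <32 x i16>.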
+define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE2-LABEL: test_ext_v32i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm3[0],xmm13[1],xmm3[1],xmm13[2],xmm3[2],xmm13[3],xmm3[3]
+; SSE2-NEXT: psrad $16, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm3[4],xmm14[5],xmm3[5],xmm14[6],xmm3[6],xmm14[7],xmm3[7]
+; SSE2-NEXT: psrad $16, %xmm14
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm2[0],xmm15[1],xmm2[1],xmm15[2],xmm2[2],xmm15[3],xmm2[3]
+; SSE2-NEXT: psrad $16, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm2[4],xmm12[5],xmm2[5],xmm12[6],xmm2[6],xmm12[7],xmm2[7]
+; SSE2-NEXT: psrad $16, %xmm12
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3]
+; SSE2-NEXT: psrad $16, %xmm11
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm1[4],xmm10[5],xmm1[5],xmm10[6],xmm1[6],xmm10[7],xmm1[7]
+; SSE2-NEXT: psrad $16, %xmm10
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
+; SSE2-NEXT: psrad $16, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
+; SSE2-NEXT: psrad $16, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3]
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: paddd %xmm13, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm7
+; SSE2-NEXT: paddd %xmm14, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: paddd %xmm15, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm6
+; SSE2-NEXT: paddd %xmm12, %xmm6
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: paddd %xmm11, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: paddd %xmm10, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: paddd %xmm9, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: paddd %xmm8, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm8
+; SSE2-NEXT: psubd %xmm8, %xmm3
+; SSE2-NEXT: psubd %xmm8, %xmm7
+; SSE2-NEXT: psubd %xmm8, %xmm2
+; SSE2-NEXT: psubd %xmm8, %xmm6
+; SSE2-NEXT: psubd %xmm8, %xmm1
+; SSE2-NEXT: psubd %xmm8, %xmm5
+; SSE2-NEXT: psubd %xmm8, %xmm0
+; SSE2-NEXT: psubd %xmm8, %xmm4
+; SSE2-NEXT: pslld $15, %xmm4
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm4, %xmm0
+; SSE2-NEXT: pslld $15, %xmm5
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: pslld $15, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm5, %xmm1
+; SSE2-NEXT: pslld $15, %xmm6
+; SSE2-NEXT: psrad $16, %xmm6
+; SSE2-NEXT: pslld $15, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: packssdw %xmm6, %xmm2
+; SSE2-NEXT: pslld $15, %xmm7
+; SSE2-NEXT: psrad $16, %xmm7
+; SSE2-NEXT: pslld $15, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: packssdw %xmm7, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v32i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm8
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm9
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm10
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm11
+; SSE4-NEXT: pmovsxwd %xmm2, %xmm12
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm13
+; SSE4-NEXT: pmovsxwd %xmm3, %xmm14
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm15
+; SSE4-NEXT: pmovsxwd %xmm4, %xmm0
+; SSE4-NEXT: paddd %xmm8, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm4
+; SSE4-NEXT: paddd %xmm9, %xmm4
+; SSE4-NEXT: pmovsxwd %xmm5, %xmm1
+; SSE4-NEXT: paddd %xmm10, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm2, %xmm5
+; SSE4-NEXT: paddd %xmm11, %xmm5
+; SSE4-NEXT: pmovsxwd %xmm6, %xmm2
+; SSE4-NEXT: paddd %xmm12, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm3, %xmm6
+; SSE4-NEXT: paddd %xmm13, %xmm6
+; SSE4-NEXT: pmovsxwd %xmm7, %xmm3
+; SSE4-NEXT: paddd %xmm14, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm7, %xmm7
+; SSE4-NEXT: paddd %xmm15, %xmm7
+; SSE4-NEXT: pcmpeqd %xmm8, %xmm8
+; SSE4-NEXT: psubd %xmm8, %xmm0
+; SSE4-NEXT: psubd %xmm8, %xmm4
+; SSE4-NEXT: psubd %xmm8, %xmm1
+; SSE4-NEXT: psubd %xmm8, %xmm5
+; SSE4-NEXT: psubd %xmm8, %xmm2
+; SSE4-NEXT: psubd %xmm8, %xmm6
+; SSE4-NEXT: psubd %xmm8, %xmm3
+; SSE4-NEXT: psubd %xmm8, %xmm7
+; SSE4-NEXT: psrld $1, %xmm7
+; SSE4-NEXT: psrld $1, %xmm3
+; SSE4-NEXT: psrld $1, %xmm6
+; SSE4-NEXT: psrld $1, %xmm2
+; SSE4-NEXT: psrld $1, %xmm5
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: psrld $1, %xmm4
+; SSE4-NEXT: psrld $1, %xmm0
+; SSE4-NEXT: pxor %xmm8, %xmm8
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm8[1],xmm0[2],xmm8[3],xmm0[4],xmm8[5],xmm0[6],xmm8[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm8[1],xmm4[2],xmm8[3],xmm4[4],xmm8[5],xmm4[6],xmm8[7]
+; SSE4-NEXT: packusdw %xmm4, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm8[1],xmm1[2],xmm8[3],xmm1[4],xmm8[5],xmm1[6],xmm8[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0],xmm8[1],xmm5[2],xmm8[3],xmm5[4],xmm8[5],xmm5[6],xmm8[7]
+; SSE4-NEXT: packusdw %xmm5, %xmm1
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2],xmm8[3],xmm2[4],xmm8[5],xmm2[6],xmm8[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2],xmm8[3],xmm6[4],xmm8[5],xmm6[6],xmm8[7]
+; SSE4-NEXT: packusdw %xmm6, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm8[1],xmm3[2],xmm8[3],xmm3[4],xmm8[5],xmm3[6],xmm8[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0],xmm8[1],xmm7[2],xmm8[3],xmm7[4],xmm8[5],xmm7[6],xmm8[7]
+; SSE4-NEXT: packusdw %xmm7, %xmm3
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpmovsxwd %xmm4, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT: vpmovsxwd %xmm7, %xmm8
+; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm7, %xmm7
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm9
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm10
+; AVX1-NEXT: vpmovsxwd %xmm10, %xmm11
+; AVX1-NEXT: vpaddd %xmm5, %xmm11, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm10, %xmm10
+; AVX1-NEXT: vpaddd %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm10
+; AVX1-NEXT: vpaddd %xmm6, %xmm10, %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm10
+; AVX1-NEXT: vpaddd %xmm10, %xmm8, %xmm8
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm7, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm3, %xmm7
+; AVX1-NEXT: vpaddd %xmm7, %xmm9, %xmm7
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm3, %xmm5, %xmm5
+; AVX1-NEXT: vpsubd %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpsubd %xmm3, %xmm6, %xmm6
+; AVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm3, %xmm8, %xmm8
+; AVX1-NEXT: vpsubd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm3, %xmm7, %xmm7
+; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm7, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm8, %xmm7
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm6, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm5
+; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm8[1],xmm5[2],xmm8[3],xmm5[4],xmm8[5],xmm5[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm8[1],xmm4[2],xmm8[3],xmm4[4],xmm8[5],xmm4[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0],xmm8[1],xmm6[2],xmm8[3],xmm6[4],xmm8[5],xmm6[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm8[1],xmm0[2],xmm8[3],xmm0[4],xmm8[5],xmm0[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm5, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0],xmm8[1],xmm7[2],xmm8[3],xmm7[4],xmm8[5],xmm7[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2],xmm8[3],xmm2[4],xmm8[5],xmm2[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm8[1],xmm3[2],xmm8[3],xmm3[4],xmm8[5],xmm3[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm8[1],xmm1[2],xmm8[3],xmm1[4],xmm8[5],xmm1[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; AVX2-NEXT: vpmovsxwd %xmm4, %ymm4
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5
+; AVX2-NEXT: vpmovsxwd %xmm5, %ymm5
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm6
+; AVX2-NEXT: vpmovsxwd %xmm6, %ymm6
+; AVX2-NEXT: vpaddd %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
+; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm2, %ymm5, %ymm2
+; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
+; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX2-NEXT: vpsubd %ymm3, %ymm4, %ymm4
+; AVX2-NEXT: vpsubd %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpsubd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm4, %ymm3
+; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7],ymm3[8],ymm4[9],ymm3[10],ymm4[11],ymm3[12],ymm4[13],ymm3[14],ymm4[15]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3],ymm0[4],ymm4[5],ymm0[6],ymm4[7],ymm0[8],ymm4[9],ymm0[10],ymm4[11],ymm0[12],ymm4[13],ymm0[14],ymm4[15]
+; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3],ymm2[4],ymm4[5],ymm2[6],ymm4[7],ymm2[8],ymm4[9],ymm2[10],ymm4[11],ymm2[12],ymm4[13],ymm2[14],ymm4[15]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3],ymm1[4],ymm4[5],ymm1[6],ymm4[7],ymm1[8],ymm4[9],ymm1[10],ymm4[11],ymm1[12],ymm4[13],ymm1[14],ymm4[15]
+; AVX2-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxwd %ymm0, %zmm2
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxwd %ymm1, %zmm3
+; AVX512-NEXT: vpaddd %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubd %zmm1, %zmm2, %zmm2
+; AVX512-NEXT: vpsubd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm2, %zmm1
+; AVX512-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: retq
+ %x0 = sext <32 x i16> %a0 to <32 x i32>
+ %x1 = sext <32 x i16> %a1 to <32 x i32>
+ %sum = add <32 x i32> %x0, %x1
+ %inc = add <32 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shift = ashr <32 x i32> %inc, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <32 x i32> %shift to <32 x i16>
+ ret <32 x i16> %res
+}
+
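+; Fixed-width form for <16 x i32>: (a0 | a1) - ashr(a0 ^ a1, 1), lowered directly with psrad/vpsrad.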
+define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE-LABEL: test_fixed_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: por %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: por %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm0, %xmm4
+; SSE-NEXT: pxor %xmm1, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm6
+; SSE-NEXT: pxor %xmm8, %xmm7
+; SSE-NEXT: psrad $1, %xmm7
+; SSE-NEXT: psubd %xmm7, %xmm3
+; SSE-NEXT: psrad $1, %xmm6
+; SSE-NEXT: psubd %xmm6, %xmm9
+; SSE-NEXT: psrad $1, %xmm5
+; SSE-NEXT: psubd %xmm5, %xmm10
+; SSE-NEXT: psrad $1, %xmm4
+; SSE-NEXT: psubd %xmm4, %xmm11
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpsubd %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxord %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsrad $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubd %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %or = or <16 x i32> %a0, %a1
+ %xor = xor <16 x i32> %a1, %a0
+ %shift = ashr <16 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = sub <16 x i32> %or, %shift
+ ret <16 x i32> %res
+}
+
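+; Widened form for <16 x i32>: sign-extend to <16 x i64>, average, truncate back to <16 x i32>.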
+define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE2-LABEL: test_ext_v16i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm0[2,3,2,3]
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm13, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm8[0],xmm13[1],xmm8[1]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm1[2,3,2,3]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm12, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm8[0],xmm12[1],xmm8[1]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm2[2,3,2,3]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm8[0],xmm11[1],xmm8[1]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm4[2,3,2,3]
+; SSE2-NEXT: pxor %xmm14, %xmm14
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm14
+; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1]
+; SSE2-NEXT: paddq %xmm13, %xmm8
+; SSE2-NEXT: pxor %xmm13, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm13
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1]
+; SSE2-NEXT: paddq %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,2,3]
+; SSE2-NEXT: pxor %xmm13, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm13
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1]
+; SSE2-NEXT: paddq %xmm12, %xmm4
+; SSE2-NEXT: pxor %xmm12, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm12
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
+; SSE2-NEXT: paddq %xmm5, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,2,3]
+; SSE2-NEXT: pxor %xmm12, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm12
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
+; SSE2-NEXT: paddq %xmm11, %xmm5
+; SSE2-NEXT: pxor %xmm11, %xmm11
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm11
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1]
+; SSE2-NEXT: paddq %xmm6, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm7[2,3,2,3]
+; SSE2-NEXT: pxor %xmm11, %xmm11
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm11
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1]
+; SSE2-NEXT: paddq %xmm10, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm9
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
+; SSE2-NEXT: paddq %xmm7, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
+; SSE2-NEXT: psubq %xmm7, %xmm8
+; SSE2-NEXT: psubq %xmm7, %xmm0
+; SSE2-NEXT: psubq %xmm7, %xmm4
+; SSE2-NEXT: psubq %xmm7, %xmm1
+; SSE2-NEXT: psubq %xmm7, %xmm5
+; SSE2-NEXT: psubq %xmm7, %xmm2
+; SSE2-NEXT: psubq %xmm7, %xmm6
+; SSE2-NEXT: psubq %xmm7, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm6
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm6[0,2]
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[0,2]
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm8
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm8[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm8, %xmm9
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm10
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm11
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm12
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm13
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm14
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm15
+; SSE4-NEXT: pmovsxdq %xmm3, %xmm0
+; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm8
+; SSE4-NEXT: paddq %xmm9, %xmm8
+; SSE4-NEXT: pmovsxdq %xmm4, %xmm0
+; SSE4-NEXT: paddq %xmm10, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm4
+; SSE4-NEXT: paddq %xmm11, %xmm4
+; SSE4-NEXT: pmovsxdq %xmm5, %xmm1
+; SSE4-NEXT: paddq %xmm12, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm6[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm5
+; SSE4-NEXT: paddq %xmm13, %xmm5
+; SSE4-NEXT: pmovsxdq %xmm6, %xmm2
+; SSE4-NEXT: paddq %xmm14, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm7[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm3, %xmm6
+; SSE4-NEXT: paddq %xmm15, %xmm6
+; SSE4-NEXT: pmovsxdq %xmm7, %xmm3
+; SSE4-NEXT: paddq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE4-NEXT: pcmpeqd %xmm7, %xmm7
+; SSE4-NEXT: psubq %xmm7, %xmm8
+; SSE4-NEXT: psubq %xmm7, %xmm0
+; SSE4-NEXT: psubq %xmm7, %xmm4
+; SSE4-NEXT: psubq %xmm7, %xmm1
+; SSE4-NEXT: psubq %xmm7, %xmm5
+; SSE4-NEXT: psubq %xmm7, %xmm2
+; SSE4-NEXT: psubq %xmm7, %xmm6
+; SSE4-NEXT: psubq %xmm7, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm6
+; SSE4-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm6[0,2]
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm5
+; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[0,2]
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm8
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm8[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpmovsxdq %xmm5, %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm5, %xmm5
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm8
+; AVX1-NEXT: vpmovsxdq %xmm8, %xmm9
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm8, %xmm8
+; AVX1-NEXT: vpmovsxdq %xmm2, %xmm10
+; AVX1-NEXT: vpaddq %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm10
+; AVX1-NEXT: vpmovsxdq %xmm10, %xmm11
+; AVX1-NEXT: vpaddq %xmm6, %xmm11, %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm10[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm5
+; AVX1-NEXT: vpaddq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
+; AVX1-NEXT: vpmovsxdq %xmm7, %xmm10
+; AVX1-NEXT: vpaddq %xmm10, %xmm9, %xmm9
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm7[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm8, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm7, %xmm7, %xmm7
+; AVX1-NEXT: vpsubq %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vpsubq %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpsubq %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm7, %xmm5, %xmm5
+; AVX1-NEXT: vpsubq %xmm7, %xmm9, %xmm8
+; AVX1-NEXT: vpsubq %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm8, %xmm7
+; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm6, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm4[0,2],ymm0[0,2],ymm4[4,6],ymm0[4,6]
+; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm2
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2],ymm1[0,2],ymm2[4,6],ymm1[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm4
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm5
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpmovsxdq %xmm3, %ymm6
+; AVX2-NEXT: vpaddq %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm6
+; AVX2-NEXT: vpaddq %ymm6, %ymm5, %ymm5
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
+; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpsubq %ymm2, %ymm4, %ymm3
+; AVX2-NEXT: vpsubq %ymm2, %ymm5, %ymm4
+; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm4[2,3],ymm0[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm4, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm3[2,3],ymm1[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxdq %ymm0, %zmm2
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxdq %ymm1, %zmm3
+; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubq %zmm1, %zmm2, %zmm2
+; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm2, %zmm1
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: retq
+ %x0 = sext <16 x i32> %a0 to <16 x i64>
+ %x1 = sext <16 x i32> %a1 to <16 x i64>
+ %sum = add <16 x i64> %x0, %x1
+ %inc = add <16 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %shift = ashr <16 x i64> %inc, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <16 x i64> %shift to <16 x i32>
+ ret <16 x i32> %res
+}
+
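+; Fixed-width form for <8 x i64>: targets without a 64-bit arithmetic shift emulate ashr(x, 1)
+; with psrad/psrlq plus a shuffle or blend; AVX512 uses vpsraq directly.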
+define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE2-LABEL: test_fixed_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm3, %xmm8
+; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: movdqa %xmm1, %xmm10
+; SSE2-NEXT: movdqa %xmm0, %xmm11
+; SSE2-NEXT: por %xmm7, %xmm3
+; SSE2-NEXT: por %xmm6, %xmm2
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: pxor %xmm11, %xmm4
+; SSE2-NEXT: pxor %xmm10, %xmm5
+; SSE2-NEXT: pxor %xmm9, %xmm6
+; SSE2-NEXT: pxor %xmm8, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm8
+; SSE2-NEXT: psrlq $1, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
+; SSE2-NEXT: psubq %xmm7, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm7
+; SSE2-NEXT: psrlq $1, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; SSE2-NEXT: psubq %xmm6, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm6
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; SSE2-NEXT: psubq %xmm5, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: psubq %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_fixed_v8i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm3, %xmm8
+; SSE4-NEXT: movdqa %xmm2, %xmm9
+; SSE4-NEXT: movdqa %xmm1, %xmm10
+; SSE4-NEXT: movdqa %xmm0, %xmm11
+; SSE4-NEXT: por %xmm7, %xmm3
+; SSE4-NEXT: por %xmm6, %xmm2
+; SSE4-NEXT: por %xmm5, %xmm1
+; SSE4-NEXT: por %xmm4, %xmm0
+; SSE4-NEXT: pxor %xmm11, %xmm4
+; SSE4-NEXT: pxor %xmm10, %xmm5
+; SSE4-NEXT: pxor %xmm9, %xmm6
+; SSE4-NEXT: pxor %xmm8, %xmm7
+; SSE4-NEXT: movdqa %xmm7, %xmm8
+; SSE4-NEXT: psrad $1, %xmm8
+; SSE4-NEXT: psrlq $1, %xmm7
+; SSE4-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm8[2,3],xmm7[4,5],xmm8[6,7]
+; SSE4-NEXT: psubq %xmm7, %xmm3
+; SSE4-NEXT: movdqa %xmm6, %xmm7
+; SSE4-NEXT: psrad $1, %xmm7
+; SSE4-NEXT: psrlq $1, %xmm6
+; SSE4-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7]
+; SSE4-NEXT: psubq %xmm6, %xmm2
+; SSE4-NEXT: movdqa %xmm5, %xmm6
+; SSE4-NEXT: psrad $1, %xmm6
+; SSE4-NEXT: psrlq $1, %xmm5
+; SSE4-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
+; SSE4-NEXT: psubq %xmm5, %xmm1
+; SSE4-NEXT: movdqa %xmm4, %xmm5
+; SSE4-NEXT: psrad $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; SSE4-NEXT: psubq %xmm4, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm6
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1],xmm3[2,3],xmm6[4,5],xmm3[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpsubq %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpsubq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm1, %ymm2
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX2-NEXT: vpsubq %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX2-NEXT: vpsubq %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsraq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubq %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %or = or <8 x i64> %a0, %a1
+ %xor = xor <8 x i64> %a1, %a0
+ %shift = ashr <8 x i64> %xor, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = sub <8 x i64> %or, %shift
+ ret <8 x i64> %res
+}
+
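+; Widened form for <8 x i64>: sign-extend to <8 x i128> (as in the v4i64 case above); the i128
+; additions are scalarized to add/adc sequences, with a final shld recovering each averaged i64.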
+define <8 x i64> @test_ext_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE2-LABEL: test_ext_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: pushq %rax
+; SSE2-NEXT: .cfi_def_cfa_offset 64
+; SSE2-NEXT: .cfi_offset %rbx, -56
+; SSE2-NEXT: .cfi_offset %r12, -48
+; SSE2-NEXT: .cfi_offset %r13, -40
+; SSE2-NEXT: .cfi_offset %r14, -32
+; SSE2-NEXT: .cfi_offset %r15, -24
+; SSE2-NEXT: .cfi_offset %rbp, -16
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %rax
+; SSE2-NEXT: movq %rax, (%rsp) # 8-byte Spill
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %xmm1, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %xmm3, %rbx
+; SSE2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %rbx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdi
+; SSE2-NEXT: movq %rdi, %rbp
+; SSE2-NEXT: sarq $63, %rbp
+; SSE2-NEXT: movq %xmm4, %r8
+; SSE2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r8
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r11
+; SSE2-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r11
+; SSE2-NEXT: movq %xmm5, %r10
+; SSE2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r10
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r15
+; SSE2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r15
+; SSE2-NEXT: movq %xmm6, %r9
+; SSE2-NEXT: movq %r9, %r14
+; SSE2-NEXT: sarq $63, %r14
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rsi
+; SSE2-NEXT: movq %rsi, %r13
+; SSE2-NEXT: sarq $63, %r13
+; SSE2-NEXT: movq %xmm7, %rdx
+; SSE2-NEXT: movq %rdx, %r12
+; SSE2-NEXT: sarq $63, %r12
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: addq %rax, %rdi
+; SSE2-NEXT: adcq %rbp, %rcx
+; SSE2-NEXT: addq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE2-NEXT: adcq %rbx, %r12
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; SSE2-NEXT: addq %rsi, %rbp
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; SSE2-NEXT: addq %r9, %rbx
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; SSE2-NEXT: adcq (%rsp), %r8 # 8-byte Folded Reload
+; SSE2-NEXT: addq $1, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: adcq $0, %r8
+; SSE2-NEXT: addq $1, %rdx
+; SSE2-NEXT: adcq $0, %r11
+; SSE2-NEXT: addq $1, %rsi
+; SSE2-NEXT: adcq $0, %r10
+; SSE2-NEXT: addq $1, %r9
+; SSE2-NEXT: adcq $0, %r15
+; SSE2-NEXT: addq $1, %rbx
+; SSE2-NEXT: adcq $0, %r14
+; SSE2-NEXT: addq $1, %rbp
+; SSE2-NEXT: adcq $0, %r13
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE2-NEXT: addq $1, %rax
+; SSE2-NEXT: adcq $0, %r12
+; SSE2-NEXT: addq $1, %rdi
+; SSE2-NEXT: adcq $0, %rcx
+; SSE2-NEXT: shldq $63, %rdi, %rcx
+; SSE2-NEXT: shldq $63, %rax, %r12
+; SSE2-NEXT: shldq $63, %rbp, %r13
+; SSE2-NEXT: shldq $63, %rbx, %r14
+; SSE2-NEXT: shldq $63, %r9, %r15
+; SSE2-NEXT: shldq $63, %rsi, %r10
+; SSE2-NEXT: shldq $63, %rdx, %r11
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE2-NEXT: shldq $63, %rax, %r8
+; SSE2-NEXT: movq %r8, %xmm0
+; SSE2-NEXT: movq %r11, %xmm4
+; SSE2-NEXT: movq %r10, %xmm1
+; SSE2-NEXT: movq %r15, %xmm5
+; SSE2-NEXT: movq %r14, %xmm2
+; SSE2-NEXT: movq %r13, %xmm6
+; SSE2-NEXT: movq %r12, %xmm3
+; SSE2-NEXT: movq %rcx, %xmm7
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE2-NEXT: addq $8, %rsp
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pushq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: pushq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: pushq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: pushq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: pushq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: pushq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: subq $16, %rsp
+; SSE4-NEXT: .cfi_def_cfa_offset 72
+; SSE4-NEXT: .cfi_offset %rbx, -56
+; SSE4-NEXT: .cfi_offset %r12, -48
+; SSE4-NEXT: .cfi_offset %r13, -40
+; SSE4-NEXT: .cfi_offset %r14, -32
+; SSE4-NEXT: .cfi_offset %r15, -24
+; SSE4-NEXT: .cfi_offset %rbp, -16
+; SSE4-NEXT: pextrq $1, %xmm0, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %xmm0, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, (%rsp) # 8-byte Spill
+; SSE4-NEXT: pextrq $1, %xmm1, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %xmm1, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: pextrq $1, %xmm2, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %xmm2, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: pextrq $1, %xmm3, %r13
+; SSE4-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r13
+; SSE4-NEXT: movq %xmm3, %rax
+; SSE4-NEXT: movq %rax, %rsi
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rsi
+; SSE4-NEXT: pextrq $1, %xmm4, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %xmm4, %r11
+; SSE4-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r11
+; SSE4-NEXT: pextrq $1, %xmm5, %r10
+; SSE4-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r10
+; SSE4-NEXT: movq %xmm5, %rax
+; SSE4-NEXT: movq %rax, %r14
+; SSE4-NEXT: sarq $63, %r14
+; SSE4-NEXT: pextrq $1, %xmm6, %rdi
+; SSE4-NEXT: movq %rdi, %rbx
+; SSE4-NEXT: sarq $63, %rbx
+; SSE4-NEXT: movq %xmm6, %rdx
+; SSE4-NEXT: movq %rdx, %r12
+; SSE4-NEXT: sarq $63, %r12
+; SSE4-NEXT: pextrq $1, %xmm7, %r15
+; SSE4-NEXT: movq %r15, %r9
+; SSE4-NEXT: sarq $63, %r9
+; SSE4-NEXT: movq %xmm7, %rbp
+; SSE4-NEXT: movq %rbp, %r8
+; SSE4-NEXT: sarq $63, %r8
+; SSE4-NEXT: addq %rbp, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: adcq %rsi, %r8
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE4-NEXT: addq %r15, %rcx
+; SSE4-NEXT: adcq %r13, %r9
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; SSE4-NEXT: addq %rdx, %rbp
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; SSE4-NEXT: addq %rdi, %r13
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; SSE4-NEXT: addq %rax, %r15
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; SSE4-NEXT: adcq (%rsp), %r11 # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; SSE4-NEXT: addq $1, %rdx
+; SSE4-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: adcq $0, %rax
+; SSE4-NEXT: movq %rax, %rdx
+; SSE4-NEXT: addq $1, %rsi
+; SSE4-NEXT: adcq $0, %r11
+; SSE4-NEXT: addq $1, %rdi
+; SSE4-NEXT: adcq $0, %r10
+; SSE4-NEXT: addq $1, %r15
+; SSE4-NEXT: adcq $0, %r14
+; SSE4-NEXT: addq $1, %r13
+; SSE4-NEXT: adcq $0, %rbx
+; SSE4-NEXT: addq $1, %rbp
+; SSE4-NEXT: adcq $0, %r12
+; SSE4-NEXT: addq $1, %rcx
+; SSE4-NEXT: adcq $0, %r9
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE4-NEXT: addq $1, %rax
+; SSE4-NEXT: adcq $0, %r8
+; SSE4-NEXT: shldq $63, %rax, %r8
+; SSE4-NEXT: shldq $63, %rcx, %r9
+; SSE4-NEXT: shldq $63, %rbp, %r12
+; SSE4-NEXT: shldq $63, %r13, %rbx
+; SSE4-NEXT: shldq $63, %r15, %r14
+; SSE4-NEXT: shldq $63, %rdi, %r10
+; SSE4-NEXT: shldq $63, %rsi, %r11
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE4-NEXT: shldq $63, %rcx, %rdx
+; SSE4-NEXT: movq %rdx, %xmm4
+; SSE4-NEXT: movq %r11, %xmm0
+; SSE4-NEXT: movq %r10, %xmm5
+; SSE4-NEXT: movq %r14, %xmm1
+; SSE4-NEXT: movq %rbx, %xmm6
+; SSE4-NEXT: movq %r12, %xmm2
+; SSE4-NEXT: movq %r9, %xmm7
+; SSE4-NEXT: movq %r8, %xmm3
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE4-NEXT: addq $16, %rsp
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: popq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: popq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: popq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: popq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: popq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 8
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: pushq %rax
+; AVX1-NEXT: .cfi_def_cfa_offset 64
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpextrq $1, %xmm4, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: movq %rax, (%rsp) # 8-byte Spill
+; AVX1-NEXT: vmovq %xmm4, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vpextrq $1, %xmm1, %rbx
+; AVX1-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vmovq %xmm1, %r8
+; AVX1-NEXT: movq %r8, %rbp
+; AVX1-NEXT: sarq $63, %rbp
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm0
+; AVX1-NEXT: vpextrq $1, %xmm0, %r9
+; AVX1-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vmovq %xmm0, %r10
+; AVX1-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vpextrq $1, %xmm2, %r11
+; AVX1-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r11
+; AVX1-NEXT: vmovq %xmm2, %r15
+; AVX1-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm0
+; AVX1-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX1-NEXT: movq %rdi, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vmovq %xmm0, %rsi
+; AVX1-NEXT: movq %rsi, %r12
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vpextrq $1, %xmm3, %r13
+; AVX1-NEXT: movq %r13, %rdx
+; AVX1-NEXT: sarq $63, %rdx
+; AVX1-NEXT: vmovq %xmm3, %rax
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: addq %rax, %r8
+; AVX1-NEXT: adcq %rbp, %rcx
+; AVX1-NEXT: addq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX1-NEXT: adcq %rbx, %rdx
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; AVX1-NEXT: addq %rsi, %rbp
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX1-NEXT: addq %rdi, %r13
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; AVX1-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
+; AVX1-NEXT: addq $1, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: adcq $0, %r9
+; AVX1-NEXT: addq $1, %rsi
+; AVX1-NEXT: adcq $0, %r10
+; AVX1-NEXT: addq $1, %rdi
+; AVX1-NEXT: adcq $0, %r11
+; AVX1-NEXT: addq $1, %rbx
+; AVX1-NEXT: adcq $0, %r15
+; AVX1-NEXT: addq $1, %r13
+; AVX1-NEXT: adcq $0, %r14
+; AVX1-NEXT: addq $1, %rbp
+; AVX1-NEXT: adcq $0, %r12
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX1-NEXT: addq $1, %rax
+; AVX1-NEXT: adcq $0, %rdx
+; AVX1-NEXT: addq $1, %r8
+; AVX1-NEXT: adcq $0, %rcx
+; AVX1-NEXT: shldq $63, %r8, %rcx
+; AVX1-NEXT: shldq $63, %rax, %rdx
+; AVX1-NEXT: shldq $63, %rbp, %r12
+; AVX1-NEXT: shldq $63, %r13, %r14
+; AVX1-NEXT: shldq $63, %rbx, %r15
+; AVX1-NEXT: shldq $63, %rdi, %r11
+; AVX1-NEXT: shldq $63, %rsi, %r10
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX1-NEXT: shldq $63, %rax, %r9
+; AVX1-NEXT: vmovq %r9, %xmm0
+; AVX1-NEXT: vmovq %r10, %xmm1
+; AVX1-NEXT: vmovq %r11, %xmm2
+; AVX1-NEXT: vmovq %r15, %xmm3
+; AVX1-NEXT: vmovq %r14, %xmm4
+; AVX1-NEXT: vmovq %r12, %xmm5
+; AVX1-NEXT: vmovq %rdx, %xmm6
+; AVX1-NEXT: vmovq %rcx, %xmm7
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: addq $8, %rsp
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 8
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: pushq %rax
+; AVX2-NEXT: .cfi_def_cfa_offset 64
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; AVX2-NEXT: vpextrq $1, %xmm4, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: movq %rax, (%rsp) # 8-byte Spill
+; AVX2-NEXT: vmovq %xmm4, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vpextrq $1, %xmm1, %rbx
+; AVX2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vmovq %xmm1, %r8
+; AVX2-NEXT: movq %r8, %rbp
+; AVX2-NEXT: sarq $63, %rbp
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm0, %r9
+; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r9
+; AVX2-NEXT: vmovq %xmm0, %r10
+; AVX2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: vpextrq $1, %xmm2, %r11
+; AVX2-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r11
+; AVX2-NEXT: vmovq %xmm2, %r15
+; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r15
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX2-NEXT: movq %rdi, %r14
+; AVX2-NEXT: sarq $63, %r14
+; AVX2-NEXT: vmovq %xmm0, %rsi
+; AVX2-NEXT: movq %rsi, %r12
+; AVX2-NEXT: sarq $63, %r12
+; AVX2-NEXT: vpextrq $1, %xmm3, %r13
+; AVX2-NEXT: movq %r13, %rdx
+; AVX2-NEXT: sarq $63, %rdx
+; AVX2-NEXT: vmovq %xmm3, %rax
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: addq %rax, %r8
+; AVX2-NEXT: adcq %rbp, %rcx
+; AVX2-NEXT: addq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX2-NEXT: adcq %rbx, %rdx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; AVX2-NEXT: addq %rsi, %rbp
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT: addq %rdi, %r13
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; AVX2-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
+; AVX2-NEXT: addq $1, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: adcq $0, %r9
+; AVX2-NEXT: addq $1, %rsi
+; AVX2-NEXT: adcq $0, %r10
+; AVX2-NEXT: addq $1, %rdi
+; AVX2-NEXT: adcq $0, %r11
+; AVX2-NEXT: addq $1, %rbx
+; AVX2-NEXT: adcq $0, %r15
+; AVX2-NEXT: addq $1, %r13
+; AVX2-NEXT: adcq $0, %r14
+; AVX2-NEXT: addq $1, %rbp
+; AVX2-NEXT: adcq $0, %r12
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT: addq $1, %rax
+; AVX2-NEXT: adcq $0, %rdx
+; AVX2-NEXT: addq $1, %r8
+; AVX2-NEXT: adcq $0, %rcx
+; AVX2-NEXT: shldq $63, %r8, %rcx
+; AVX2-NEXT: shldq $63, %rax, %rdx
+; AVX2-NEXT: shldq $63, %rbp, %r12
+; AVX2-NEXT: shldq $63, %r13, %r14
+; AVX2-NEXT: shldq $63, %rbx, %r15
+; AVX2-NEXT: shldq $63, %rdi, %r11
+; AVX2-NEXT: shldq $63, %rsi, %r10
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT: shldq $63, %rax, %r9
+; AVX2-NEXT: vmovq %r9, %xmm0
+; AVX2-NEXT: vmovq %r10, %xmm1
+; AVX2-NEXT: vmovq %r11, %xmm2
+; AVX2-NEXT: vmovq %r15, %xmm3
+; AVX2-NEXT: vmovq %r14, %xmm4
+; AVX2-NEXT: vmovq %r12, %xmm5
+; AVX2-NEXT: vmovq %rdx, %xmm6
+; AVX2-NEXT: vmovq %rcx, %xmm7
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: addq $8, %rsp
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 8
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: pushq %rax
+; AVX512-NEXT: .cfi_def_cfa_offset 64
+; AVX512-NEXT: .cfi_offset %rbx, -56
+; AVX512-NEXT: .cfi_offset %r12, -48
+; AVX512-NEXT: .cfi_offset %r13, -40
+; AVX512-NEXT: .cfi_offset %r14, -32
+; AVX512-NEXT: .cfi_offset %r15, -24
+; AVX512-NEXT: .cfi_offset %rbp, -16
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX512-NEXT: vpextrq $1, %xmm3, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %rax
+; AVX512-NEXT: movq %rax, (%rsp) # 8-byte Spill
+; AVX512-NEXT: vmovq %xmm3, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vmovq %xmm2, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vmovq %xmm2, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vpextrq $1, %xmm0, %rbx
+; AVX512-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %rbx
+; AVX512-NEXT: vmovq %xmm0, %r8
+; AVX512-NEXT: movq %r8, %r13
+; AVX512-NEXT: sarq $63, %r13
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512-NEXT: vpextrq $1, %xmm2, %r9
+; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r9
+; AVX512-NEXT: vmovq %xmm2, %r10
+; AVX512-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r10
+; AVX512-NEXT: vpextrq $1, %xmm0, %r11
+; AVX512-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r11
+; AVX512-NEXT: vmovq %xmm0, %r14
+; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r14
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX512-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX512-NEXT: movq %rdi, %r15
+; AVX512-NEXT: sarq $63, %r15
+; AVX512-NEXT: vmovq %xmm0, %rsi
+; AVX512-NEXT: movq %rsi, %r12
+; AVX512-NEXT: sarq $63, %r12
+; AVX512-NEXT: vpextrq $1, %xmm1, %rbp
+; AVX512-NEXT: movq %rbp, %rdx
+; AVX512-NEXT: sarq $63, %rdx
+; AVX512-NEXT: vmovq %xmm1, %rax
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: addq %rax, %r8
+; AVX512-NEXT: adcq %r13, %rcx
+; AVX512-NEXT: addq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX512-NEXT: adcq %rbx, %rdx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; AVX512-NEXT: addq %rsi, %rbp
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX512-NEXT: addq %rdi, %r13
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; AVX512-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
+; AVX512-NEXT: addq $1, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: adcq $0, %r9
+; AVX512-NEXT: addq $1, %rsi
+; AVX512-NEXT: adcq $0, %r10
+; AVX512-NEXT: addq $1, %rdi
+; AVX512-NEXT: adcq $0, %r11
+; AVX512-NEXT: addq $1, %rbx
+; AVX512-NEXT: adcq $0, %r14
+; AVX512-NEXT: addq $1, %r13
+; AVX512-NEXT: adcq $0, %r15
+; AVX512-NEXT: addq $1, %rbp
+; AVX512-NEXT: adcq $0, %r12
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: addq $1, %rax
+; AVX512-NEXT: adcq $0, %rdx
+; AVX512-NEXT: addq $1, %r8
+; AVX512-NEXT: adcq $0, %rcx
+; AVX512-NEXT: shldq $63, %r8, %rcx
+; AVX512-NEXT: shldq $63, %rax, %rdx
+; AVX512-NEXT: shldq $63, %rbp, %r12
+; AVX512-NEXT: shldq $63, %r13, %r15
+; AVX512-NEXT: shldq $63, %rbx, %r14
+; AVX512-NEXT: shldq $63, %rdi, %r11
+; AVX512-NEXT: shldq $63, %rsi, %r10
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: shldq $63, %rax, %r9
+; AVX512-NEXT: vmovq %r9, %xmm0
+; AVX512-NEXT: vmovq %r10, %xmm1
+; AVX512-NEXT: vmovq %r11, %xmm2
+; AVX512-NEXT: vmovq %r14, %xmm3
+; AVX512-NEXT: vmovq %r15, %xmm4
+; AVX512-NEXT: vmovq %r12, %xmm5
+; AVX512-NEXT: vmovq %rdx, %xmm6
+; AVX512-NEXT: vmovq %rcx, %xmm7
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: addq $8, %rsp
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 8
+; AVX512-NEXT: retq
+ %x0 = sext <8 x i64> %a0 to <8 x i128>
+ %x1 = sext <8 x i64> %a1 to <8 x i128>
+ %sum = add <8 x i128> %x0, %x1
+ %inc = add <8 x i128> %sum, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
+ %shift = ashr <8 x i128> %inc, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <8 x i128> %shift to <8 x i64>
+ ret <8 x i64> %res
+}
+
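The pair of v8i64 functions above exercise the same signed ceiling average in two shapes: test_fixed_v8i64 as (a | b) - ashr(a ^ b, 1), and test_ext_v8i64 as trunc((sext(a) + sext(b) + 1) >> 1) computed through i128. As a minimal standalone C check of why the two agree — not part of the test file, and assuming >> on a negative signed value is an arithmetic shift and that the compiler provides __int128 (true for GCC/Clang on x86-64):

#include <assert.h>
#include <stdint.h>

/* Widened form, as in test_ext_v8i64: trunc((sext(a) + sext(b) + 1) >> 1). */
static int64_t avgceils_ext(int64_t a, int64_t b) {
    __int128 s = (__int128)a + (__int128)b + 1;
    return (int64_t)(s >> 1); /* assumes arithmetic shift on signed types */
}

/* Fixed form, as in test_fixed_v8i64: (a | b) - ashr(a ^ b, 1). */
static int64_t avgceils_fixed(int64_t a, int64_t b) {
    return (a | b) - ((a ^ b) >> 1);
}

int main(void) {
    int64_t v[] = {0, 1, -1, 2, -3, 42, -37, INT64_MAX, INT64_MIN};
    for (unsigned i = 0; i < sizeof v / sizeof *v; i++)
        for (unsigned j = 0; j < sizeof v / sizeof *v; j++)
            assert(avgceils_ext(v[i], v[j]) == avgceils_fixed(v[i], v[j]));
    return 0;
}

The i128 widening in the ext form is also why every pre-AVX512 expansion above scalarizes through GPRs (adcq, shldq $63), while the fixed form stays vectorized once a 64-bit arithmetic shift (vpsraq) is available.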
diff --git a/llvm/test/CodeGen/X86/avgceilu.ll b/llvm/test/CodeGen/X86/avgceilu.ll
new file mode 100644
index 0000000..dee1a5a
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avgceilu.ll
@@ -0,0 +1,2187 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE4
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
+;
+; 128-bit vectors
+;
+
+define <16 x i8> @test_fixed_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: test_fixed_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v16i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %or = or <16 x i8> %a0, %a1
+ %xor = xor <16 x i8> %a0, %a1
+ %shift = lshr <16 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = sub <16 x i8> %or, %shift
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_ext_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: test_ext_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_ext_v16i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %x0 = zext <16 x i8> %a0 to <16 x i16>
+ %x1 = zext <16 x i8> %a1 to <16 x i16>
+ %sum = add <16 x i16> %x0, %x1
+ %inc = add <16 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shift = lshr <16 x i16> %inc, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <16 x i16> %shift to <16 x i8>
+ ret <16 x i8> %res
+}
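Both v16i8 forms above collapse to a single pavgb / vpavgb because that instruction computes exactly (zext(a) + zext(b) + 1) >> 1 per byte, which is also what the or/xor/sub pattern produces. A throwaway C check, exhaustive over all byte pairs (illustration only; not part of the test file):

#include <assert.h>
#include <stdint.h>

/* Widened form, as in test_ext_v16i8: (zext(a) + zext(b) + 1) >> 1 in i16.
   This is the documented PAVGB per-byte semantic. */
static uint8_t avgceilu_ext(uint8_t a, uint8_t b) {
    return (uint8_t)(((uint16_t)a + (uint16_t)b + 1) >> 1);
}

/* Fixed form, as in test_fixed_v16i8: (a | b) - ((a ^ b) >> 1). */
static uint8_t avgceilu_fixed(uint8_t a, uint8_t b) {
    return (uint8_t)((a | b) - ((a ^ b) >> 1));
}

int main(void) {
    for (int a = 0; a < 256; a++)      /* exhaustive over all byte pairs */
        for (int b = 0; b < 256; b++)
            assert(avgceilu_ext((uint8_t)a, (uint8_t)b) ==
                   avgceilu_fixed((uint8_t)a, (uint8_t)b));
    return 0;
}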
+
+define <8 x i16> @test_fixed_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: test_fixed_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %or = or <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a1, %a0
+ %shift = lshr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <8 x i16> %or, %shift
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_ext_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: test_ext_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_ext_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %x0 = zext <8 x i16> %a0 to <8 x i32>
+ %x1 = zext <8 x i16> %a1 to <8 x i32>
+ %sum = add <8 x i32> %x0, %x1
+ %inc = add <8 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shift = lshr <8 x i32> %inc, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <8 x i32> %shift to <8 x i16>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: test_fixed_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm0, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: psubd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX-NEXT: vpsubd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %or = or <4 x i32> %a0, %a1
+ %xor = xor <4 x i32> %a1, %a0
+ %shift = lshr <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>
+ %res = sub <4 x i32> %or, %shift
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @test_ext_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE2-LABEL: test_ext_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-NEXT: paddq %xmm4, %xmm2
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT: paddq %xmm0, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT: psubq %xmm0, %xmm2
+; SSE2-NEXT: psubq %xmm0, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pxor %xmm3, %xmm3
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE4-NEXT: paddq %xmm0, %xmm1
+; SSE4-NEXT: paddq %xmm4, %xmm2
+; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE4-NEXT: psubq %xmm0, %xmm1
+; SSE4-NEXT: psubq %xmm0, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
+; SSE4-NEXT: movaps %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = zext <4 x i32> %a0 to <4 x i64>
+ %x1 = zext <4 x i32> %a1 to <4 x i64>
+ %sum = add <4 x i64> %x0, %x1
+ %inc = add <4 x i64> %sum, <i64 1, i64 1, i64 1, i64 1>
+ %shift = lshr <4 x i64> %inc, <i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <4 x i64> %shift to <4 x i32>
+ ret <4 x i32> %res
+}
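For element types with no native averaging instruction, the SSE expansions above widen each u32 lane to u64, add, then add 1 by subtracting an all-ones vector (pcmpeqd materializes -1 in every lane, and x - (-1) = x + 1), shift right, and repack the low halves with shufps. A C intrinsics model of the SSE2 sequence, written as a sketch of the same idea rather than a transcription of what llc emits:

#include <assert.h>
#include <emmintrin.h> /* SSE2 */
#include <stdint.h>

/* Ceiling average of four u32 lanes via widening to u64, mirroring the
   SSE2 expansion above (punpck*, paddq, pcmpeqd/psubq, psrlq, shufps). */
static __m128i avgceilu_v4i32(__m128i a, __m128i b) {
    __m128i zero = _mm_setzero_si128();
    __m128i alo = _mm_unpacklo_epi32(a, zero); /* zext lanes 0,1 to u64 */
    __m128i ahi = _mm_unpackhi_epi32(a, zero); /* zext lanes 2,3 to u64 */
    __m128i blo = _mm_unpacklo_epi32(b, zero);
    __m128i bhi = _mm_unpackhi_epi32(b, zero);
    __m128i slo = _mm_add_epi64(alo, blo);
    __m128i shi = _mm_add_epi64(ahi, bhi);
    __m128i ones = _mm_cmpeq_epi32(zero, zero); /* all-ones == -1 per lane */
    slo = _mm_sub_epi64(slo, ones);             /* x - (-1) == x + 1 */
    shi = _mm_sub_epi64(shi, ones);
    slo = _mm_srli_epi64(slo, 1);
    shi = _mm_srli_epi64(shi, 1);
    /* keep the low 32 bits of each u64 lane, as the shufps repack does */
    return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(slo),
                                           _mm_castsi128_ps(shi),
                                           _MM_SHUFFLE(2, 0, 2, 0)));
}

int main(void) {
    uint32_t av[4] = {0u, 1u, 0x7fffffffu, 0xffffffffu};
    uint32_t bv[4] = {1u, 0xffffffffu, 0x80000000u, 0xffffffffu};
    uint32_t out[4];
    _mm_storeu_si128((__m128i *)out,
                     avgceilu_v4i32(_mm_loadu_si128((const __m128i *)av),
                                    _mm_loadu_si128((const __m128i *)bv)));
    for (int i = 0; i < 4; i++)
        assert(out[i] == (uint32_t)(((uint64_t)av[i] + bv[i] + 1) >> 1));
    return 0;
}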
+
+define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE-LABEL: test_fixed_v2i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm0, %xmm1
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: psubq %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %or = or <2 x i64> %a0, %a1
+ %xor = xor <2 x i64> %a1, %a0
+ %shift = lshr <2 x i64> %xor, <i64 1, i64 1>
+ %res = sub <2 x i64> %or, %shift
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE2-LABEL: test_ext_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rcx
+; SSE2-NEXT: movb $1, %dl
+; SSE2-NEXT: movb $1, %sil
+; SSE2-NEXT: addb $-1, %sil
+; SSE2-NEXT: leaq 1(%rax,%rcx), %rsi
+; SSE2-NEXT: adcq %rcx, %rax
+; SSE2-NEXT: setb %al
+; SSE2-NEXT: addb $-1, %dl
+; SSE2-NEXT: movq %xmm0, %rcx
+; SSE2-NEXT: movq %xmm1, %rdx
+; SSE2-NEXT: leaq 1(%rcx,%rdx), %rdi
+; SSE2-NEXT: adcq %rdx, %rcx
+; SSE2-NEXT: setb %cl
+; SSE2-NEXT: movzbl %cl, %ecx
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: shrdq $1, %rcx, %rdi
+; SSE2-NEXT: shrdq $1, %rax, %rsi
+; SSE2-NEXT: movq %rdi, %xmm0
+; SSE2-NEXT: movq %rsi, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v2i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movq %xmm0, %rax
+; SSE4-NEXT: movq %xmm1, %rcx
+; SSE4-NEXT: movb $1, %dl
+; SSE4-NEXT: movb $1, %sil
+; SSE4-NEXT: addb $-1, %sil
+; SSE4-NEXT: leaq 1(%rax,%rcx), %rsi
+; SSE4-NEXT: adcq %rcx, %rax
+; SSE4-NEXT: setb %al
+; SSE4-NEXT: addb $-1, %dl
+; SSE4-NEXT: pextrq $1, %xmm0, %rcx
+; SSE4-NEXT: pextrq $1, %xmm1, %rdx
+; SSE4-NEXT: leaq 1(%rcx,%rdx), %rdi
+; SSE4-NEXT: adcq %rdx, %rcx
+; SSE4-NEXT: setb %cl
+; SSE4-NEXT: movzbl %cl, %ecx
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: shrdq $1, %rcx, %rdi
+; SSE4-NEXT: shrdq $1, %rax, %rsi
+; SSE4-NEXT: movq %rdi, %xmm1
+; SSE4-NEXT: movq %rsi, %xmm0
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE4-NEXT: retq
+;
+; AVX-LABEL: test_ext_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: vmovq %xmm1, %rcx
+; AVX-NEXT: movb $1, %dl
+; AVX-NEXT: movb $1, %sil
+; AVX-NEXT: addb $-1, %sil
+; AVX-NEXT: leaq 1(%rax,%rcx), %rsi
+; AVX-NEXT: adcq %rcx, %rax
+; AVX-NEXT: setb %al
+; AVX-NEXT: addb $-1, %dl
+; AVX-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX-NEXT: vpextrq $1, %xmm1, %rdx
+; AVX-NEXT: leaq 1(%rcx,%rdx), %rdi
+; AVX-NEXT: adcq %rdx, %rcx
+; AVX-NEXT: setb %cl
+; AVX-NEXT: movzbl %cl, %ecx
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: shrdq $1, %rcx, %rdi
+; AVX-NEXT: shrdq $1, %rax, %rsi
+; AVX-NEXT: vmovq %rdi, %xmm0
+; AVX-NEXT: vmovq %rsi, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: retq
+ %x0 = zext <2 x i64> %a0 to <2 x i128>
+ %x1 = zext <2 x i64> %a1 to <2 x i128>
+ %sum = add <2 x i128> %x0, %x1
+ %inc = add <2 x i128> %sum, <i128 1, i128 1>
+ %shift = lshr <2 x i128> %inc, <i128 1, i128 1>
+ %res = trunc <2 x i128> %shift to <2 x i64>
+ ret <2 x i64> %res
+}
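The v2i64 ext path has to scalarize: the movb $1 / addb $-1 pairs pre-seed the carry flag, leaq 1(%reg,%reg) forms the low 64 bits of a + b + 1 while adcq/setb captures the carry-out (the 65th bit of the sum), and shrdq $1 funnel-shifts that bit back into the top of the result. Per lane this reduces to the following, modeled in C with a 128-bit temporary (helper name is made up; the __int128 extension stands in for the lea/adc/shrd sequence):

#include <stdint.h>

/* Unsigned ceiling average of two u64 values without losing the 65th bit:
   (a + b + 1) >> 1, computed in a 128-bit temporary. The generated code
   gets the same result with leaq (low 64 bits), adcq/setb (carry-out),
   and shrdq $1 (funnel the carry back in). */
static uint64_t avgceilu_u64(uint64_t a, uint64_t b) {
    unsigned __int128 s = (unsigned __int128)a + b + 1;
    return (uint64_t)(s >> 1);
}

int main(void) {
    /* avg(2^64-1, 2^64-1) must round back to 2^64-1, not overflow to 0. */
    return avgceilu_u64(~0ull, ~0ull) == ~0ull ? 0 : 1;
}

shrdq $1, %rcx, %rdi yields exactly the low 64 bits of the 128-bit pair (%rcx:%rdi) shifted right by one, which is why a single funnel shift finishes each lane.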
+
+;
+; 256-bit vectors
+;
+
+define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE-LABEL: test_fixed_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgb %xmm2, %xmm0
+; SSE-NEXT: pavgb %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpavgb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %or = or <32 x i8> %a0, %a1
+ %xor = xor <32 x i8> %a0, %a1
+ %shift = lshr <32 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = sub <32 x i8> %or, %shift
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_ext_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE-LABEL: test_ext_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgb %xmm2, %xmm0
+; SSE-NEXT: pavgb %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpavgb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <32 x i8> %a0 to <32 x i16>
+ %x1 = zext <32 x i8> %a1 to <32 x i16>
+ %sum = add <32 x i16> %x0, %x1
+ %inc = add <32 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shift = lshr <32 x i16> %inc, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <32 x i16> %shift to <32 x i8>
+ ret <32 x i8> %res
+}
+
+define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE-LABEL: test_fixed_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgw %xmm2, %xmm0
+; SSE-NEXT: pavgw %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpavgw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %or = or <16 x i16> %a0, %a1
+ %xor = xor <16 x i16> %a1, %a0
+ %shift = lshr <16 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <16 x i16> %or, %shift
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE-LABEL: test_ext_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgw %xmm2, %xmm0
+; SSE-NEXT: pavgw %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpavgw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <16 x i16> %a0 to <16 x i32>
+ %x1 = zext <16 x i16> %a1 to <16 x i32>
+ %sum = add <16 x i32> %x0, %x1
+ %inc = add <16 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shift = lshr <16 x i32> %inc, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <16 x i32> %shift to <16 x i16>
+ ret <16 x i16> %res
+}
+
+define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE-LABEL: test_fixed_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: por %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm0, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm3
+; SSE-NEXT: psrld $1, %xmm3
+; SSE-NEXT: psubd %xmm3, %xmm4
+; SSE-NEXT: psrld $1, %xmm2
+; SSE-NEXT: psubd %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubd %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %or = or <8 x i32> %a0, %a1
+ %xor = xor <8 x i32> %a1, %a0
+ %shift = lshr <8 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = sub <8 x i32> %or, %shift
+ ret <8 x i32> %res
+}
+
+define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE2-LABEL: test_ext_v8i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
+; SSE2-NEXT: paddq %xmm6, %xmm0
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; SSE2-NEXT: paddq %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: paddq %xmm7, %xmm4
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; SSE2-NEXT: paddq %xmm1, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: psubq %xmm1, %xmm2
+; SSE2-NEXT: psubq %xmm1, %xmm4
+; SSE2-NEXT: psubq %xmm1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2]
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; SSE2-NEXT: movaps %xmm4, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm0, %xmm4
+; SSE4-NEXT: pxor %xmm5, %xmm5
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; SSE4-NEXT: paddq %xmm4, %xmm2
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; SSE4-NEXT: paddq %xmm1, %xmm3
+; SSE4-NEXT: paddq %xmm6, %xmm0
+; SSE4-NEXT: paddq %xmm7, %xmm4
+; SSE4-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE4-NEXT: psubq %xmm1, %xmm2
+; SSE4-NEXT: psubq %xmm1, %xmm3
+; SSE4-NEXT: psubq %xmm1, %xmm0
+; SSE4-NEXT: psubq %xmm1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2]
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; SSE4-NEXT: movaps %xmm4, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX1-NEXT: vpaddq %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm6[2],xmm2[2],xmm6[3],xmm2[3]
+; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm6[0],zero,xmm6[1],zero
+; AVX1-NEXT: vpaddq %xmm1, %xmm4, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpsubq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsubq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm0[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <8 x i32> %a0 to <8 x i64>
+ %x1 = zext <8 x i32> %a1 to <8 x i64>
+ %sum = add <8 x i64> %x0, %x1
+ %inc = add <8 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %shift = lshr <8 x i64> %inc, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <8 x i64> %shift to <8 x i32>
+ ret <8 x i32> %res
+}
+
+define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE-LABEL: test_fixed_v4i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: por %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm0, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm3
+; SSE-NEXT: psrlq $1, %xmm3
+; SSE-NEXT: psubq %xmm3, %xmm4
+; SSE-NEXT: psrlq $1, %xmm2
+; SSE-NEXT: psubq %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubq %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %or = or <4 x i64> %a0, %a1
+ %xor = xor <4 x i64> %a1, %a0
+ %shift = lshr <4 x i64> %xor, <i64 1, i64 1, i64 1, i64 1>
+ %res = sub <4 x i64> %or, %shift
+ ret <4 x i64> %res
+}
+
+define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE2-LABEL: test_ext_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm4, %rcx
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm4, %rdx
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: movb $1, %sil
+; SSE2-NEXT: addb $-1, %sil
+; SSE2-NEXT: leaq 1(%rcx,%rdx), %rsi
+; SSE2-NEXT: adcq %rdx, %rcx
+; SSE2-NEXT: setb %dl
+; SSE2-NEXT: movb $1, %cl
+; SSE2-NEXT: addb $-1, %cl
+; SSE2-NEXT: movq %xmm1, %rdi
+; SSE2-NEXT: movq %xmm3, %r8
+; SSE2-NEXT: leaq 1(%rdi,%r8), %rcx
+; SSE2-NEXT: adcq %r8, %rdi
+; SSE2-NEXT: setb %dil
+; SSE2-NEXT: movb $1, %r8b
+; SSE2-NEXT: addb $-1, %r8b
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %r8
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %r9
+; SSE2-NEXT: leaq 1(%r8,%r9), %r10
+; SSE2-NEXT: adcq %r9, %r8
+; SSE2-NEXT: setb %r8b
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %xmm2, %r9
+; SSE2-NEXT: leaq 1(%rax,%r9), %r11
+; SSE2-NEXT: adcq %r9, %rax
+; SSE2-NEXT: setb %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movzbl %r8b, %r8d
+; SSE2-NEXT: movzbl %dil, %edi
+; SSE2-NEXT: movzbl %dl, %edx
+; SSE2-NEXT: shrdq $1, %rax, %r11
+; SSE2-NEXT: shrdq $1, %r8, %r10
+; SSE2-NEXT: shrdq $1, %rdi, %rcx
+; SSE2-NEXT: shrdq $1, %rdx, %rsi
+; SSE2-NEXT: movq %r11, %xmm0
+; SSE2-NEXT: movq %r10, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: movq %rcx, %xmm1
+; SSE2-NEXT: movq %rsi, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movq %xmm1, %rcx
+; SSE4-NEXT: movq %xmm3, %rdx
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: movb $1, %sil
+; SSE4-NEXT: addb $-1, %sil
+; SSE4-NEXT: leaq 1(%rcx,%rdx), %rsi
+; SSE4-NEXT: adcq %rdx, %rcx
+; SSE4-NEXT: setb %dl
+; SSE4-NEXT: movb $1, %cl
+; SSE4-NEXT: addb $-1, %cl
+; SSE4-NEXT: pextrq $1, %xmm1, %rdi
+; SSE4-NEXT: pextrq $1, %xmm3, %r8
+; SSE4-NEXT: leaq 1(%rdi,%r8), %rcx
+; SSE4-NEXT: adcq %r8, %rdi
+; SSE4-NEXT: setb %dil
+; SSE4-NEXT: movb $1, %r8b
+; SSE4-NEXT: addb $-1, %r8b
+; SSE4-NEXT: movq %xmm0, %r8
+; SSE4-NEXT: movq %xmm2, %r9
+; SSE4-NEXT: leaq 1(%r8,%r9), %r10
+; SSE4-NEXT: adcq %r9, %r8
+; SSE4-NEXT: setb %r8b
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: pextrq $1, %xmm0, %rax
+; SSE4-NEXT: pextrq $1, %xmm2, %r9
+; SSE4-NEXT: leaq 1(%rax,%r9), %r11
+; SSE4-NEXT: adcq %r9, %rax
+; SSE4-NEXT: setb %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: movzbl %r8b, %r8d
+; SSE4-NEXT: movzbl %dil, %edi
+; SSE4-NEXT: movzbl %dl, %edx
+; SSE4-NEXT: shrdq $1, %rax, %r11
+; SSE4-NEXT: shrdq $1, %r8, %r10
+; SSE4-NEXT: shrdq $1, %rdi, %rcx
+; SSE4-NEXT: shrdq $1, %rdx, %rsi
+; SSE4-NEXT: movq %r11, %xmm1
+; SSE4-NEXT: movq %r10, %xmm0
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE4-NEXT: movq %rcx, %xmm2
+; SSE4-NEXT: movq %rsi, %xmm1
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovq %xmm0, %rcx
+; AVX1-NEXT: vmovq %xmm1, %rdx
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: movb $1, %sil
+; AVX1-NEXT: addb $-1, %sil
+; AVX1-NEXT: leaq 1(%rcx,%rdx), %rsi
+; AVX1-NEXT: adcq %rdx, %rcx
+; AVX1-NEXT: setb %dl
+; AVX1-NEXT: movb $1, %cl
+; AVX1-NEXT: addb $-1, %cl
+; AVX1-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX1-NEXT: vpextrq $1, %xmm1, %r8
+; AVX1-NEXT: leaq 1(%rdi,%r8), %rcx
+; AVX1-NEXT: adcq %r8, %rdi
+; AVX1-NEXT: setb %dil
+; AVX1-NEXT: movb $1, %r8b
+; AVX1-NEXT: addb $-1, %r8b
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %r8
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vmovq %xmm1, %r9
+; AVX1-NEXT: leaq 1(%r8,%r9), %r10
+; AVX1-NEXT: adcq %r9, %r8
+; AVX1-NEXT: setb %r8b
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: vpextrq $1, %xmm1, %r9
+; AVX1-NEXT: leaq 1(%rax,%r9), %r11
+; AVX1-NEXT: adcq %r9, %rax
+; AVX1-NEXT: setb %al
+; AVX1-NEXT: movzbl %al, %eax
+; AVX1-NEXT: movzbl %r8b, %r8d
+; AVX1-NEXT: movzbl %dil, %edi
+; AVX1-NEXT: movzbl %dl, %edx
+; AVX1-NEXT: shrdq $1, %rax, %r11
+; AVX1-NEXT: shrdq $1, %r8, %r10
+; AVX1-NEXT: shrdq $1, %rdi, %rcx
+; AVX1-NEXT: shrdq $1, %rdx, %rsi
+; AVX1-NEXT: vmovq %r11, %xmm0
+; AVX1-NEXT: vmovq %r10, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vmovq %rcx, %xmm1
+; AVX1-NEXT: vmovq %rsi, %xmm2
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovq %xmm0, %rcx
+; AVX2-NEXT: vmovq %xmm1, %rdx
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: movb $1, %sil
+; AVX2-NEXT: addb $-1, %sil
+; AVX2-NEXT: leaq 1(%rcx,%rdx), %rsi
+; AVX2-NEXT: adcq %rdx, %rcx
+; AVX2-NEXT: setb %dl
+; AVX2-NEXT: movb $1, %cl
+; AVX2-NEXT: addb $-1, %cl
+; AVX2-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX2-NEXT: vpextrq $1, %xmm1, %r8
+; AVX2-NEXT: leaq 1(%rdi,%r8), %rcx
+; AVX2-NEXT: adcq %r8, %rdi
+; AVX2-NEXT: setb %dil
+; AVX2-NEXT: movb $1, %r8b
+; AVX2-NEXT: addb $-1, %r8b
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %r8
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vmovq %xmm1, %r9
+; AVX2-NEXT: leaq 1(%r8,%r9), %r10
+; AVX2-NEXT: adcq %r9, %r8
+; AVX2-NEXT: setb %r8b
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: vpextrq $1, %xmm1, %r9
+; AVX2-NEXT: leaq 1(%rax,%r9), %r11
+; AVX2-NEXT: adcq %r9, %rax
+; AVX2-NEXT: setb %al
+; AVX2-NEXT: movzbl %al, %eax
+; AVX2-NEXT: movzbl %r8b, %r8d
+; AVX2-NEXT: movzbl %dil, %edi
+; AVX2-NEXT: movzbl %dl, %edx
+; AVX2-NEXT: shrdq $1, %rax, %r11
+; AVX2-NEXT: shrdq $1, %r8, %r10
+; AVX2-NEXT: shrdq $1, %rdi, %rcx
+; AVX2-NEXT: shrdq $1, %rdx, %rsi
+; AVX2-NEXT: vmovq %r11, %xmm0
+; AVX2-NEXT: vmovq %r10, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vmovq %rcx, %xmm1
+; AVX2-NEXT: vmovq %rsi, %xmm2
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovq %xmm0, %rcx
+; AVX512-NEXT: vmovq %xmm1, %rdx
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: movb $1, %sil
+; AVX512-NEXT: addb $-1, %sil
+; AVX512-NEXT: leaq 1(%rcx,%rdx), %rsi
+; AVX512-NEXT: adcq %rdx, %rcx
+; AVX512-NEXT: setb %dl
+; AVX512-NEXT: movb $1, %cl
+; AVX512-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX512-NEXT: vpextrq $1, %xmm1, %r8
+; AVX512-NEXT: addb $-1, %cl
+; AVX512-NEXT: leaq 1(%rdi,%r8), %rcx
+; AVX512-NEXT: adcq %r8, %rdi
+; AVX512-NEXT: setb %dil
+; AVX512-NEXT: movb $1, %r8b
+; AVX512-NEXT: addb $-1, %r8b
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %r8
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512-NEXT: vmovq %xmm1, %r9
+; AVX512-NEXT: leaq 1(%r8,%r9), %r10
+; AVX512-NEXT: adcq %r9, %r8
+; AVX512-NEXT: setb %r8b
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512-NEXT: vpextrq $1, %xmm1, %r9
+; AVX512-NEXT: leaq 1(%rax,%r9), %r11
+; AVX512-NEXT: adcq %r9, %rax
+; AVX512-NEXT: setb %al
+; AVX512-NEXT: movzbl %al, %eax
+; AVX512-NEXT: movzbl %r8b, %r8d
+; AVX512-NEXT: movzbl %dil, %edi
+; AVX512-NEXT: movzbl %dl, %edx
+; AVX512-NEXT: shrdq $1, %rax, %r11
+; AVX512-NEXT: shrdq $1, %r8, %r10
+; AVX512-NEXT: shrdq $1, %rdi, %rcx
+; AVX512-NEXT: shrdq $1, %rdx, %rsi
+; AVX512-NEXT: vmovq %r11, %xmm0
+; AVX512-NEXT: vmovq %r10, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vmovq %rcx, %xmm1
+; AVX512-NEXT: vmovq %rsi, %xmm2
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <4 x i64> %a0 to <4 x i128>
+ %x1 = zext <4 x i64> %a1 to <4 x i128>
+ %sum = add <4 x i128> %x0, %x1
+ %inc = add <4 x i128> %sum, <i128 1, i128 1, i128 1, i128 1>
+ %shift = lshr <4 x i128> %inc, <i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <4 x i128> %shift to <4 x i64>
+ ret <4 x i64> %res
+}
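+
+; With no vector support for i128 arithmetic, the ext form scalarizes: each
+; lane is added in GPRs (LEA computes a+b+1, ADC/SETB captures the carry-out)
+; and SHRD funnels the carry bit back in for the shift before repacking.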
+
+;
+; 512-bit vectors
+;
+
+define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE-LABEL: test_fixed_v64i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgb %xmm4, %xmm0
+; SSE-NEXT: pavgb %xmm5, %xmm1
+; SSE-NEXT: pavgb %xmm6, %xmm2
+; SSE-NEXT: pavgb %xmm7, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpavgb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpavgb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpavgb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpavgb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpavgb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %or = or <64 x i8> %a0, %a1
+ %xor = xor <64 x i8> %a0, %a1
+ %shift = lshr <64 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = sub <64 x i8> %or, %shift
+ ret <64 x i8> %res
+}
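+
+; The (a | b) - ((a ^ b) >> 1) form is an unsigned ceiling average and
+; selects PAVGB directly; AVX1 lacks 256-bit integer ops, so each ymm is
+; split into two xmm PAVGBs.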
+
+define <64 x i8> @test_ext_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE-LABEL: test_ext_v64i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgb %xmm4, %xmm0
+; SSE-NEXT: pavgb %xmm5, %xmm1
+; SSE-NEXT: pavgb %xmm6, %xmm2
+; SSE-NEXT: pavgb %xmm7, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpavgb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpavgb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpavgb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpavgb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpavgb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = zext <64 x i8> %a0 to <64 x i16>
+ %x1 = zext <64 x i8> %a1 to <64 x i16>
+ %sum = add <64 x i16> %x0, %x1
+ %inc = add <64 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shift = lshr <64 x i16> %inc, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <64 x i16> %shift to <64 x i8>
+ ret <64 x i8> %res
+}
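+
+; The widened trunc(((zext a) + (zext b) + 1) >> 1) form computes the same
+; unsigned ceiling average, so it folds to identical PAVGB sequences.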
+
+define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE-LABEL: test_fixed_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgw %xmm4, %xmm0
+; SSE-NEXT: pavgw %xmm5, %xmm1
+; SSE-NEXT: pavgw %xmm6, %xmm2
+; SSE-NEXT: pavgw %xmm7, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpavgw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpavgw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpavgw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpavgw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpavgw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %or = or <32 x i16> %a0, %a1
+ %xor = xor <32 x i16> %a1, %a0
+ %shift = lshr <32 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <32 x i16> %or, %shift
+ ret <32 x i16> %res
+}
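+
+; Same pattern at i16 granularity: PAVGW instead of PAVGB.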
+
+define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE-LABEL: test_ext_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgw %xmm4, %xmm0
+; SSE-NEXT: pavgw %xmm5, %xmm1
+; SSE-NEXT: pavgw %xmm6, %xmm2
+; SSE-NEXT: pavgw %xmm7, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpavgw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpavgw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpavgw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpavgw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpavgw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = zext <32 x i16> %a0 to <32 x i32>
+ %x1 = zext <32 x i16> %a1 to <32 x i32>
+ %sum = add <32 x i32> %x0, %x1
+ %inc = add <32 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shift = lshr <32 x i32> %inc, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <32 x i32> %shift to <32 x i16>
+ ret <32 x i16> %res
+}
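+
+; The widened i16 form likewise folds to PAVGW at every vector width.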
+
+define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE-LABEL: test_fixed_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: por %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: por %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm0, %xmm4
+; SSE-NEXT: pxor %xmm1, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm6
+; SSE-NEXT: pxor %xmm8, %xmm7
+; SSE-NEXT: psrld $1, %xmm7
+; SSE-NEXT: psubd %xmm7, %xmm3
+; SSE-NEXT: psrld $1, %xmm6
+; SSE-NEXT: psubd %xmm6, %xmm9
+; SSE-NEXT: psrld $1, %xmm5
+; SSE-NEXT: psubd %xmm5, %xmm10
+; SSE-NEXT: psrld $1, %xmm4
+; SSE-NEXT: psubd %xmm4, %xmm11
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpsubd %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxord %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubd %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %or = or <16 x i32> %a0, %a1
+ %xor = xor <16 x i32> %a1, %a0
+ %shift = lshr <16 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = sub <16 x i32> %or, %shift
+ ret <16 x i32> %res
+}
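+
+; There is no 32-bit PAVG instruction, so the i32 fixed form keeps the
+; explicit or/xor/srl/sub expansion at each register width.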
+
+define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE2-LABEL: test_ext_v16i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm2, %xmm8
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: movdqa %xmm0, %xmm10
+; SSE2-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm9[2],xmm1[3],xmm9[3]
+; SSE2-NEXT: movdqa %xmm2, %xmm11
+; SSE2-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm9[2],xmm2[3],xmm9[3]
+; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm9[0],xmm12[1],xmm9[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; SSE2-NEXT: movdqa %xmm3, %xmm13
+; SSE2-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm9[2],xmm3[3],xmm9[3]
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
+; SSE2-NEXT: paddq %xmm10, %xmm0
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm9[2],xmm4[3],xmm9[3]
+; SSE2-NEXT: paddq %xmm1, %xmm4
+; SSE2-NEXT: movdqa %xmm5, %xmm1
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
+; SSE2-NEXT: paddq %xmm11, %xmm1
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
+; SSE2-NEXT: paddq %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm6, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1]
+; SSE2-NEXT: paddq %xmm12, %xmm2
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm9[2],xmm6[3],xmm9[3]
+; SSE2-NEXT: paddq %xmm8, %xmm6
+; SSE2-NEXT: movdqa %xmm7, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
+; SSE2-NEXT: paddq %xmm13, %xmm8
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm9[2],xmm7[3],xmm9[3]
+; SSE2-NEXT: paddq %xmm3, %xmm7
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: psubq %xmm3, %xmm0
+; SSE2-NEXT: psubq %xmm3, %xmm4
+; SSE2-NEXT: psubq %xmm3, %xmm1
+; SSE2-NEXT: psubq %xmm3, %xmm5
+; SSE2-NEXT: psubq %xmm3, %xmm2
+; SSE2-NEXT: psubq %xmm3, %xmm6
+; SSE2-NEXT: psubq %xmm3, %xmm8
+; SSE2-NEXT: psubq %xmm3, %xmm7
+; SSE2-NEXT: psrlq $1, %xmm7
+; SSE2-NEXT: psrlq $1, %xmm8
+; SSE2-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm7[0,2]
+; SSE2-NEXT: psrlq $1, %xmm6
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[0,2]
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE2-NEXT: movaps %xmm8, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm3, %xmm8
+; SSE4-NEXT: movdqa %xmm2, %xmm3
+; SSE4-NEXT: movdqa %xmm1, %xmm2
+; SSE4-NEXT: movdqa %xmm0, %xmm1
+; SSE4-NEXT: pxor %xmm10, %xmm10
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm9 = xmm0[0],zero,xmm0[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm10[2],xmm1[3],xmm10[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm11 = xmm2[0],zero,xmm2[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm10[2],xmm2[3],xmm10[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm12 = xmm3[0],zero,xmm3[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm10[2],xmm3[3],xmm10[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm13 = xmm8[0],zero,xmm8[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm10[2],xmm8[3],xmm10[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
+; SSE4-NEXT: paddq %xmm1, %xmm4
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm10[2],xmm5[3],xmm10[3]
+; SSE4-NEXT: paddq %xmm2, %xmm5
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm10[2],xmm6[3],xmm10[3]
+; SSE4-NEXT: paddq %xmm3, %xmm6
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm7[0],zero,xmm7[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm10[2],xmm7[3],xmm10[3]
+; SSE4-NEXT: paddq %xmm8, %xmm7
+; SSE4-NEXT: paddq %xmm9, %xmm0
+; SSE4-NEXT: paddq %xmm11, %xmm1
+; SSE4-NEXT: paddq %xmm12, %xmm2
+; SSE4-NEXT: paddq %xmm13, %xmm3
+; SSE4-NEXT: pcmpeqd %xmm8, %xmm8
+; SSE4-NEXT: psubq %xmm8, %xmm4
+; SSE4-NEXT: psubq %xmm8, %xmm5
+; SSE4-NEXT: psubq %xmm8, %xmm6
+; SSE4-NEXT: psubq %xmm8, %xmm7
+; SSE4-NEXT: psubq %xmm8, %xmm0
+; SSE4-NEXT: psubq %xmm8, %xmm1
+; SSE4-NEXT: psubq %xmm8, %xmm2
+; SSE4-NEXT: psubq %xmm8, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm7
+; SSE4-NEXT: psrlq $1, %xmm6
+; SSE4-NEXT: psrlq $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm7[0,2]
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[0,2]
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm6[2],xmm4[2],xmm6[3],xmm4[3]
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm8 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm10 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm9 = xmm9[0],zero,xmm9[1],zero
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm11 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX1-NEXT: vpaddq %xmm5, %xmm11, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm11
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm12 = xmm11[2],xmm4[2],xmm11[3],xmm4[3]
+; AVX1-NEXT: vpaddq %xmm7, %xmm12, %xmm7
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm12 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX1-NEXT: vpaddq %xmm12, %xmm8, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm12
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm12[2],xmm4[2],xmm12[3],xmm4[3]
+; AVX1-NEXT: vpaddq %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm11[0],zero,xmm11[1],zero
+; AVX1-NEXT: vpaddq %xmm2, %xmm6, %xmm2
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm12[0],zero,xmm12[1],zero
+; AVX1-NEXT: vpaddq %xmm3, %xmm9, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6
+; AVX1-NEXT: vpsubq %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpsubq %xmm6, %xmm7, %xmm7
+; AVX1-NEXT: vpsubq %xmm6, %xmm8, %xmm8
+; AVX1-NEXT: vpsubq %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpsubq %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlq $1, %xmm8, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm7, %xmm7
+; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm2
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX2-NEXT: vpaddq %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX2-NEXT: vpaddq %ymm6, %ymm5, %ymm5
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpsubq %ymm2, %ymm4, %ymm3
+; AVX2-NEXT: vpsubq %ymm2, %ymm5, %ymm4
+; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm4[2,3],ymm0[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm4, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm3[2,3],ymm1[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm3 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
+; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubq %zmm1, %zmm2, %zmm2
+; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm2, %zmm1
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: retq
+ %x0 = zext <16 x i32> %a0 to <16 x i64>
+ %x1 = zext <16 x i32> %a1 to <16 x i64>
+ %sum = add <16 x i64> %x0, %x1
+ %inc = add <16 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %shift = lshr <16 x i64> %inc, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <16 x i64> %shift to <16 x i32>
+ ret <16 x i32> %res
+}
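+
+; The i32 ext form genuinely widens to i64 lanes: PMOVZXDQ doubles the
+; element size, the +1 is applied by subtracting an all-ones vector
+; (PCMPEQD, or VPTERNLOGD on AVX512), and the result is shifted right and
+; narrowed back.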
+
+define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE-LABEL: test_fixed_v8i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: por %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: por %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm0, %xmm4
+; SSE-NEXT: pxor %xmm1, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm6
+; SSE-NEXT: pxor %xmm8, %xmm7
+; SSE-NEXT: psrlq $1, %xmm7
+; SSE-NEXT: psubq %xmm7, %xmm3
+; SSE-NEXT: psrlq $1, %xmm6
+; SSE-NEXT: psubq %xmm6, %xmm9
+; SSE-NEXT: psrlq $1, %xmm5
+; SSE-NEXT: psubq %xmm5, %xmm10
+; SSE-NEXT: psrlq $1, %xmm4
+; SSE-NEXT: psubq %xmm4, %xmm11
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpsubq %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpsubq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubq %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %or = or <8 x i64> %a0, %a1
+ %xor = xor <8 x i64> %a1, %a0
+ %shift = lshr <8 x i64> %xor, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = sub <8 x i64> %or, %shift
+ ret <8 x i64> %res
+}
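+
+; The i64 fixed form also keeps the or/xor/srl/sub expansion; AVX512 applies
+; the quadword logic ops on full zmm registers.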
+
+define <8 x i64> @test_ext_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE2-LABEL: test_ext_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: .cfi_offset %rbx, -56
+; SSE2-NEXT: .cfi_offset %r12, -48
+; SSE2-NEXT: .cfi_offset %r13, -40
+; SSE2-NEXT: .cfi_offset %r14, -32
+; SSE2-NEXT: .cfi_offset %r15, -24
+; SSE2-NEXT: .cfi_offset %rbp, -16
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm8, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[2,3,2,3]
+; SSE2-NEXT: movq %xmm8, %rdx
+; SSE2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: adcq %rdx, %rax
+; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: movq %xmm3, %r12
+; SSE2-NEXT: movq %xmm7, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %r12, %rax
+; SSE2-NEXT: adcq %rcx, %rax
+; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm3, %r11
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
+; SSE2-NEXT: movq %xmm3, %rbx
+; SSE2-NEXT: movq %r11, %rax
+; SSE2-NEXT: adcq %rbx, %rax
+; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: movq %xmm2, %r14
+; SSE2-NEXT: movq %xmm6, %r15
+; SSE2-NEXT: movq %r14, %rax
+; SSE2-NEXT: adcq %r15, %rax
+; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %r13
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %r10
+; SSE2-NEXT: movq %r13, %rax
+; SSE2-NEXT: adcq %r10, %rax
+; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: movq %xmm1, %r9
+; SSE2-NEXT: movq %xmm5, %r8
+; SSE2-NEXT: movq %r9, %rax
+; SSE2-NEXT: adcq %r8, %rax
+; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %rdi
+; SSE2-NEXT: movq %xmm2, %rsi
+; SSE2-NEXT: movq %rdi, %rdx
+; SSE2-NEXT: adcq %rsi, %rdx
+; SSE2-NEXT: movb $1, %dl
+; SSE2-NEXT: setb %bpl
+; SSE2-NEXT: addb $-1, %dl
+; SSE2-NEXT: movq %xmm0, %rcx
+; SSE2-NEXT: movq %xmm4, %rax
+; SSE2-NEXT: movq %rcx, %rdx
+; SSE2-NEXT: adcq %rax, %rdx
+; SSE2-NEXT: leaq 1(%rcx,%rax), %rdx
+; SSE2-NEXT: leaq 1(%rdi,%rsi), %rax
+; SSE2-NEXT: leaq 1(%r9,%r8), %rcx
+; SSE2-NEXT: leaq 1(%r13,%r10), %rdi
+; SSE2-NEXT: leaq 1(%r14,%r15), %rsi
+; SSE2-NEXT: leaq 1(%r11,%rbx), %r11
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE2-NEXT: leaq 1(%r12,%r8), %r9
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; SSE2-NEXT: leaq 1(%r8,%r10), %r10
+; SSE2-NEXT: setb %r8b
+; SSE2-NEXT: movzbl %r8b, %r8d
+; SSE2-NEXT: shrdq $1, %r8, %rdx
+; SSE2-NEXT: movzbl %bpl, %r8d
+; SSE2-NEXT: shrdq $1, %r8, %rax
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
+; SSE2-NEXT: shrdq $1, %r8, %rcx
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
+; SSE2-NEXT: shrdq $1, %r8, %rdi
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
+; SSE2-NEXT: shrdq $1, %r8, %rsi
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
+; SSE2-NEXT: shrdq $1, %r8, %r11
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
+; SSE2-NEXT: shrdq $1, %r8, %r9
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
+; SSE2-NEXT: shrdq $1, %r8, %r10
+; SSE2-NEXT: movq %rdx, %xmm0
+; SSE2-NEXT: movq %rax, %xmm4
+; SSE2-NEXT: movq %rcx, %xmm1
+; SSE2-NEXT: movq %rdi, %xmm5
+; SSE2-NEXT: movq %rsi, %xmm2
+; SSE2-NEXT: movq %r11, %xmm6
+; SSE2-NEXT: movq %r9, %xmm3
+; SSE2-NEXT: movq %r10, %xmm7
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pushq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: pushq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: pushq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: pushq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: pushq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: pushq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: .cfi_offset %rbx, -56
+; SSE4-NEXT: .cfi_offset %r12, -48
+; SSE4-NEXT: .cfi_offset %r13, -40
+; SSE4-NEXT: .cfi_offset %r14, -32
+; SSE4-NEXT: .cfi_offset %r15, -24
+; SSE4-NEXT: .cfi_offset %rbp, -16
+; SSE4-NEXT: movq %xmm3, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %xmm7, %rdx
+; SSE4-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: movq %rcx, %rax
+; SSE4-NEXT: adcq %rdx, %rax
+; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: pextrq $1, %xmm3, %r12
+; SSE4-NEXT: pextrq $1, %xmm7, %rbp
+; SSE4-NEXT: movq %r12, %rax
+; SSE4-NEXT: adcq %rbp, %rax
+; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: movq %xmm2, %r11
+; SSE4-NEXT: movq %xmm6, %rbx
+; SSE4-NEXT: movq %r11, %rax
+; SSE4-NEXT: adcq %rbx, %rax
+; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: pextrq $1, %xmm2, %r14
+; SSE4-NEXT: pextrq $1, %xmm6, %r15
+; SSE4-NEXT: movq %r14, %rax
+; SSE4-NEXT: adcq %r15, %rax
+; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: movq %xmm1, %r13
+; SSE4-NEXT: movq %xmm5, %r10
+; SSE4-NEXT: movq %r13, %rax
+; SSE4-NEXT: adcq %r10, %rax
+; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: pextrq $1, %xmm1, %r9
+; SSE4-NEXT: pextrq $1, %xmm5, %r8
+; SSE4-NEXT: movq %r9, %rax
+; SSE4-NEXT: adcq %r8, %rax
+; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: movq %xmm0, %rdi
+; SSE4-NEXT: movq %xmm4, %rsi
+; SSE4-NEXT: movq %rdi, %rdx
+; SSE4-NEXT: adcq %rsi, %rdx
+; SSE4-NEXT: movb $1, %dl
+; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE4-NEXT: addb $-1, %dl
+; SSE4-NEXT: pextrq $1, %xmm0, %rcx
+; SSE4-NEXT: pextrq $1, %xmm4, %rax
+; SSE4-NEXT: movq %rcx, %rdx
+; SSE4-NEXT: adcq %rax, %rdx
+; SSE4-NEXT: leaq 1(%rcx,%rax), %rdx
+; SSE4-NEXT: leaq 1(%rdi,%rsi), %rax
+; SSE4-NEXT: leaq 1(%r9,%r8), %rcx
+; SSE4-NEXT: leaq 1(%r13,%r10), %rdi
+; SSE4-NEXT: leaq 1(%r14,%r15), %rsi
+; SSE4-NEXT: leaq 1(%r11,%rbx), %r11
+; SSE4-NEXT: leaq 1(%r12,%rbp), %r8
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; SSE4-NEXT: leaq 1(%r9,%r10), %r9
+; SSE4-NEXT: setb %r10b
+; SSE4-NEXT: movzbl %r10b, %r10d
+; SSE4-NEXT: shrdq $1, %r10, %rdx
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: shrdq $1, %r10, %rax
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: shrdq $1, %r10, %rcx
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: shrdq $1, %r10, %rdi
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: shrdq $1, %r10, %rsi
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: shrdq $1, %r10, %r11
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: shrdq $1, %r10, %r8
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: shrdq $1, %r10, %r9
+; SSE4-NEXT: movq %rdx, %xmm4
+; SSE4-NEXT: movq %rax, %xmm0
+; SSE4-NEXT: movq %rcx, %xmm5
+; SSE4-NEXT: movq %rdi, %xmm1
+; SSE4-NEXT: movq %rsi, %xmm6
+; SSE4-NEXT: movq %r11, %xmm2
+; SSE4-NEXT: movq %r8, %xmm7
+; SSE4-NEXT: movq %r9, %xmm3
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE4-NEXT: popq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: popq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: popq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: popq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: popq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 8
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: vmovq %xmm1, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vmovq %xmm3, %rdx
+; AVX1-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: movq %rcx, %rax
+; AVX1-NEXT: adcq %rdx, %rax
+; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: vpextrq $1, %xmm1, %r12
+; AVX1-NEXT: vpextrq $1, %xmm3, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %r12, %rax
+; AVX1-NEXT: adcq %rcx, %rax
+; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vmovq %xmm1, %r11
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vmovq %xmm3, %rbx
+; AVX1-NEXT: movq %r11, %rax
+; AVX1-NEXT: adcq %rbx, %rax
+; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: vpextrq $1, %xmm1, %r14
+; AVX1-NEXT: vpextrq $1, %xmm3, %r15
+; AVX1-NEXT: movq %r14, %rax
+; AVX1-NEXT: adcq %r15, %rax
+; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: vmovq %xmm0, %r13
+; AVX1-NEXT: vmovq %xmm2, %r10
+; AVX1-NEXT: movq %r13, %rax
+; AVX1-NEXT: adcq %r10, %rax
+; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: vpextrq $1, %xmm0, %r9
+; AVX1-NEXT: vpextrq $1, %xmm2, %r8
+; AVX1-NEXT: movq %r9, %rax
+; AVX1-NEXT: adcq %r8, %rax
+; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1
+; AVX1-NEXT: vmovq %xmm0, %rdi
+; AVX1-NEXT: vmovq %xmm1, %rsi
+; AVX1-NEXT: movq %rdi, %rcx
+; AVX1-NEXT: adcq %rsi, %rcx
+; AVX1-NEXT: movb $1, %cl
+; AVX1-NEXT: setb %bpl
+; AVX1-NEXT: addb $-1, %cl
+; AVX1-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX1-NEXT: vpextrq $1, %xmm1, %rax
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: adcq %rax, %rcx
+; AVX1-NEXT: leaq 1(%rdx,%rax), %rcx
+; AVX1-NEXT: leaq 1(%rdi,%rsi), %rax
+; AVX1-NEXT: leaq 1(%r9,%r8), %rdx
+; AVX1-NEXT: leaq 1(%r13,%r10), %rdi
+; AVX1-NEXT: leaq 1(%r14,%r15), %rsi
+; AVX1-NEXT: leaq 1(%r11,%rbx), %r11
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX1-NEXT: leaq 1(%r12,%r8), %r9
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX1-NEXT: leaq 1(%r8,%r10), %r8
+; AVX1-NEXT: setb %r10b
+; AVX1-NEXT: movzbl %r10b, %r10d
+; AVX1-NEXT: shrdq $1, %r10, %rcx
+; AVX1-NEXT: movzbl %bpl, %r10d
+; AVX1-NEXT: shrdq $1, %r10, %rax
+; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX1-NEXT: shrdq $1, %r10, %rdx
+; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX1-NEXT: shrdq $1, %r10, %rdi
+; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX1-NEXT: shrdq $1, %r10, %rsi
+; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX1-NEXT: shrdq $1, %r10, %r11
+; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX1-NEXT: shrdq $1, %r10, %r9
+; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX1-NEXT: shrdq $1, %r10, %r8
+; AVX1-NEXT: vmovq %rcx, %xmm0
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vmovq %rdx, %xmm2
+; AVX1-NEXT: vmovq %rdi, %xmm3
+; AVX1-NEXT: vmovq %rsi, %xmm4
+; AVX1-NEXT: vmovq %r11, %xmm5
+; AVX1-NEXT: vmovq %r9, %xmm6
+; AVX1-NEXT: vmovq %r8, %xmm7
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 8
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: vmovq %xmm1, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vmovq %xmm3, %rdx
+; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: movq %rcx, %rax
+; AVX2-NEXT: adcq %rdx, %rax
+; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: vpextrq $1, %xmm1, %r12
+; AVX2-NEXT: vpextrq $1, %xmm3, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %r12, %rax
+; AVX2-NEXT: adcq %rcx, %rax
+; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vmovq %xmm1, %r11
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
+; AVX2-NEXT: vmovq %xmm3, %rbx
+; AVX2-NEXT: movq %r11, %rax
+; AVX2-NEXT: adcq %rbx, %rax
+; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: vpextrq $1, %xmm1, %r14
+; AVX2-NEXT: vpextrq $1, %xmm3, %r15
+; AVX2-NEXT: movq %r14, %rax
+; AVX2-NEXT: adcq %r15, %rax
+; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: vmovq %xmm0, %r13
+; AVX2-NEXT: vmovq %xmm2, %r10
+; AVX2-NEXT: movq %r13, %rax
+; AVX2-NEXT: adcq %r10, %rax
+; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: vpextrq $1, %xmm0, %r9
+; AVX2-NEXT: vpextrq $1, %xmm2, %r8
+; AVX2-NEXT: movq %r9, %rax
+; AVX2-NEXT: adcq %r8, %rax
+; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm1
+; AVX2-NEXT: vmovq %xmm0, %rdi
+; AVX2-NEXT: vmovq %xmm1, %rsi
+; AVX2-NEXT: movq %rdi, %rcx
+; AVX2-NEXT: adcq %rsi, %rcx
+; AVX2-NEXT: movb $1, %cl
+; AVX2-NEXT: setb %bpl
+; AVX2-NEXT: addb $-1, %cl
+; AVX2-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX2-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-NEXT: movq %rdx, %rcx
+; AVX2-NEXT: adcq %rax, %rcx
+; AVX2-NEXT: leaq 1(%rdx,%rax), %rcx
+; AVX2-NEXT: leaq 1(%rdi,%rsi), %rax
+; AVX2-NEXT: leaq 1(%r9,%r8), %rdx
+; AVX2-NEXT: leaq 1(%r13,%r10), %rdi
+; AVX2-NEXT: leaq 1(%r14,%r15), %rsi
+; AVX2-NEXT: leaq 1(%r11,%rbx), %r11
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX2-NEXT: leaq 1(%r12,%r8), %r9
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX2-NEXT: leaq 1(%r8,%r10), %r8
+; AVX2-NEXT: setb %r10b
+; AVX2-NEXT: movzbl %r10b, %r10d
+; AVX2-NEXT: shrdq $1, %r10, %rcx
+; AVX2-NEXT: movzbl %bpl, %r10d
+; AVX2-NEXT: shrdq $1, %r10, %rax
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX2-NEXT: shrdq $1, %r10, %rdx
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX2-NEXT: shrdq $1, %r10, %rdi
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX2-NEXT: shrdq $1, %r10, %rsi
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX2-NEXT: shrdq $1, %r10, %r11
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX2-NEXT: shrdq $1, %r10, %r9
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX2-NEXT: shrdq $1, %r10, %r8
+; AVX2-NEXT: vmovq %rcx, %xmm0
+; AVX2-NEXT: vmovq %rax, %xmm1
+; AVX2-NEXT: vmovq %rdx, %xmm2
+; AVX2-NEXT: vmovq %rdi, %xmm3
+; AVX2-NEXT: vmovq %rsi, %xmm4
+; AVX2-NEXT: vmovq %r11, %xmm5
+; AVX2-NEXT: vmovq %r9, %xmm6
+; AVX2-NEXT: vmovq %r8, %xmm7
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 8
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: .cfi_offset %rbx, -56
+; AVX512-NEXT: .cfi_offset %r12, -48
+; AVX512-NEXT: .cfi_offset %r13, -40
+; AVX512-NEXT: .cfi_offset %r14, -32
+; AVX512-NEXT: .cfi_offset %r15, -24
+; AVX512-NEXT: .cfi_offset %rbp, -16
+; AVX512-NEXT: vmovq %xmm0, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vmovq %xmm1, %rdx
+; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: movq %rcx, %rax
+; AVX512-NEXT: adcq %rdx, %rax
+; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: vpextrq $1, %xmm0, %r12
+; AVX512-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: movq %r12, %rax
+; AVX512-NEXT: adcq %rcx, %rax
+; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %r11
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX512-NEXT: vmovq %xmm3, %rbx
+; AVX512-NEXT: movq %r11, %rax
+; AVX512-NEXT: adcq %rbx, %rax
+; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: vpextrq $1, %xmm2, %r14
+; AVX512-NEXT: vpextrq $1, %xmm3, %r15
+; AVX512-NEXT: movq %r14, %rax
+; AVX512-NEXT: adcq %r15, %rax
+; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT: vmovq %xmm0, %r13
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; AVX512-NEXT: vmovq %xmm1, %r10
+; AVX512-NEXT: movq %r13, %rax
+; AVX512-NEXT: adcq %r10, %rax
+; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: vpextrq $1, %xmm0, %r9
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: vpextrq $1, %xmm1, %r8
+; AVX512-NEXT: movq %r9, %rax
+; AVX512-NEXT: adcq %r8, %rax
+; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512-NEXT: vmovq %xmm0, %rdi
+; AVX512-NEXT: vmovq %xmm1, %rsi
+; AVX512-NEXT: movq %rdi, %rcx
+; AVX512-NEXT: adcq %rsi, %rcx
+; AVX512-NEXT: movb $1, %cl
+; AVX512-NEXT: setb %bpl
+; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512-NEXT: vpextrq $1, %xmm1, %rax
+; AVX512-NEXT: addb $-1, %cl
+; AVX512-NEXT: movq %rdx, %rcx
+; AVX512-NEXT: adcq %rax, %rcx
+; AVX512-NEXT: leaq 1(%rdx,%rax), %rcx
+; AVX512-NEXT: leaq 1(%rdi,%rsi), %rax
+; AVX512-NEXT: leaq 1(%r9,%r8), %rdx
+; AVX512-NEXT: leaq 1(%r13,%r10), %rdi
+; AVX512-NEXT: leaq 1(%r14,%r15), %rsi
+; AVX512-NEXT: leaq 1(%r11,%rbx), %r11
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX512-NEXT: leaq 1(%r12,%r8), %r9
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX512-NEXT: leaq 1(%r8,%r10), %r8
+; AVX512-NEXT: setb %r10b
+; AVX512-NEXT: movzbl %r10b, %r10d
+; AVX512-NEXT: shrdq $1, %r10, %rcx
+; AVX512-NEXT: movzbl %bpl, %r10d
+; AVX512-NEXT: shrdq $1, %r10, %rax
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX512-NEXT: shrdq $1, %r10, %rdx
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX512-NEXT: shrdq $1, %r10, %rdi
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX512-NEXT: shrdq $1, %r10, %rsi
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX512-NEXT: shrdq $1, %r10, %r11
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX512-NEXT: shrdq $1, %r10, %r9
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX512-NEXT: shrdq $1, %r10, %r8
+; AVX512-NEXT: vmovq %rcx, %xmm0
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vmovq %rdx, %xmm2
+; AVX512-NEXT: vmovq %rdi, %xmm3
+; AVX512-NEXT: vmovq %rsi, %xmm4
+; AVX512-NEXT: vmovq %r11, %xmm5
+; AVX512-NEXT: vmovq %r9, %xmm6
+; AVX512-NEXT: vmovq %r8, %xmm7
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 8
+; AVX512-NEXT: retq
+ %x0 = zext <8 x i64> %a0 to <8 x i128>
+ %x1 = zext <8 x i64> %a1 to <8 x i128>
+ %sum = add <8 x i128> %x0, %x1
+ %inc = add <8 x i128> %sum, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
+ %shift = lshr <8 x i128> %inc, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <8 x i128> %shift to <8 x i64>
+ ret <8 x i64> %res
+}
+
diff --git a/llvm/test/CodeGen/X86/avgfloors.ll b/llvm/test/CodeGen/X86/avgfloors.ll
new file mode 100644
index 0000000..a3864ab
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avgfloors.ll
@@ -0,0 +1,3437 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE4
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
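+; Both test flavors compute the signed "floor" average avgfloor(a, b) =
+; (a + b) >> 1 (arithmetic shift) without overflowing the element type:
+; test_fixed_* uses the bitwise identity (a & b) + ((a ^ b) >> 1), while
+; test_ext_* sign-extends to twice the width, adds, shifts and truncates.
+; For i8 elements, where x86 has no vector arithmetic byte shift, the ashr
+; is emulated via (((x >> 1) & 127) ^ 64) - 64, hence the 127/64 constants.
+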
+;
+; 128-bit vectors
+;
+
+define <16 x i8> @test_fixed_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: test_fixed_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: paddb %xmm2, %xmm0
+; SSE-NEXT: psubb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %and = and <16 x i8> %a0, %a1
+ %xor = xor <16 x i8> %a0, %a1
+ %shift = ashr <16 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = add <16 x i8> %and, %shift
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_ext_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE2-LABEL: test_ext_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: paddw %xmm2, %xmm0
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: paddw %xmm3, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm2, %xmm2
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm4
+; SSE4-NEXT: paddw %xmm2, %xmm4
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm0
+; SSE4-NEXT: paddw %xmm3, %xmm0
+; SSE4-NEXT: psrlw $1, %xmm4
+; SSE4-NEXT: psrlw $1, %xmm0
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
+; SSE4-NEXT: pand %xmm1, %xmm0
+; SSE4-NEXT: pand %xmm1, %xmm4
+; SSE4-NEXT: packuswb %xmm4, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX512-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX512-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovwb %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = sext <16 x i8> %a0 to <16 x i16>
+ %x1 = sext <16 x i8> %a1 to <16 x i16>
+ %sum = add <16 x i16> %x0, %x1
+ %shift = ashr <16 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <16 x i16> %shift to <16 x i8>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @test_fixed_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: test_fixed_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: paddw %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %and = and <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a1, %a0
+ %shift = ashr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <8 x i16> %and, %shift
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_ext_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE2-LABEL: test_ext_v8i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: paddd %xmm2, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: paddd %xmm3, %xmm0
+; SSE2-NEXT: pslld $15, %xmm4
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm2, %xmm2
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm4
+; SSE4-NEXT: paddd %xmm2, %xmm4
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm0
+; SSE4-NEXT: paddd %xmm3, %xmm0
+; SSE4-NEXT: psrld $1, %xmm4
+; SSE4-NEXT: psrld $1, %xmm0
+; SSE4-NEXT: pxor %xmm1, %xmm1
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2],xmm1[3],xmm4[4],xmm1[5],xmm4[6],xmm1[7]
+; SSE4-NEXT: packusdw %xmm4, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX512-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = sext <8 x i16> %a0 to <8 x i32>
+ %x1 = sext <8 x i16> %a1 to <8 x i32>
+ %sum = add <8 x i32> %x0, %x1
+ %shift = ashr <8 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <8 x i32> %shift to <8 x i16>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: test_fixed_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %and = and <4 x i32> %a0, %a1
+ %xor = xor <4 x i32> %a1, %a0
+ %shift = ashr <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>
+ %res = add <4 x i32> %and, %shift
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @test_ext_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE2-LABEL: test_ext_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT: paddq %xmm1, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
+; SSE2-NEXT: paddq %xmm4, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm3
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm0
+; SSE4-NEXT: paddq %xmm2, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm1
+; SSE4-NEXT: paddq %xmm3, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX512-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = sext <4 x i32> %a0 to <4 x i64>
+ %x1 = sext <4 x i32> %a1 to <4 x i64>
+ %sum = add <4 x i64> %x0, %x1
+ %shift = ashr <4 x i64> %sum, <i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <4 x i64> %shift to <4 x i32>
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE2-LABEL: test_fixed_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: paddq %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_fixed_v2i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm0, %xmm2
+; SSE4-NEXT: pand %xmm1, %xmm2
+; SSE4-NEXT: pxor %xmm1, %xmm0
+; SSE4-NEXT: movdqa %xmm0, %xmm1
+; SSE4-NEXT: psrad $1, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; SSE4-NEXT: paddq %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX2-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpsraq $1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: retq
+ %and = and <2 x i64> %a0, %a1
+ %xor = xor <2 x i64> %a1, %a0
+ %shift = ashr <2 x i64> %xor, <i64 1, i64 1>
+ %res = add <2 x i64> %and, %shift
+ ret <2 x i64> %res
+}
+
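+; At i64 element width the widening pattern is scalarized to i128 arithmetic:
+; each lane is added as a 64-bit add-with-carry (add/adc) into its sign word,
+; and shld $63 extracts bits [64:1] of the 128-bit sum, i.e. the halved value.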
+define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE2-LABEL: test_ext_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %xmm0, %rdx
+; SSE2-NEXT: movq %rdx, %rsi
+; SSE2-NEXT: sarq $63, %rsi
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdi
+; SSE2-NEXT: movq %rdi, %r8
+; SSE2-NEXT: sarq $63, %r8
+; SSE2-NEXT: movq %xmm1, %r9
+; SSE2-NEXT: movq %r9, %r10
+; SSE2-NEXT: sarq $63, %r10
+; SSE2-NEXT: addq %r9, %rdx
+; SSE2-NEXT: adcq %rsi, %r10
+; SSE2-NEXT: addq %rdi, %rax
+; SSE2-NEXT: adcq %rcx, %r8
+; SSE2-NEXT: shldq $63, %rax, %r8
+; SSE2-NEXT: shldq $63, %rdx, %r10
+; SSE2-NEXT: movq %r10, %xmm0
+; SSE2-NEXT: movq %r8, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v2i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movq %xmm0, %rax
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: pextrq $1, %xmm0, %rdx
+; SSE4-NEXT: movq %rdx, %rsi
+; SSE4-NEXT: sarq $63, %rsi
+; SSE4-NEXT: movq %xmm1, %rdi
+; SSE4-NEXT: movq %rdi, %r8
+; SSE4-NEXT: sarq $63, %r8
+; SSE4-NEXT: pextrq $1, %xmm1, %r9
+; SSE4-NEXT: movq %r9, %r10
+; SSE4-NEXT: sarq $63, %r10
+; SSE4-NEXT: addq %r9, %rdx
+; SSE4-NEXT: adcq %rsi, %r10
+; SSE4-NEXT: addq %rdi, %rax
+; SSE4-NEXT: adcq %rcx, %r8
+; SSE4-NEXT: shldq $63, %rax, %r8
+; SSE4-NEXT: shldq $63, %rdx, %r10
+; SSE4-NEXT: movq %r10, %xmm1
+; SSE4-NEXT: movq %r8, %xmm0
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE4-NEXT: retq
+;
+; AVX-LABEL: test_ext_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: movq %rax, %rcx
+; AVX-NEXT: sarq $63, %rcx
+; AVX-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX-NEXT: movq %rdx, %rsi
+; AVX-NEXT: sarq $63, %rsi
+; AVX-NEXT: vmovq %xmm1, %rdi
+; AVX-NEXT: movq %rdi, %r8
+; AVX-NEXT: sarq $63, %r8
+; AVX-NEXT: vpextrq $1, %xmm1, %r9
+; AVX-NEXT: movq %r9, %r10
+; AVX-NEXT: sarq $63, %r10
+; AVX-NEXT: addq %r9, %rdx
+; AVX-NEXT: adcq %rsi, %r10
+; AVX-NEXT: addq %rdi, %rax
+; AVX-NEXT: adcq %rcx, %r8
+; AVX-NEXT: shldq $63, %rax, %r8
+; AVX-NEXT: shldq $63, %rdx, %r10
+; AVX-NEXT: vmovq %r10, %xmm0
+; AVX-NEXT: vmovq %r8, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: retq
+ %x0 = sext <2 x i64> %a0 to <2 x i128>
+ %x1 = sext <2 x i64> %a1 to <2 x i128>
+ %sum = add <2 x i128> %x0, %x1
+ %shift = ashr <2 x i128> %sum, <i128 1, i128 1>
+ %res = trunc <2 x i128> %shift to <2 x i64>
+ ret <2 x i64> %res
+}
+
+;
+; 256-bit vectors
+;
+
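+; At 256 bits the AVX1 run splits each operation into two 128-bit halves,
+; while the AVX2 and AVX512 runs keep full-width ymm arithmetic.
+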
+define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE-LABEL: test_fixed_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: paddb %xmm4, %xmm1
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm0
+; SSE-NEXT: paddb %xmm5, %xmm0
+; SSE-NEXT: psubb %xmm3, %xmm0
+; SSE-NEXT: psubb %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} ymm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm0
+; AVX512-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %and = and <32 x i8> %a0, %a1
+ %xor = xor <32 x i8> %a0, %a1
+ %shift = ashr <32 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = add <32 x i8> %and, %shift
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_ext_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE2-LABEL: test_ext_v32i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE2-NEXT: psraw $8, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
+; SSE2-NEXT: psraw $8, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: psraw $8, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
+; SSE2-NEXT: psraw $8, %xmm7
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: paddw %xmm4, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: paddw %xmm5, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: paddw %xmm6, %xmm0
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: paddw %xmm7, %xmm2
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: packuswb %xmm3, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v32i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm4, %xmm5
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm6
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm7
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm8
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm4
+; SSE4-NEXT: paddw %xmm5, %xmm4
+; SSE4-NEXT: pmovsxbw %xmm3, %xmm1
+; SSE4-NEXT: paddw %xmm6, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm3
+; SSE4-NEXT: paddw %xmm7, %xmm3
+; SSE4-NEXT: pmovsxbw %xmm2, %xmm0
+; SSE4-NEXT: paddw %xmm8, %xmm0
+; SSE4-NEXT: psrlw $1, %xmm4
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm3
+; SSE4-NEXT: psrlw $1, %xmm0
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE4-NEXT: pand %xmm2, %xmm0
+; SSE4-NEXT: pand %xmm2, %xmm3
+; SSE4-NEXT: packuswb %xmm3, %xmm0
+; SSE4-NEXT: pand %xmm2, %xmm1
+; SSE4-NEXT: pand %xmm2, %xmm4
+; SSE4-NEXT: packuswb %xmm4, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm5, %xmm5
+; AVX1-NEXT: vpaddw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm5
+; AVX1-NEXT: vpaddw %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm5, %xmm5
+; AVX1-NEXT: vpaddw %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm3
+; AVX2-NEXT: vpaddw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpackuswb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxbw %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxbw %ymm1, %zmm1
+; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = sext <32 x i8> %a0 to <32 x i16>
+ %x1 = sext <32 x i8> %a1 to <32 x i16>
+ %sum = add <32 x i16> %x0, %x1
+ %shift = ashr <32 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <32 x i16> %shift to <32 x i8>
+ ret <32 x i8> %res
+}
+
+define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE-LABEL: test_fixed_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: paddw %xmm4, %xmm1
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: paddw %xmm5, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddw %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %and = and <16 x i16> %a0, %a1
+ %xor = xor <16 x i16> %a1, %a0
+ %shift = ashr <16 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <16 x i16> %and, %shift
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE2-LABEL: test_ext_v16i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
+; SSE2-NEXT: psrad $16, %xmm6
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
+; SSE2-NEXT: psrad $16, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7]
+; SSE2-NEXT: psrad $16, %xmm8
+; SSE2-NEXT: paddd %xmm4, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: paddd %xmm5, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: paddd %xmm6, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: paddd %xmm7, %xmm1
+; SSE2-NEXT: pslld $15, %xmm8
+; SSE2-NEXT: psrad $16, %xmm8
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm8, %xmm0
+; SSE2-NEXT: pslld $15, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: pslld $15, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm4, %xmm5
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm6
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm7
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm8
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm4
+; SSE4-NEXT: paddd %xmm5, %xmm4
+; SSE4-NEXT: pmovsxwd %xmm3, %xmm1
+; SSE4-NEXT: paddd %xmm6, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm3
+; SSE4-NEXT: paddd %xmm7, %xmm3
+; SSE4-NEXT: pmovsxwd %xmm2, %xmm0
+; SSE4-NEXT: paddd %xmm8, %xmm0
+; SSE4-NEXT: psrld $1, %xmm4
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: psrld $1, %xmm3
+; SSE4-NEXT: psrld $1, %xmm0
+; SSE4-NEXT: pxor %xmm2, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1],xmm3[2],xmm2[3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
+; SSE4-NEXT: packusdw %xmm3, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm2[1],xmm4[2],xmm2[3],xmm4[4],xmm2[5],xmm4[6],xmm2[7]
+; SSE4-NEXT: packusdw %xmm4, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm5, %xmm5
+; AVX1-NEXT: vpaddd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm5
+; AVX1-NEXT: vpaddd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm5, %xmm5
+; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm3
+; AVX2-NEXT: vpaddd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm2, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7],ymm1[8],ymm2[9],ymm1[10],ymm2[11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
+; AVX2-NEXT: vpackusdw %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = sext <16 x i16> %a0 to <16 x i32>
+ %x1 = sext <16 x i16> %a1 to <16 x i32>
+ %sum = add <16 x i32> %x0, %x1
+ %shift = ashr <16 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <16 x i32> %shift to <16 x i16>
+ ret <16 x i16> %res
+}
+
+define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE-LABEL: test_fixed_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: paddd %xmm4, %xmm1
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: paddd %xmm5, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddd %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddd %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %and = and <8 x i32> %a0, %a1
+ %xor = xor <8 x i32> %a1, %a0
+ %shift = ashr <8 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = add <8 x i32> %and, %shift
+ ret <8 x i32> %res
+}
+
+define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE2-LABEL: test_ext_v8i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; SSE2-NEXT: paddq %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
+; SSE2-NEXT: paddq %xmm6, %xmm8
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-NEXT: paddq %xmm2, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; SSE2-NEXT: paddq %xmm7, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm8
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm8[0,2]
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm4
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm5
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm6
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm7
+; SSE4-NEXT: pmovsxdq %xmm3, %xmm1
+; SSE4-NEXT: paddq %xmm4, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm3
+; SSE4-NEXT: paddq %xmm5, %xmm3
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm0
+; SSE4-NEXT: paddq %xmm6, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm2
+; SSE4-NEXT: paddq %xmm7, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm6, %xmm6
+; AVX1-NEXT: vpaddq %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm6, %xmm6
+; AVX1-NEXT: vpaddq %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxdq %xmm5, %xmm5
+; AVX1-NEXT: vpaddq %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpmovsxdq %xmm3, %ymm3
+; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm2[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = sext <8 x i32> %a0 to <8 x i64>
+ %x1 = sext <8 x i32> %a1 to <8 x i64>
+ %sum = add <8 x i64> %x0, %x1
+ %shift = ashr <8 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <8 x i64> %shift to <8 x i32>
+ ret <8 x i32> %res
+}
+
+define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE2-LABEL: test_fixed_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: pand %xmm2, %xmm5
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: paddq %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: paddq %xmm5, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_fixed_v4i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm1, %xmm4
+; SSE4-NEXT: pand %xmm3, %xmm4
+; SSE4-NEXT: movdqa %xmm0, %xmm5
+; SSE4-NEXT: pand %xmm2, %xmm5
+; SSE4-NEXT: pxor %xmm2, %xmm0
+; SSE4-NEXT: pxor %xmm3, %xmm1
+; SSE4-NEXT: movdqa %xmm1, %xmm2
+; SSE4-NEXT: psrad $1, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; SSE4-NEXT: paddq %xmm4, %xmm1
+; SSE4-NEXT: movdqa %xmm0, %xmm2
+; SSE4-NEXT: psrad $1, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE4-NEXT: paddq %xmm5, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3],xmm3[4,5],xmm1[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddq %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsraq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %and = and <4 x i64> %a0, %a1
+ %xor = xor <4 x i64> %a1, %a0
+ %shift = ashr <4 x i64> %xor, <i64 1, i64 1, i64 1, i64 1>
+ %res = add <4 x i64> %and, %shift
+ ret <4 x i64> %res
+}
+
+define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE2-LABEL: test_ext_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: .cfi_offset %rbx, -56
+; SSE2-NEXT: .cfi_offset %r12, -48
+; SSE2-NEXT: .cfi_offset %r13, -40
+; SSE2-NEXT: .cfi_offset %r14, -32
+; SSE2-NEXT: .cfi_offset %r15, -24
+; SSE2-NEXT: .cfi_offset %rbp, -16
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm4, %rdx
+; SSE2-NEXT: movq %rdx, %r14
+; SSE2-NEXT: sarq $63, %r14
+; SSE2-NEXT: movq %xmm1, %rcx
+; SSE2-NEXT: movq %rcx, %r10
+; SSE2-NEXT: sarq $63, %r10
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %rsi
+; SSE2-NEXT: movq %rsi, %r11
+; SSE2-NEXT: sarq $63, %r11
+; SSE2-NEXT: movq %xmm0, %r8
+; SSE2-NEXT: movq %r8, %rbx
+; SSE2-NEXT: sarq $63, %rbx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdi
+; SSE2-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %rdi
+; SSE2-NEXT: movq %xmm3, %r15
+; SSE2-NEXT: movq %r15, %r9
+; SSE2-NEXT: sarq $63, %r9
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r12
+; SSE2-NEXT: movq %r12, %r13
+; SSE2-NEXT: sarq $63, %r13
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movq %rax, %rbp
+; SSE2-NEXT: sarq $63, %rbp
+; SSE2-NEXT: addq %rax, %r8
+; SSE2-NEXT: adcq %rbx, %rbp
+; SSE2-NEXT: addq %r12, %rsi
+; SSE2-NEXT: adcq %r11, %r13
+; SSE2-NEXT: addq %r15, %rcx
+; SSE2-NEXT: adcq %r10, %r9
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; SSE2-NEXT: adcq %r14, %rdi
+; SSE2-NEXT: shldq $63, %rdx, %rdi
+; SSE2-NEXT: shldq $63, %rcx, %r9
+; SSE2-NEXT: shldq $63, %rsi, %r13
+; SSE2-NEXT: shldq $63, %r8, %rbp
+; SSE2-NEXT: movq %rbp, %xmm0
+; SSE2-NEXT: movq %r13, %xmm2
+; SSE2-NEXT: movq %r9, %xmm1
+; SSE2-NEXT: movq %rdi, %xmm3
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pushq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: pushq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: pushq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: pushq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: pushq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: pushq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: .cfi_offset %rbx, -56
+; SSE4-NEXT: .cfi_offset %r12, -48
+; SSE4-NEXT: .cfi_offset %r13, -40
+; SSE4-NEXT: .cfi_offset %r14, -32
+; SSE4-NEXT: .cfi_offset %r15, -24
+; SSE4-NEXT: .cfi_offset %rbp, -16
+; SSE4-NEXT: movq %xmm1, %rdi
+; SSE4-NEXT: movq %rdi, %r14
+; SSE4-NEXT: sarq $63, %r14
+; SSE4-NEXT: pextrq $1, %xmm1, %rcx
+; SSE4-NEXT: movq %rcx, %r10
+; SSE4-NEXT: sarq $63, %r10
+; SSE4-NEXT: movq %xmm0, %rsi
+; SSE4-NEXT: movq %rsi, %r11
+; SSE4-NEXT: sarq $63, %r11
+; SSE4-NEXT: pextrq $1, %xmm0, %r8
+; SSE4-NEXT: movq %r8, %rbx
+; SSE4-NEXT: sarq $63, %rbx
+; SSE4-NEXT: movq %xmm3, %rdx
+; SSE4-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %rdx
+; SSE4-NEXT: pextrq $1, %xmm3, %r15
+; SSE4-NEXT: movq %r15, %r9
+; SSE4-NEXT: sarq $63, %r9
+; SSE4-NEXT: movq %xmm2, %r12
+; SSE4-NEXT: movq %r12, %r13
+; SSE4-NEXT: sarq $63, %r13
+; SSE4-NEXT: pextrq $1, %xmm2, %rax
+; SSE4-NEXT: movq %rax, %rbp
+; SSE4-NEXT: sarq $63, %rbp
+; SSE4-NEXT: addq %rax, %r8
+; SSE4-NEXT: adcq %rbx, %rbp
+; SSE4-NEXT: addq %r12, %rsi
+; SSE4-NEXT: adcq %r11, %r13
+; SSE4-NEXT: addq %r15, %rcx
+; SSE4-NEXT: adcq %r10, %r9
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
+; SSE4-NEXT: adcq %r14, %rdx
+; SSE4-NEXT: shldq $63, %rdi, %rdx
+; SSE4-NEXT: shldq $63, %rcx, %r9
+; SSE4-NEXT: shldq $63, %rsi, %r13
+; SSE4-NEXT: shldq $63, %r8, %rbp
+; SSE4-NEXT: movq %rbp, %xmm2
+; SSE4-NEXT: movq %r13, %xmm0
+; SSE4-NEXT: movq %r9, %xmm3
+; SSE4-NEXT: movq %rdx, %xmm1
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE4-NEXT: popq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: popq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: popq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: popq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: popq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 8
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: vmovq %xmm0, %rdx
+; AVX1-NEXT: movq %rdx, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX1-NEXT: movq %rcx, %r10
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %rsi
+; AVX1-NEXT: movq %rsi, %r11
+; AVX1-NEXT: sarq $63, %r11
+; AVX1-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX1-NEXT: movq %rdi, %rbx
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vmovq %xmm1, %r8
+; AVX1-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vpextrq $1, %xmm1, %r15
+; AVX1-NEXT: movq %r15, %r9
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %r12
+; AVX1-NEXT: movq %r12, %r13
+; AVX1-NEXT: sarq $63, %r13
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: movq %rax, %rbp
+; AVX1-NEXT: sarq $63, %rbp
+; AVX1-NEXT: addq %rax, %rdi
+; AVX1-NEXT: adcq %rbx, %rbp
+; AVX1-NEXT: addq %r12, %rsi
+; AVX1-NEXT: adcq %r11, %r13
+; AVX1-NEXT: addq %r15, %rcx
+; AVX1-NEXT: adcq %r10, %r9
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; AVX1-NEXT: adcq %r14, %r8
+; AVX1-NEXT: shldq $63, %rdx, %r8
+; AVX1-NEXT: shldq $63, %rcx, %r9
+; AVX1-NEXT: shldq $63, %rsi, %r13
+; AVX1-NEXT: shldq $63, %rdi, %rbp
+; AVX1-NEXT: vmovq %rbp, %xmm0
+; AVX1-NEXT: vmovq %r13, %xmm1
+; AVX1-NEXT: vmovq %r9, %xmm2
+; AVX1-NEXT: vmovq %r8, %xmm3
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 8
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: vmovq %xmm0, %rdx
+; AVX2-NEXT: movq %rdx, %r14
+; AVX2-NEXT: sarq $63, %r14
+; AVX2-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX2-NEXT: movq %rcx, %r10
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rsi
+; AVX2-NEXT: movq %rsi, %r11
+; AVX2-NEXT: sarq $63, %r11
+; AVX2-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX2-NEXT: movq %rdi, %rbx
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vmovq %xmm1, %r8
+; AVX2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r8
+; AVX2-NEXT: vpextrq $1, %xmm1, %r15
+; AVX2-NEXT: movq %r15, %r9
+; AVX2-NEXT: sarq $63, %r9
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %r12
+; AVX2-NEXT: movq %r12, %r13
+; AVX2-NEXT: sarq $63, %r13
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: movq %rax, %rbp
+; AVX2-NEXT: sarq $63, %rbp
+; AVX2-NEXT: addq %rax, %rdi
+; AVX2-NEXT: adcq %rbx, %rbp
+; AVX2-NEXT: addq %r12, %rsi
+; AVX2-NEXT: adcq %r11, %r13
+; AVX2-NEXT: addq %r15, %rcx
+; AVX2-NEXT: adcq %r10, %r9
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; AVX2-NEXT: adcq %r14, %r8
+; AVX2-NEXT: shldq $63, %rdx, %r8
+; AVX2-NEXT: shldq $63, %rcx, %r9
+; AVX2-NEXT: shldq $63, %rsi, %r13
+; AVX2-NEXT: shldq $63, %rdi, %rbp
+; AVX2-NEXT: vmovq %rbp, %xmm0
+; AVX2-NEXT: vmovq %r13, %xmm1
+; AVX2-NEXT: vmovq %r9, %xmm2
+; AVX2-NEXT: vmovq %r8, %xmm3
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 8
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: .cfi_offset %rbx, -56
+; AVX512-NEXT: .cfi_offset %r12, -48
+; AVX512-NEXT: .cfi_offset %r13, -40
+; AVX512-NEXT: .cfi_offset %r14, -32
+; AVX512-NEXT: .cfi_offset %r15, -24
+; AVX512-NEXT: .cfi_offset %rbp, -16
+; AVX512-NEXT: vmovq %xmm0, %rdx
+; AVX512-NEXT: movq %rdx, %r14
+; AVX512-NEXT: sarq $63, %r14
+; AVX512-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX512-NEXT: movq %rcx, %r10
+; AVX512-NEXT: sarq $63, %r10
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %rsi
+; AVX512-NEXT: movq %rsi, %r11
+; AVX512-NEXT: sarq $63, %r11
+; AVX512-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX512-NEXT: movq %rdi, %rbx
+; AVX512-NEXT: sarq $63, %rbx
+; AVX512-NEXT: vmovq %xmm1, %r8
+; AVX512-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r8
+; AVX512-NEXT: vpextrq $1, %xmm1, %r15
+; AVX512-NEXT: movq %r15, %r9
+; AVX512-NEXT: sarq $63, %r9
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %r12
+; AVX512-NEXT: movq %r12, %r13
+; AVX512-NEXT: sarq $63, %r13
+; AVX512-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512-NEXT: movq %rax, %rbp
+; AVX512-NEXT: sarq $63, %rbp
+; AVX512-NEXT: addq %rax, %rdi
+; AVX512-NEXT: adcq %rbx, %rbp
+; AVX512-NEXT: addq %r12, %rsi
+; AVX512-NEXT: adcq %r11, %r13
+; AVX512-NEXT: addq %r15, %rcx
+; AVX512-NEXT: adcq %r10, %r9
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; AVX512-NEXT: adcq %r14, %r8
+; AVX512-NEXT: shldq $63, %rdx, %r8
+; AVX512-NEXT: shldq $63, %rcx, %r9
+; AVX512-NEXT: shldq $63, %rsi, %r13
+; AVX512-NEXT: shldq $63, %rdi, %rbp
+; AVX512-NEXT: vmovq %rbp, %xmm0
+; AVX512-NEXT: vmovq %r13, %xmm1
+; AVX512-NEXT: vmovq %r9, %xmm2
+; AVX512-NEXT: vmovq %r8, %xmm3
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 8
+; AVX512-NEXT: retq
+ %x0 = sext <4 x i64> %a0 to <4 x i128>
+ %x1 = sext <4 x i64> %a1 to <4 x i128>
+ %sum = add <4 x i128> %x0, %x1
+ %shift = ashr <4 x i128> %sum, <i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <4 x i128> %shift to <4 x i64>
+ ret <4 x i64> %res
+}
+
+;
+; 512-bit vectors
+;
+
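+; The "fixed" form computes the signed floor-average directly as
+; (a0 & a1) + ((a0 ^ a1) ashr 1). x86 has no packed arithmetic shift for
+; bytes, so the ashr is emulated with psrlw $1, a mask to 7 bits, and the
+; xor-64 / sub-64 sign fixup visible below.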
+define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE-LABEL: test_fixed_v64i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm10
+; SSE-NEXT: pand %xmm7, %xmm10
+; SSE-NEXT: movdqa %xmm2, %xmm11
+; SSE-NEXT: pand %xmm6, %xmm11
+; SSE-NEXT: movdqa %xmm1, %xmm9
+; SSE-NEXT: pand %xmm5, %xmm9
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlw $1, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm5, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm4, %xmm3
+; SSE-NEXT: paddb %xmm10, %xmm3
+; SSE-NEXT: psrlw $1, %xmm2
+; SSE-NEXT: pand %xmm5, %xmm2
+; SSE-NEXT: pxor %xmm4, %xmm2
+; SSE-NEXT: paddb %xmm11, %xmm2
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: pand %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm4, %xmm1
+; SSE-NEXT: paddb %xmm9, %xmm1
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: paddb %xmm8, %xmm0
+; SSE-NEXT: psubb %xmm4, %xmm0
+; SSE-NEXT: psubb %xmm4, %xmm1
+; SSE-NEXT: psubb %xmm4, %xmm2
+; SSE-NEXT: psubb %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm7
+; AVX1-NEXT: vpand %xmm3, %xmm7, %xmm7
+; AVX1-NEXT: vpxor %xmm6, %xmm7, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
+; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm5, %xmm7, %xmm3
+; AVX1-NEXT: vpsubb %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpaddb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubb %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm3 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm5, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} zmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm0
+; AVX512-NEXT: vpaddb %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpsubb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %and = and <64 x i8> %a0, %a1
+ %xor = xor <64 x i8> %a0, %a1
+ %shift = ashr <64 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = add <64 x i8> %and, %shift
+ ret <64 x i8> %res
+}
+
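+; The "ext" form widens to i16, adds, shifts right by one and truncates.
+; It agrees with the fixed form by the identity a + b == (a ^ b) + 2*(a & b),
+; so both yield floor((x + y) / 2) for each pair of elements.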
+define <64 x i8> @test_ext_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE2-LABEL: test_ext_v64i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm3[0],xmm13[1],xmm3[1],xmm13[2],xmm3[2],xmm13[3],xmm3[3],xmm13[4],xmm3[4],xmm13[5],xmm3[5],xmm13[6],xmm3[6],xmm13[7],xmm3[7]
+; SSE2-NEXT: psraw $8, %xmm13
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm3[8],xmm14[9],xmm3[9],xmm14[10],xmm3[10],xmm14[11],xmm3[11],xmm14[12],xmm3[12],xmm14[13],xmm3[13],xmm14[14],xmm3[14],xmm14[15],xmm3[15]
+; SSE2-NEXT: psraw $8, %xmm14
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm2[0],xmm15[1],xmm2[1],xmm15[2],xmm2[2],xmm15[3],xmm2[3],xmm15[4],xmm2[4],xmm15[5],xmm2[5],xmm15[6],xmm2[6],xmm15[7],xmm2[7]
+; SSE2-NEXT: psraw $8, %xmm15
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm2[8],xmm12[9],xmm2[9],xmm12[10],xmm2[10],xmm12[11],xmm2[11],xmm12[12],xmm2[12],xmm12[13],xmm2[13],xmm12[14],xmm2[14],xmm12[15],xmm2[15]
+; SSE2-NEXT: psraw $8, %xmm12
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3],xmm11[4],xmm1[4],xmm11[5],xmm1[5],xmm11[6],xmm1[6],xmm11[7],xmm1[7]
+; SSE2-NEXT: psraw $8, %xmm11
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm1[8],xmm10[9],xmm1[9],xmm10[10],xmm1[10],xmm10[11],xmm1[11],xmm10[12],xmm1[12],xmm10[13],xmm1[13],xmm10[14],xmm1[14],xmm10[15],xmm1[15]
+; SSE2-NEXT: psraw $8, %xmm10
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3],xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; SSE2-NEXT: psraw $8, %xmm9
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
+; SSE2-NEXT: psraw $8, %xmm8
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3],xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: paddw %xmm13, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm7
+; SSE2-NEXT: paddw %xmm14, %xmm7
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: paddw %xmm15, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm6
+; SSE2-NEXT: paddw %xmm12, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: paddw %xmm11, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm5
+; SSE2-NEXT: paddw %xmm10, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: paddw %xmm9, %xmm0
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm4
+; SSE2-NEXT: paddw %xmm8, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm7
+; SSE2-NEXT: psrlw $1, %xmm2
+; SSE2-NEXT: psrlw $1, %xmm6
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm5
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pand %xmm8, %xmm4
+; SSE2-NEXT: pand %xmm8, %xmm0
+; SSE2-NEXT: packuswb %xmm4, %xmm0
+; SSE2-NEXT: pand %xmm8, %xmm5
+; SSE2-NEXT: pand %xmm8, %xmm1
+; SSE2-NEXT: packuswb %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm8, %xmm6
+; SSE2-NEXT: pand %xmm8, %xmm2
+; SSE2-NEXT: packuswb %xmm6, %xmm2
+; SSE2-NEXT: pand %xmm8, %xmm7
+; SSE2-NEXT: pand %xmm8, %xmm3
+; SSE2-NEXT: packuswb %xmm7, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v64i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm8, %xmm9
+; SSE4-NEXT: pmovsxbw %xmm3, %xmm10
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm3, %xmm11
+; SSE4-NEXT: pmovsxbw %xmm2, %xmm12
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm2, %xmm13
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm14
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm15
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm0
+; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm8
+; SSE4-NEXT: paddw %xmm9, %xmm8
+; SSE4-NEXT: pmovsxbw %xmm7, %xmm3
+; SSE4-NEXT: paddw %xmm10, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm7
+; SSE4-NEXT: paddw %xmm11, %xmm7
+; SSE4-NEXT: pmovsxbw %xmm6, %xmm2
+; SSE4-NEXT: paddw %xmm12, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm6
+; SSE4-NEXT: paddw %xmm13, %xmm6
+; SSE4-NEXT: pmovsxbw %xmm5, %xmm1
+; SSE4-NEXT: paddw %xmm14, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm5
+; SSE4-NEXT: paddw %xmm15, %xmm5
+; SSE4-NEXT: pmovsxbw %xmm4, %xmm0
+; SSE4-NEXT: paddw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE4-NEXT: psrlw $1, %xmm8
+; SSE4-NEXT: psrlw $1, %xmm3
+; SSE4-NEXT: psrlw $1, %xmm7
+; SSE4-NEXT: psrlw $1, %xmm2
+; SSE4-NEXT: psrlw $1, %xmm6
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm5
+; SSE4-NEXT: psrlw $1, %xmm0
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE4-NEXT: pand %xmm4, %xmm0
+; SSE4-NEXT: pand %xmm4, %xmm5
+; SSE4-NEXT: packuswb %xmm5, %xmm0
+; SSE4-NEXT: pand %xmm4, %xmm1
+; SSE4-NEXT: pand %xmm4, %xmm6
+; SSE4-NEXT: packuswb %xmm6, %xmm1
+; SSE4-NEXT: pand %xmm4, %xmm2
+; SSE4-NEXT: pand %xmm4, %xmm7
+; SSE4-NEXT: packuswb %xmm7, %xmm2
+; SSE4-NEXT: pand %xmm4, %xmm3
+; SSE4-NEXT: pand %xmm4, %xmm8
+; SSE4-NEXT: packuswb %xmm8, %xmm3
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm6, %xmm6
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm7, %xmm7
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm9, %xmm9
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm10, %xmm10
+; AVX1-NEXT: vpaddw %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm10
+; AVX1-NEXT: vpaddw %xmm5, %xmm10, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm10, %xmm10
+; AVX1-NEXT: vpaddw %xmm6, %xmm10, %xmm6
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm7
+; AVX1-NEXT: vpaddw %xmm7, %xmm8, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm8, %xmm8
+; AVX1-NEXT: vpaddw %xmm8, %xmm9, %xmm8
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm4
+; AVX1-NEXT: vpsrlw $1, %xmm6, %xmm5
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm7, %xmm6
+; AVX1-NEXT: vpsrlw $1, %xmm8, %xmm7
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm0, %xmm8, %xmm0
+; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm7
+; AVX1-NEXT: vpackuswb %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm6, %xmm8, %xmm6
+; AVX1-NEXT: vpand %xmm3, %xmm8, %xmm3
+; AVX1-NEXT: vpackuswb %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vpand %xmm1, %xmm8, %xmm1
+; AVX1-NEXT: vpand %xmm5, %xmm8, %xmm3
+; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm8, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm8, %xmm2
+; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm5
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxbw %xmm3, %ymm6
+; AVX2-NEXT: vpaddw %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
+; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3
+; AVX2-NEXT: vpaddw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpmovsxbw %xmm2, %ymm3
+; AVX2-NEXT: vpaddw %ymm3, %ymm5, %ymm3
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm4, %ymm2
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm3, %ymm3
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpackuswb %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpackuswb %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vpmovsxbw %ymm2, %zmm2
+; AVX512-NEXT: vpmovsxbw %ymm0, %zmm0
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512-NEXT: vpmovsxbw %ymm3, %zmm3
+; AVX512-NEXT: vpaddw %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vpmovsxbw %ymm1, %zmm1
+; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm2, %zmm1
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = sext <64 x i8> %a0 to <64 x i16>
+ %x1 = sext <64 x i8> %a1 to <64 x i16>
+ %sum = add <64 x i16> %x0, %x1
+ %shift = ashr <64 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <64 x i16> %shift to <64 x i8>
+ ret <64 x i8> %res
+}
+
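+; i16 elements have a native arithmetic shift (psraw), so the fixed form
+; lowers straight to pand/pxor/psraw/paddw per register.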
+define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE-LABEL: test_fixed_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pand %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: pand %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psraw $1, %xmm3
+; SSE-NEXT: paddw %xmm8, %xmm3
+; SSE-NEXT: psraw $1, %xmm2
+; SSE-NEXT: paddw %xmm9, %xmm2
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: paddw %xmm10, %xmm1
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: paddw %xmm11, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpaddw %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpaddw %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsraw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsraw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %and = and <32 x i16> %a0, %a1
+ %xor = xor <32 x i16> %a1, %a0
+ %shift = ashr <32 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <32 x i16> %and, %shift
+ ret <32 x i16> %res
+}
+
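+; Widening i16 to i32: pre-AVX512 targets sign-extend with punpck+psrad or
+; pmovsxwd, add, shift right by one, then repack via packssdw/packusdw.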
+define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE2-LABEL: test_ext_v32i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm3, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
+; SSE2-NEXT: psrad $16, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
+; SSE2-NEXT: psrad $16, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1],xmm14[2],xmm1[2],xmm14[3],xmm1[3]
+; SSE2-NEXT: psrad $16, %xmm14
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm2[4],xmm13[5],xmm2[5],xmm13[6],xmm2[6],xmm13[7],xmm2[7]
+; SSE2-NEXT: psrad $16, %xmm13
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3]
+; SSE2-NEXT: psrad $16, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm9[4],xmm11[5],xmm9[5],xmm11[6],xmm9[6],xmm11[7],xmm9[7]
+; SSE2-NEXT: psrad $16, %xmm11
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; SSE2-NEXT: psrad $16, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
+; SSE2-NEXT: psrad $16, %xmm9
+; SSE2-NEXT: paddd %xmm8, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: paddd %xmm15, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7]
+; SSE2-NEXT: psrad $16, %xmm8
+; SSE2-NEXT: paddd %xmm3, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: paddd %xmm14, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: paddd %xmm13, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: paddd %xmm12, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: paddd %xmm11, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3]
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: paddd %xmm10, %xmm3
+; SSE2-NEXT: pslld $15, %xmm9
+; SSE2-NEXT: psrad $16, %xmm9
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm9, %xmm0
+; SSE2-NEXT: pslld $15, %xmm8
+; SSE2-NEXT: psrad $16, %xmm8
+; SSE2-NEXT: pslld $15, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm8, %xmm1
+; SSE2-NEXT: pslld $15, %xmm5
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: pslld $15, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: packssdw %xmm5, %xmm2
+; SSE2-NEXT: pslld $15, %xmm4
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: pslld $15, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: packssdw %xmm4, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v32i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm8, %xmm9
+; SSE4-NEXT: pmovsxwd %xmm3, %xmm10
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm3, %xmm11
+; SSE4-NEXT: pmovsxwd %xmm2, %xmm12
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm2, %xmm13
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm14
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm15
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm0
+; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm8
+; SSE4-NEXT: paddd %xmm9, %xmm8
+; SSE4-NEXT: pmovsxwd %xmm7, %xmm3
+; SSE4-NEXT: paddd %xmm10, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm7
+; SSE4-NEXT: paddd %xmm11, %xmm7
+; SSE4-NEXT: pmovsxwd %xmm6, %xmm2
+; SSE4-NEXT: paddd %xmm12, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm6
+; SSE4-NEXT: paddd %xmm13, %xmm6
+; SSE4-NEXT: pmovsxwd %xmm5, %xmm1
+; SSE4-NEXT: paddd %xmm14, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm5
+; SSE4-NEXT: paddd %xmm15, %xmm5
+; SSE4-NEXT: pmovsxwd %xmm4, %xmm0
+; SSE4-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE4-NEXT: psrld $1, %xmm8
+; SSE4-NEXT: psrld $1, %xmm3
+; SSE4-NEXT: psrld $1, %xmm7
+; SSE4-NEXT: psrld $1, %xmm2
+; SSE4-NEXT: psrld $1, %xmm6
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: psrld $1, %xmm5
+; SSE4-NEXT: psrld $1, %xmm0
+; SSE4-NEXT: pxor %xmm4, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0],xmm4[1],xmm5[2],xmm4[3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
+; SSE4-NEXT: packusdw %xmm5, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0],xmm4[1],xmm6[2],xmm4[3],xmm6[4],xmm4[5],xmm6[6],xmm4[7]
+; SSE4-NEXT: packusdw %xmm6, %xmm1
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0],xmm4[1],xmm7[2],xmm4[3],xmm7[4],xmm4[5],xmm7[6],xmm4[7]
+; SSE4-NEXT: packusdw %xmm7, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0],xmm4[1],xmm8[2],xmm4[3],xmm8[4],xmm4[5],xmm8[6],xmm4[7]
+; SSE4-NEXT: packusdw %xmm8, %xmm3
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm6, %xmm6
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm7, %xmm7
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm9, %xmm9
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm10, %xmm10
+; AVX1-NEXT: vpaddd %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovsxwd %xmm3, %xmm10
+; AVX1-NEXT: vpaddd %xmm5, %xmm10, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm10, %xmm10
+; AVX1-NEXT: vpaddd %xmm6, %xmm10, %xmm6
+; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm7
+; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm8, %xmm8
+; AVX1-NEXT: vpaddd %xmm8, %xmm9, %xmm8
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm6, %xmm5
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm7, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm8, %xmm7
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm8[1],xmm0[2],xmm8[3],xmm0[4],xmm8[5],xmm0[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm8[1],xmm7[2],xmm8[3],xmm7[4],xmm8[5],xmm7[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2],xmm8[3],xmm6[4],xmm8[5],xmm6[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm8[1],xmm3[2],xmm8[3],xmm3[4],xmm8[5],xmm3[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm8[1],xmm1[2],xmm8[3],xmm1[4],xmm8[5],xmm1[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0],xmm8[1],xmm5[2],xmm8[3],xmm5[4],xmm8[5],xmm5[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm8[1],xmm4[2],xmm8[3],xmm4[4],xmm8[5],xmm4[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2],xmm8[3],xmm2[4],xmm8[5],xmm2[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm5
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxwd %xmm3, %ymm6
+; AVX2-NEXT: vpaddd %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
+; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
+; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpmovsxwd %xmm2, %ymm3
+; AVX2-NEXT: vpaddd %ymm3, %ymm5, %ymm3
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm4, %ymm2
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm3, %ymm3
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3],ymm0[4],ymm4[5],ymm0[6],ymm4[7],ymm0[8],ymm4[9],ymm0[10],ymm4[11],ymm0[12],ymm4[13],ymm0[14],ymm4[15]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7],ymm3[8],ymm4[9],ymm3[10],ymm4[11],ymm3[12],ymm4[13],ymm3[14],ymm4[15]
+; AVX2-NEXT: vpackusdw %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3],ymm1[4],ymm4[5],ymm1[6],ymm4[7],ymm1[8],ymm4[9],ymm1[10],ymm4[11],ymm1[12],ymm4[13],ymm1[14],ymm4[15]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3],ymm2[4],ymm4[5],ymm2[6],ymm4[7],ymm2[8],ymm4[9],ymm2[10],ymm4[11],ymm2[12],ymm4[13],ymm2[14],ymm4[15]
+; AVX2-NEXT: vpackusdw %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vpmovsxwd %ymm2, %zmm2
+; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512-NEXT: vpmovsxwd %ymm3, %zmm3
+; AVX512-NEXT: vpaddd %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm2, %zmm1
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = sext <32 x i16> %a0 to <32 x i32>
+ %x1 = sext <32 x i16> %a1 to <32 x i32>
+ %sum = add <32 x i32> %x0, %x1
+ %shift = ashr <32 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <32 x i32> %shift to <32 x i16>
+ ret <32 x i16> %res
+}
+
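+; psrad gives a native i32 arithmetic shift, so the same and/xor/shift/add
+; pattern as the i16 case applies at every vector width.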
+define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE-LABEL: test_fixed_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pand %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: pand %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrad $1, %xmm3
+; SSE-NEXT: paddd %xmm8, %xmm3
+; SSE-NEXT: psrad $1, %xmm2
+; SSE-NEXT: paddd %xmm9, %xmm2
+; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: paddd %xmm10, %xmm1
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: paddd %xmm11, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpaddd %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpaddd %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxord %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsrad $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddd %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %and = and <16 x i32> %a0, %a1
+ %xor = xor <16 x i32> %a1, %a0
+ %shift = ashr <16 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = add <16 x i32> %and, %shift
+ ret <16 x i32> %res
+}
+
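+; Widening i32 to i64: SSE2 materializes the sign bits with pcmpgtd and
+; interleaves them via punpckldq before the 64-bit add and shift.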
+define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE2-LABEL: test_ext_v16i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm9
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm3[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1]
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pcmpgtd %xmm13, %xmm9
+; SSE2-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm9
+; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm2[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1]
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pcmpgtd %xmm12, %xmm9
+; SSE2-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm9[0],xmm12[1],xmm9[1]
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm9
+; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm1[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm9
+; SSE2-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1]
+; SSE2-NEXT: pxor %xmm10, %xmm10
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm10
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
+; SSE2-NEXT: pxor %xmm10, %xmm10
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm10
+; SSE2-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
+; SSE2-NEXT: pxor %xmm14, %xmm14
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm14
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm7[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1]
+; SSE2-NEXT: paddq %xmm7, %xmm3
+; SSE2-NEXT: pxor %xmm7, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm7
+; SSE2-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm7[0],xmm10[1],xmm7[1]
+; SSE2-NEXT: paddq %xmm13, %xmm10
+; SSE2-NEXT: pxor %xmm13, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm13
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1]
+; SSE2-NEXT: paddq %xmm6, %xmm2
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm6
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
+; SSE2-NEXT: paddq %xmm12, %xmm7
+; SSE2-NEXT: pxor %xmm12, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm12
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
+; SSE2-NEXT: paddq %xmm5, %xmm1
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; SSE2-NEXT: paddq %xmm11, %xmm6
+; SSE2-NEXT: pxor %xmm11, %xmm11
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm11
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1]
+; SSE2-NEXT: paddq %xmm4, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
+; SSE2-NEXT: paddq %xmm9, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm10
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm10[0,2]
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: psrlq $1, %xmm7
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm7[0,2]
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm6
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm6[0,2]
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxdq %xmm3, %xmm8
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm3, %xmm9
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm10
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm11
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm12
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm13
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm14
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm15
+; SSE4-NEXT: pmovsxdq %xmm7, %xmm3
+; SSE4-NEXT: paddq %xmm8, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm7
+; SSE4-NEXT: paddq %xmm9, %xmm7
+; SSE4-NEXT: pmovsxdq %xmm6, %xmm2
+; SSE4-NEXT: paddq %xmm10, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm6
+; SSE4-NEXT: paddq %xmm11, %xmm6
+; SSE4-NEXT: pmovsxdq %xmm5, %xmm1
+; SSE4-NEXT: paddq %xmm12, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm5
+; SSE4-NEXT: paddq %xmm13, %xmm5
+; SSE4-NEXT: pmovsxdq %xmm4, %xmm0
+; SSE4-NEXT: paddq %xmm14, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm4, %xmm4
+; SSE4-NEXT: paddq %xmm15, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm7
+; SSE4-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm7[0,2]
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm6
+; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[0,2]
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm5
+; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm5, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm6, %xmm6
+; AVX1-NEXT: vpmovsxdq %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm7[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm8, %xmm8
+; AVX1-NEXT: vpshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm9, %xmm9
+; AVX1-NEXT: vpmovsxdq %xmm7, %xmm7
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm10
+; AVX1-NEXT: vpshufd {{.*#+}} xmm11 = xmm10[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm11, %xmm11
+; AVX1-NEXT: vpaddq %xmm5, %xmm11, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm11 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm11, %xmm11
+; AVX1-NEXT: vpaddq %xmm6, %xmm11, %xmm6
+; AVX1-NEXT: vpmovsxdq %xmm10, %xmm10
+; AVX1-NEXT: vpaddq %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm10, %xmm10
+; AVX1-NEXT: vpaddq %xmm10, %xmm8, %xmm8
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm10, %xmm10
+; AVX1-NEXT: vpaddq %xmm10, %xmm9, %xmm9
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm6, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm8, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm9, %xmm7
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm3
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm3[0,2],ymm0[4,6],ymm3[4,6]
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
+; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vpmovsxdq %xmm4, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT: vpmovsxdq %xmm5, %ymm5
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
+; AVX2-NEXT: vpmovsxdq %xmm6, %ymm6
+; AVX2-NEXT: vpaddq %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm6
+; AVX2-NEXT: vpmovsxdq %xmm6, %ymm6
+; AVX2-NEXT: vpaddq %ymm6, %ymm5, %ymm5
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpmovsxdq %xmm3, %ymm2
+; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm5[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],ymm4[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vpmovsxdq %ymm2, %zmm2
+; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512-NEXT: vpmovsxdq %ymm3, %zmm3
+; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm2, %zmm1
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = sext <16 x i32> %a0 to <16 x i64>
+ %x1 = sext <16 x i32> %a1 to <16 x i64>
+ %sum = add <16 x i64> %x0, %x1
+ %shift = ashr <16 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <16 x i64> %shift to <16 x i32>
+ ret <16 x i32> %res
+}
+
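+; Packed 64-bit arithmetic shifts only exist on AVX512 (vpsraq); earlier
+; targets assemble the ashr from psrlq on the even dwords and psrad on the
+; odd (sign-carrying) dwords, then recombine.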
+define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE2-LABEL: test_fixed_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm3, %xmm11
+; SSE2-NEXT: pand %xmm7, %xmm11
+; SSE2-NEXT: movdqa %xmm2, %xmm10
+; SSE2-NEXT: pand %xmm6, %xmm10
+; SSE2-NEXT: movdqa %xmm1, %xmm9
+; SSE2-NEXT: pand %xmm5, %xmm9
+; SSE2-NEXT: movdqa %xmm0, %xmm8
+; SSE2-NEXT: pand %xmm4, %xmm8
+; SSE2-NEXT: pxor %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm1, %xmm5
+; SSE2-NEXT: pxor %xmm2, %xmm6
+; SSE2-NEXT: pxor %xmm3, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm7[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; SSE2-NEXT: paddq %xmm11, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-NEXT: paddq %xmm10, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: paddq %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
+; SSE2-NEXT: paddq %xmm8, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_fixed_v8i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm3, %xmm10
+; SSE4-NEXT: pand %xmm7, %xmm10
+; SSE4-NEXT: movdqa %xmm2, %xmm11
+; SSE4-NEXT: pand %xmm6, %xmm11
+; SSE4-NEXT: movdqa %xmm1, %xmm9
+; SSE4-NEXT: pand %xmm5, %xmm9
+; SSE4-NEXT: movdqa %xmm0, %xmm8
+; SSE4-NEXT: pand %xmm4, %xmm8
+; SSE4-NEXT: pxor %xmm4, %xmm0
+; SSE4-NEXT: pxor %xmm5, %xmm1
+; SSE4-NEXT: pxor %xmm6, %xmm2
+; SSE4-NEXT: pxor %xmm7, %xmm3
+; SSE4-NEXT: movdqa %xmm3, %xmm4
+; SSE4-NEXT: psrad $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
+; SSE4-NEXT: paddq %xmm10, %xmm3
+; SSE4-NEXT: movdqa %xmm2, %xmm4
+; SSE4-NEXT: psrad $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; SSE4-NEXT: paddq %xmm11, %xmm2
+; SSE4-NEXT: movdqa %xmm1, %xmm4
+; SSE4-NEXT: psrad $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; SSE4-NEXT: paddq %xmm9, %xmm1
+; SSE4-NEXT: movdqa %xmm0, %xmm4
+; SSE4-NEXT: psrad $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE4-NEXT: paddq %xmm8, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm6
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1],xmm3[2,3],xmm6[4,5],xmm3[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpaddq %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpaddq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm1, %ymm2
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX2-NEXT: vpaddq %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX2-NEXT: vpaddq %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsraq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddq %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %and = and <8 x i64> %a0, %a1
+ %xor = xor <8 x i64> %a1, %a0
+ %shift = ashr <8 x i64> %xor, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = add <8 x i64> %and, %shift
+ ret <8 x i64> %res
+}
+
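+; i64 widened to i128 has no vector lowering, so the elements are moved to
+; GPRs and averaged with scalar add/adc plus a shld-by-63 to extract
+; (sum >> 1) from each 128-bit result.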
+define <8 x i64> @test_ext_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE2-LABEL: test_ext_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: pushq %rax
+; SSE2-NEXT: .cfi_def_cfa_offset 64
+; SSE2-NEXT: .cfi_offset %rbx, -56
+; SSE2-NEXT: .cfi_offset %r12, -48
+; SSE2-NEXT: .cfi_offset %r13, -40
+; SSE2-NEXT: .cfi_offset %r14, -32
+; SSE2-NEXT: .cfi_offset %r15, -24
+; SSE2-NEXT: .cfi_offset %rbp, -16
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm8, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, (%rsp) # 8-byte Spill
+; SSE2-NEXT: movq %xmm3, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm3, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %xmm1, %rbp
+; SSE2-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %rbp
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %rbx
+; SSE2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %rbx
+; SSE2-NEXT: movq %xmm0, %r15
+; SSE2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r15
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r10
+; SSE2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r10
+; SSE2-NEXT: movq %xmm7, %r9
+; SSE2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r9
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r12
+; SSE2-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r12
+; SSE2-NEXT: movq %xmm6, %r13
+; SSE2-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r13
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r14
+; SSE2-NEXT: movq %r14, %rsi
+; SSE2-NEXT: sarq $63, %rsi
+; SSE2-NEXT: movq %xmm5, %r11
+; SSE2-NEXT: movq %r11, %rdx
+; SSE2-NEXT: sarq $63, %rdx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r8
+; SSE2-NEXT: movq %r8, %rdi
+; SSE2-NEXT: sarq $63, %rdi
+; SSE2-NEXT: movq %xmm4, %rcx
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: sarq $63, %rax
+; SSE2-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE2-NEXT: adcq %r15, %rax
+; SSE2-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE2-NEXT: adcq %rbx, %rdi
+; SSE2-NEXT: addq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE2-NEXT: adcq %rbp, %rdx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; SSE2-NEXT: addq %r14, %r15
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; SSE2-NEXT: adcq (%rsp), %r10 # 8-byte Folded Reload
+; SSE2-NEXT: shldq $63, %rcx, %r10
+; SSE2-NEXT: shldq $63, %r8, %r9
+; SSE2-NEXT: shldq $63, %r11, %r12
+; SSE2-NEXT: shldq $63, %rbx, %r13
+; SSE2-NEXT: shldq $63, %r15, %rsi
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE2-NEXT: shldq $63, %rcx, %rdx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE2-NEXT: shldq $63, %rcx, %rdi
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE2-NEXT: shldq $63, %rcx, %rax
+; SSE2-NEXT: movq %rax, %xmm0
+; SSE2-NEXT: movq %rdi, %xmm4
+; SSE2-NEXT: movq %rdx, %xmm1
+; SSE2-NEXT: movq %rsi, %xmm5
+; SSE2-NEXT: movq %r13, %xmm2
+; SSE2-NEXT: movq %r12, %xmm6
+; SSE2-NEXT: movq %r9, %xmm3
+; SSE2-NEXT: movq %r10, %xmm7
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE2-NEXT: addq $8, %rsp
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pushq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: pushq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: pushq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: pushq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: pushq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: pushq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: pushq %rax
+; SSE4-NEXT: .cfi_def_cfa_offset 64
+; SSE4-NEXT: .cfi_offset %rbx, -56
+; SSE4-NEXT: .cfi_offset %r12, -48
+; SSE4-NEXT: .cfi_offset %r13, -40
+; SSE4-NEXT: .cfi_offset %r14, -32
+; SSE4-NEXT: .cfi_offset %r15, -24
+; SSE4-NEXT: .cfi_offset %rbp, -16
+; SSE4-NEXT: movq %xmm3, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, (%rsp) # 8-byte Spill
+; SSE4-NEXT: pextrq $1, %xmm3, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %xmm2, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: pextrq $1, %xmm2, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %xmm1, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: pextrq $1, %xmm1, %rbp
+; SSE4-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %rbp
+; SSE4-NEXT: movq %xmm0, %rbx
+; SSE4-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %rbx
+; SSE4-NEXT: pextrq $1, %xmm0, %r14
+; SSE4-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r14
+; SSE4-NEXT: movq %xmm7, %r10
+; SSE4-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r10
+; SSE4-NEXT: pextrq $1, %xmm7, %r9
+; SSE4-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r9
+; SSE4-NEXT: movq %xmm6, %r15
+; SSE4-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r15
+; SSE4-NEXT: pextrq $1, %xmm6, %r13
+; SSE4-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r13
+; SSE4-NEXT: movq %xmm5, %r12
+; SSE4-NEXT: movq %r12, %rsi
+; SSE4-NEXT: sarq $63, %rsi
+; SSE4-NEXT: pextrq $1, %xmm5, %r11
+; SSE4-NEXT: movq %r11, %rdx
+; SSE4-NEXT: sarq $63, %rdx
+; SSE4-NEXT: movq %xmm4, %r8
+; SSE4-NEXT: movq %r8, %rdi
+; SSE4-NEXT: sarq $63, %rdi
+; SSE4-NEXT: pextrq $1, %xmm4, %rcx
+; SSE4-NEXT: movq %rcx, %rax
+; SSE4-NEXT: sarq $63, %rax
+; SSE4-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE4-NEXT: adcq %r14, %rax
+; SSE4-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE4-NEXT: adcq %rbx, %rdi
+; SSE4-NEXT: addq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE4-NEXT: adcq %rbp, %rdx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; SSE4-NEXT: addq %r12, %r14
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; SSE4-NEXT: adcq (%rsp), %r10 # 8-byte Folded Reload
+; SSE4-NEXT: shldq $63, %rcx, %r10
+; SSE4-NEXT: shldq $63, %r8, %r9
+; SSE4-NEXT: shldq $63, %r11, %r15
+; SSE4-NEXT: shldq $63, %rbx, %r13
+; SSE4-NEXT: shldq $63, %r14, %rsi
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE4-NEXT: shldq $63, %rcx, %rdx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE4-NEXT: shldq $63, %rcx, %rdi
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE4-NEXT: shldq $63, %rcx, %rax
+; SSE4-NEXT: movq %rax, %xmm4
+; SSE4-NEXT: movq %rdi, %xmm0
+; SSE4-NEXT: movq %rdx, %xmm5
+; SSE4-NEXT: movq %rsi, %xmm1
+; SSE4-NEXT: movq %r13, %xmm6
+; SSE4-NEXT: movq %r15, %xmm2
+; SSE4-NEXT: movq %r9, %xmm7
+; SSE4-NEXT: movq %r10, %xmm3
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE4-NEXT: addq $8, %rsp
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: popq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: popq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: popq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: popq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: popq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 8
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: pushq %rax
+; AVX1-NEXT: .cfi_def_cfa_offset 64
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, (%rsp) # 8-byte Spill
+; AVX1-NEXT: vpextrq $1, %xmm1, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vpextrq $1, %xmm1, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vpextrq $1, %xmm0, %rbp
+; AVX1-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %rbp
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %rbx
+; AVX1-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vpextrq $1, %xmm0, %r15
+; AVX1-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vmovq %xmm3, %r9
+; AVX1-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vpextrq $1, %xmm3, %r10
+; AVX1-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %r12
+; AVX1-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vpextrq $1, %xmm0, %r13
+; AVX1-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r13
+; AVX1-NEXT: vmovq %xmm2, %r14
+; AVX1-NEXT: movq %r14, %rsi
+; AVX1-NEXT: sarq $63, %rsi
+; AVX1-NEXT: vpextrq $1, %xmm2, %r11
+; AVX1-NEXT: movq %r11, %rdx
+; AVX1-NEXT: sarq $63, %rdx
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %r8
+; AVX1-NEXT: movq %r8, %rdi
+; AVX1-NEXT: sarq $63, %rdi
+; AVX1-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX1-NEXT: movq %rcx, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX1-NEXT: adcq %r15, %rax
+; AVX1-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX1-NEXT: adcq %rbx, %rdi
+; AVX1-NEXT: addq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX1-NEXT: adcq %rbp, %rdx
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; AVX1-NEXT: addq %r14, %r15
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; AVX1-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
+; AVX1-NEXT: shldq $63, %rcx, %r9
+; AVX1-NEXT: shldq $63, %r8, %r10
+; AVX1-NEXT: shldq $63, %r11, %r12
+; AVX1-NEXT: shldq $63, %rbx, %r13
+; AVX1-NEXT: shldq $63, %r15, %rsi
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX1-NEXT: shldq $63, %rcx, %rdx
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX1-NEXT: shldq $63, %rcx, %rdi
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX1-NEXT: shldq $63, %rcx, %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vmovq %rdi, %xmm1
+; AVX1-NEXT: vmovq %rdx, %xmm2
+; AVX1-NEXT: vmovq %rsi, %xmm3
+; AVX1-NEXT: vmovq %r13, %xmm4
+; AVX1-NEXT: vmovq %r12, %xmm5
+; AVX1-NEXT: vmovq %r10, %xmm6
+; AVX1-NEXT: vmovq %r9, %xmm7
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: addq $8, %rsp
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 8
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: pushq %rax
+; AVX2-NEXT: .cfi_def_cfa_offset 64
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, (%rsp) # 8-byte Spill
+; AVX2-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vpextrq $1, %xmm0, %rbp
+; AVX2-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %rbp
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rbx
+; AVX2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vpextrq $1, %xmm0, %r15
+; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r15
+; AVX2-NEXT: vmovq %xmm3, %r9
+; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r9
+; AVX2-NEXT: vpextrq $1, %xmm3, %r10
+; AVX2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %r12
+; AVX2-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r12
+; AVX2-NEXT: vpextrq $1, %xmm0, %r13
+; AVX2-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r13
+; AVX2-NEXT: vmovq %xmm2, %r14
+; AVX2-NEXT: movq %r14, %rsi
+; AVX2-NEXT: sarq $63, %rsi
+; AVX2-NEXT: vpextrq $1, %xmm2, %r11
+; AVX2-NEXT: movq %r11, %rdx
+; AVX2-NEXT: sarq $63, %rdx
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %r8
+; AVX2-NEXT: movq %r8, %rdi
+; AVX2-NEXT: sarq $63, %rdi
+; AVX2-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX2-NEXT: movq %rcx, %rax
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX2-NEXT: adcq %r15, %rax
+; AVX2-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX2-NEXT: adcq %rbx, %rdi
+; AVX2-NEXT: addq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX2-NEXT: adcq %rbp, %rdx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; AVX2-NEXT: addq %r14, %r15
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; AVX2-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
+; AVX2-NEXT: shldq $63, %rcx, %r9
+; AVX2-NEXT: shldq $63, %r8, %r10
+; AVX2-NEXT: shldq $63, %r11, %r12
+; AVX2-NEXT: shldq $63, %rbx, %r13
+; AVX2-NEXT: shldq $63, %r15, %rsi
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX2-NEXT: shldq $63, %rcx, %rdx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX2-NEXT: shldq $63, %rcx, %rdi
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX2-NEXT: shldq $63, %rcx, %rax
+; AVX2-NEXT: vmovq %rax, %xmm0
+; AVX2-NEXT: vmovq %rdi, %xmm1
+; AVX2-NEXT: vmovq %rdx, %xmm2
+; AVX2-NEXT: vmovq %rsi, %xmm3
+; AVX2-NEXT: vmovq %r13, %xmm4
+; AVX2-NEXT: vmovq %r12, %xmm5
+; AVX2-NEXT: vmovq %r10, %xmm6
+; AVX2-NEXT: vmovq %r9, %xmm7
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: addq $8, %rsp
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 8
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: pushq %rax
+; AVX512-NEXT: .cfi_def_cfa_offset 64
+; AVX512-NEXT: .cfi_offset %rbx, -56
+; AVX512-NEXT: .cfi_offset %r12, -48
+; AVX512-NEXT: .cfi_offset %r13, -40
+; AVX512-NEXT: .cfi_offset %r14, -32
+; AVX512-NEXT: .cfi_offset %r15, -24
+; AVX512-NEXT: .cfi_offset %rbp, -16
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, (%rsp) # 8-byte Spill
+; AVX512-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vpextrq $1, %xmm0, %r13
+; AVX512-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r13
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %r14
+; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r14
+; AVX512-NEXT: vpextrq $1, %xmm0, %r15
+; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r15
+; AVX512-NEXT: vmovq %xmm1, %r9
+; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r9
+; AVX512-NEXT: vpextrq $1, %xmm1, %r11
+; AVX512-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r11
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %r12
+; AVX512-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r12
+; AVX512-NEXT: vpextrq $1, %xmm0, %rbp
+; AVX512-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %rbp
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; AVX512-NEXT: vmovq %xmm0, %rbx
+; AVX512-NEXT: movq %rbx, %rsi
+; AVX512-NEXT: sarq $63, %rsi
+; AVX512-NEXT: vpextrq $1, %xmm0, %r10
+; AVX512-NEXT: movq %r10, %rdx
+; AVX512-NEXT: sarq $63, %rdx
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %r8
+; AVX512-NEXT: movq %r8, %rdi
+; AVX512-NEXT: sarq $63, %rdi
+; AVX512-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX512-NEXT: movq %rcx, %rax
+; AVX512-NEXT: sarq $63, %rax
+; AVX512-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX512-NEXT: adcq %r15, %rax
+; AVX512-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX512-NEXT: adcq %r14, %rdi
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; AVX512-NEXT: addq %r10, %r15
+; AVX512-NEXT: adcq %r13, %rdx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; AVX512-NEXT: addq %rbx, %r14
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; AVX512-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
+; AVX512-NEXT: shldq $63, %rcx, %r9
+; AVX512-NEXT: shldq $63, %r8, %r11
+; AVX512-NEXT: shldq $63, %r10, %r12
+; AVX512-NEXT: shldq $63, %rbx, %rbp
+; AVX512-NEXT: shldq $63, %r14, %rsi
+; AVX512-NEXT: shldq $63, %r15, %rdx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX512-NEXT: shldq $63, %rcx, %rdi
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX512-NEXT: shldq $63, %rcx, %rax
+; AVX512-NEXT: vmovq %rax, %xmm0
+; AVX512-NEXT: vmovq %rdi, %xmm1
+; AVX512-NEXT: vmovq %rdx, %xmm2
+; AVX512-NEXT: vmovq %rsi, %xmm3
+; AVX512-NEXT: vmovq %rbp, %xmm4
+; AVX512-NEXT: vmovq %r12, %xmm5
+; AVX512-NEXT: vmovq %r11, %xmm6
+; AVX512-NEXT: vmovq %r9, %xmm7
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: addq $8, %rsp
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 8
+; AVX512-NEXT: retq
+ %x0 = sext <8 x i64> %a0 to <8 x i128>
+ %x1 = sext <8 x i64> %a1 to <8 x i128>
+ %sum = add <8 x i128> %x0, %x1
+ %shift = ashr <8 x i128> %sum, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <8 x i128> %shift to <8 x i64>
+ ret <8 x i64> %res
+}
+
diff --git a/llvm/test/CodeGen/X86/avgflooru.ll b/llvm/test/CodeGen/X86/avgflooru.ll
new file mode 100644
index 0000000..e07c1f5
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avgflooru.ll
@@ -0,0 +1,2629 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE4
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
+;
+; 128-bit vectors
+;
+
+define <16 x i8> @test_fixed_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: test_fixed_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: paddb %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: retq
+ %and = and <16 x i8> %a0, %a1
+ %xor = xor <16 x i8> %a0, %a1
+ %shift = lshr <16 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = add <16 x i8> %and, %shift
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_ext_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE2-LABEL: test_ext_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; SSE2-NEXT: paddw %xmm3, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: paddw %xmm1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: packuswb %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pxor %xmm3, %xmm3
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
+; SSE4-NEXT: paddw %xmm0, %xmm1
+; SSE4-NEXT: paddw %xmm4, %xmm2
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm2
+; SSE4-NEXT: packuswb %xmm1, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovwb %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = zext <16 x i8> %a0 to <16 x i16>
+ %x1 = zext <16 x i8> %a1 to <16 x i16>
+ %sum = add <16 x i16> %x0, %x1
+ %shift = lshr <16 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <16 x i16> %shift to <16 x i8>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @test_fixed_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: test_fixed_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: paddw %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %and = and <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a1, %a0
+ %shift = lshr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <8 x i16> %and, %shift
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_ext_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE2-LABEL: test_ext_v8i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; SSE2-NEXT: paddd %xmm3, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: pslld $15, %xmm4
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pxor %xmm3, %xmm3
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE4-NEXT: paddd %xmm0, %xmm1
+; SSE4-NEXT: paddd %xmm4, %xmm2
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: psrld $1, %xmm2
+; SSE4-NEXT: packusdw %xmm1, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = zext <8 x i16> %a0 to <8 x i32>
+ %x1 = zext <8 x i16> %a1 to <8 x i32>
+ %sum = add <8 x i32> %x0, %x1
+ %shift = lshr <8 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <8 x i32> %shift to <8 x i16>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: test_fixed_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %and = and <4 x i32> %a0, %a1
+ %xor = xor <4 x i32> %a1, %a0
+ %shift = lshr <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>
+ %res = add <4 x i32> %and, %shift
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @test_ext_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE2-LABEL: test_ext_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE2-NEXT: paddq %xmm3, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: paddq %xmm1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pxor %xmm3, %xmm3
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE4-NEXT: paddq %xmm0, %xmm1
+; SSE4-NEXT: paddq %xmm4, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
+; SSE4-NEXT: movaps %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = zext <4 x i32> %a0 to <4 x i64>
+ %x1 = zext <4 x i32> %a1 to <4 x i64>
+ %sum = add <4 x i64> %x0, %x1
+ %shift = lshr <4 x i64> %sum, <i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <4 x i64> %shift to <4 x i32>
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE-LABEL: test_fixed_v2i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %and = and <2 x i64> %a0, %a1
+ %xor = xor <2 x i64> %a1, %a0
+ %shift = lshr <2 x i64> %xor, <i64 1, i64 1>
+ %res = add <2 x i64> %and, %shift
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE2-LABEL: test_ext_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movq %xmm0, %rcx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdx
+; SSE2-NEXT: movq %xmm1, %rsi
+; SSE2-NEXT: xorl %edi, %edi
+; SSE2-NEXT: addq %rcx, %rsi
+; SSE2-NEXT: setb %dil
+; SSE2-NEXT: xorl %ecx, %ecx
+; SSE2-NEXT: addq %rax, %rdx
+; SSE2-NEXT: setb %cl
+; SSE2-NEXT: shldq $63, %rdx, %rcx
+; SSE2-NEXT: shldq $63, %rsi, %rdi
+; SSE2-NEXT: movq %rdi, %xmm0
+; SSE2-NEXT: movq %rcx, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v2i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movq %xmm0, %rax
+; SSE4-NEXT: pextrq $1, %xmm0, %rcx
+; SSE4-NEXT: movq %xmm1, %rdx
+; SSE4-NEXT: pextrq $1, %xmm1, %rsi
+; SSE4-NEXT: xorl %edi, %edi
+; SSE4-NEXT: addq %rcx, %rsi
+; SSE4-NEXT: setb %dil
+; SSE4-NEXT: xorl %ecx, %ecx
+; SSE4-NEXT: addq %rax, %rdx
+; SSE4-NEXT: setb %cl
+; SSE4-NEXT: shldq $63, %rdx, %rcx
+; SSE4-NEXT: shldq $63, %rsi, %rdi
+; SSE4-NEXT: movq %rdi, %xmm1
+; SSE4-NEXT: movq %rcx, %xmm0
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX1-NEXT: vmovq %xmm1, %rdx
+; AVX1-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX1-NEXT: xorl %edi, %edi
+; AVX1-NEXT: addq %rcx, %rsi
+; AVX1-NEXT: setb %dil
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: addq %rax, %rdx
+; AVX1-NEXT: setb %cl
+; AVX1-NEXT: shldq $63, %rdx, %rcx
+; AVX1-NEXT: shldq $63, %rsi, %rdi
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vmovq %rcx, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX2-NEXT: vmovq %xmm1, %rdx
+; AVX2-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX2-NEXT: xorl %edi, %edi
+; AVX2-NEXT: addq %rcx, %rsi
+; AVX2-NEXT: setb %dil
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: addq %rax, %rdx
+; AVX2-NEXT: setb %cl
+; AVX2-NEXT: shldq $63, %rdx, %rcx
+; AVX2-NEXT: shldq $63, %rsi, %rdi
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vmovq %rcx, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX512-NEXT: vpextrq $1, %xmm1, %rdx
+; AVX512-NEXT: vmovq %xmm1, %rsi
+; AVX512-NEXT: xorl %edi, %edi
+; AVX512-NEXT: addq %rcx, %rdx
+; AVX512-NEXT: setb %dil
+; AVX512-NEXT: xorl %ecx, %ecx
+; AVX512-NEXT: addq %rax, %rsi
+; AVX512-NEXT: setb %cl
+; AVX512-NEXT: shldq $63, %rsi, %rcx
+; AVX512-NEXT: shldq $63, %rdx, %rdi
+; AVX512-NEXT: vmovq %rdi, %xmm0
+; AVX512-NEXT: vmovq %rcx, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: retq
+ %x0 = zext <2 x i64> %a0 to <2 x i128>
+ %x1 = zext <2 x i64> %a1 to <2 x i128>
+ %sum = add <2 x i128> %x0, %x1
+ %shift = lshr <2 x i128> %sum, <i128 1, i128 1>
+ %res = trunc <2 x i128> %shift to <2 x i64>
+ ret <2 x i64> %res
+}
+
+;
+; 256-bit vectors
+;
+
+define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE-LABEL: test_fixed_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: paddb %xmm4, %xmm1
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: paddb %xmm5, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %and = and <32 x i8> %a0, %a1
+ %xor = xor <32 x i8> %a0, %a1
+ %shift = lshr <32 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = add <32 x i8> %and, %shift
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_ext_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE2-LABEL: test_ext_v32i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
+; SSE2-NEXT: paddw %xmm5, %xmm7
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: paddw %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE2-NEXT: paddw %xmm6, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: paddw %xmm2, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm7
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: packuswb %xmm7, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: packuswb %xmm3, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v32i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pxor %xmm5, %xmm5
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
+; SSE4-NEXT: paddw %xmm1, %xmm3
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
+; SSE4-NEXT: paddw %xmm0, %xmm2
+; SSE4-NEXT: paddw %xmm6, %xmm4
+; SSE4-NEXT: paddw %xmm7, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm3
+; SSE4-NEXT: psrlw $1, %xmm2
+; SSE4-NEXT: psrlw $1, %xmm4
+; SSE4-NEXT: packuswb %xmm3, %xmm4
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: packuswb %xmm2, %xmm1
+; SSE4-NEXT: movdqa %xmm1, %xmm0
+; SSE4-NEXT: movdqa %xmm4, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX1-NEXT: vpaddw %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
+; AVX1-NEXT: vpaddw %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; AVX1-NEXT: vpaddw %xmm1, %xmm4, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX2-NEXT: vpaddw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <32 x i8> %a0 to <32 x i16>
+ %x1 = zext <32 x i8> %a1 to <32 x i16>
+ %sum = add <32 x i16> %x0, %x1
+ %shift = lshr <32 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <32 x i16> %shift to <32 x i8>
+ ret <32 x i8> %res
+}
+
+define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE-LABEL: test_fixed_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: paddw %xmm4, %xmm1
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: paddw %xmm5, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddw %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %and = and <16 x i16> %a0, %a1
+ %xor = xor <16 x i16> %a1, %a0
+ %shift = lshr <16 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <16 x i16> %and, %shift
+ ret <16 x i16> %res
+}
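+; Note: the test_fixed_* functions rely on the identity
+; avg_floor(a, b) = (a & b) + ((a ^ b) >> 1), which computes the unsigned
+; floor-average without widening: the AND keeps the bits common to both
+; inputs, the shifted XOR contributes half of the differing bits, and the
+; final add cannot overflow the element width.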
+
+define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE2-LABEL: test_ext_v16i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm5, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm6, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm3, %xmm1
+; SSE2-NEXT: pslld $15, %xmm7
+; SSE2-NEXT: psrad $16, %xmm7
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm7, %xmm0
+; SSE2-NEXT: pslld $15, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: pslld $15, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pxor %xmm5, %xmm5
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; SSE4-NEXT: paddd %xmm1, %xmm3
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE4-NEXT: paddd %xmm0, %xmm2
+; SSE4-NEXT: paddd %xmm6, %xmm4
+; SSE4-NEXT: paddd %xmm7, %xmm1
+; SSE4-NEXT: psrld $1, %xmm3
+; SSE4-NEXT: psrld $1, %xmm2
+; SSE4-NEXT: psrld $1, %xmm4
+; SSE4-NEXT: packusdw %xmm3, %xmm4
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: packusdw %xmm2, %xmm1
+; SSE4-NEXT: movdqa %xmm1, %xmm0
+; SSE4-NEXT: movdqa %xmm4, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX1-NEXT: vpaddd %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; AVX1-NEXT: vpaddd %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX1-NEXT: vpaddd %xmm1, %xmm4, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX2-NEXT: vpaddd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm2, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <16 x i16> %a0 to <16 x i32>
+ %x1 = zext <16 x i16> %a1 to <16 x i32>
+ %sum = add <16 x i32> %x0, %x1
+ %shift = lshr <16 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <16 x i32> %shift to <16 x i16>
+ ret <16 x i16> %res
+}
+
+define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE-LABEL: test_fixed_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: paddd %xmm4, %xmm1
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: paddd %xmm5, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddd %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddd %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %and = and <8 x i32> %a0, %a1
+ %xor = xor <8 x i32> %a1, %a0
+ %shift = lshr <8 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = add <8 x i32> %and, %shift
+ ret <8 x i32> %res
+}
+
+define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE2-LABEL: test_ext_v8i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm4[2],xmm6[3],xmm4[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; SSE2-NEXT: paddq %xmm5, %xmm7
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE2-NEXT: paddq %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: paddq %xmm6, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: paddq %xmm2, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm7
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm7[0,2]
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pxor %xmm5, %xmm5
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; SSE4-NEXT: paddq %xmm1, %xmm3
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; SSE4-NEXT: paddq %xmm0, %xmm2
+; SSE4-NEXT: paddq %xmm6, %xmm4
+; SSE4-NEXT: paddq %xmm7, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2]
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE4-NEXT: movaps %xmm1, %xmm0
+; SSE4-NEXT: movaps %xmm4, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm6[2],xmm3[2],xmm6[3],xmm3[3]
+; AVX1-NEXT: vpaddq %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX1-NEXT: vpaddq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm6[0],zero,xmm6[1],zero
+; AVX1-NEXT: vpaddq %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm2[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <8 x i32> %a0 to <8 x i64>
+ %x1 = zext <8 x i32> %a1 to <8 x i64>
+ %sum = add <8 x i64> %x0, %x1
+ %shift = lshr <8 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <8 x i64> %shift to <8 x i32>
+ ret <8 x i32> %res
+}
+
+define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE-LABEL: test_fixed_v4i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: paddq %xmm4, %xmm1
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: paddq %xmm5, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddq %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %and = and <4 x i64> %a0, %a1
+ %xor = xor <4 x i64> %a1, %a0
+ %shift = lshr <4 x i64> %xor, <i64 1, i64 1, i64 1, i64 1>
+ %res = add <4 x i64> %and, %shift
+ ret <4 x i64> %res
+}
+
+define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE2-LABEL: test_ext_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm4, %rdi
+; SSE2-NEXT: movq %xmm1, %r9
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %r10
+; SSE2-NEXT: movq %xmm0, %r11
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r8
+; SSE2-NEXT: movq %xmm3, %rsi
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdx
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: xorl %ecx, %ecx
+; SSE2-NEXT: addq %r11, %rax
+; SSE2-NEXT: setb %cl
+; SSE2-NEXT: xorl %r11d, %r11d
+; SSE2-NEXT: addq %r10, %rdx
+; SSE2-NEXT: setb %r11b
+; SSE2-NEXT: xorl %r10d, %r10d
+; SSE2-NEXT: addq %r9, %rsi
+; SSE2-NEXT: setb %r10b
+; SSE2-NEXT: xorl %r9d, %r9d
+; SSE2-NEXT: addq %rdi, %r8
+; SSE2-NEXT: setb %r9b
+; SSE2-NEXT: shldq $63, %r8, %r9
+; SSE2-NEXT: shldq $63, %rsi, %r10
+; SSE2-NEXT: shldq $63, %rdx, %r11
+; SSE2-NEXT: shldq $63, %rax, %rcx
+; SSE2-NEXT: movq %rcx, %xmm0
+; SSE2-NEXT: movq %r11, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: movq %r10, %xmm1
+; SSE2-NEXT: movq %r9, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movq %xmm1, %r8
+; SSE4-NEXT: pextrq $1, %xmm1, %r9
+; SSE4-NEXT: movq %xmm0, %r10
+; SSE4-NEXT: pextrq $1, %xmm0, %r11
+; SSE4-NEXT: movq %xmm3, %rdi
+; SSE4-NEXT: pextrq $1, %xmm3, %rsi
+; SSE4-NEXT: movq %xmm2, %rdx
+; SSE4-NEXT: pextrq $1, %xmm2, %rax
+; SSE4-NEXT: xorl %ecx, %ecx
+; SSE4-NEXT: addq %r11, %rax
+; SSE4-NEXT: setb %cl
+; SSE4-NEXT: xorl %r11d, %r11d
+; SSE4-NEXT: addq %r10, %rdx
+; SSE4-NEXT: setb %r11b
+; SSE4-NEXT: xorl %r10d, %r10d
+; SSE4-NEXT: addq %r9, %rsi
+; SSE4-NEXT: setb %r10b
+; SSE4-NEXT: xorl %r9d, %r9d
+; SSE4-NEXT: addq %r8, %rdi
+; SSE4-NEXT: setb %r9b
+; SSE4-NEXT: shldq $63, %rdi, %r9
+; SSE4-NEXT: shldq $63, %rsi, %r10
+; SSE4-NEXT: shldq $63, %rdx, %r11
+; SSE4-NEXT: shldq $63, %rax, %rcx
+; SSE4-NEXT: movq %rcx, %xmm1
+; SSE4-NEXT: movq %r11, %xmm0
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE4-NEXT: movq %r10, %xmm2
+; SSE4-NEXT: movq %r9, %xmm1
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovq %xmm0, %rdi
+; AVX1-NEXT: vpextrq $1, %xmm0, %r9
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %r10
+; AVX1-NEXT: vpextrq $1, %xmm0, %r11
+; AVX1-NEXT: vmovq %xmm1, %r8
+; AVX1-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %rdx
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: addq %r11, %rax
+; AVX1-NEXT: setb %cl
+; AVX1-NEXT: xorl %r11d, %r11d
+; AVX1-NEXT: addq %r10, %rdx
+; AVX1-NEXT: setb %r11b
+; AVX1-NEXT: xorl %r10d, %r10d
+; AVX1-NEXT: addq %r9, %rsi
+; AVX1-NEXT: setb %r10b
+; AVX1-NEXT: xorl %r9d, %r9d
+; AVX1-NEXT: addq %rdi, %r8
+; AVX1-NEXT: setb %r9b
+; AVX1-NEXT: shldq $63, %r8, %r9
+; AVX1-NEXT: shldq $63, %rsi, %r10
+; AVX1-NEXT: shldq $63, %rdx, %r11
+; AVX1-NEXT: shldq $63, %rax, %rcx
+; AVX1-NEXT: vmovq %rcx, %xmm0
+; AVX1-NEXT: vmovq %r11, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vmovq %r10, %xmm1
+; AVX1-NEXT: vmovq %r9, %xmm2
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovq %xmm0, %rdi
+; AVX2-NEXT: vpextrq $1, %xmm0, %r9
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %r10
+; AVX2-NEXT: vpextrq $1, %xmm0, %r11
+; AVX2-NEXT: vmovq %xmm1, %r8
+; AVX2-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rdx
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: addq %r11, %rax
+; AVX2-NEXT: setb %cl
+; AVX2-NEXT: xorl %r11d, %r11d
+; AVX2-NEXT: addq %r10, %rdx
+; AVX2-NEXT: setb %r11b
+; AVX2-NEXT: xorl %r10d, %r10d
+; AVX2-NEXT: addq %r9, %rsi
+; AVX2-NEXT: setb %r10b
+; AVX2-NEXT: xorl %r9d, %r9d
+; AVX2-NEXT: addq %rdi, %r8
+; AVX2-NEXT: setb %r9b
+; AVX2-NEXT: shldq $63, %r8, %r9
+; AVX2-NEXT: shldq $63, %rsi, %r10
+; AVX2-NEXT: shldq $63, %rdx, %r11
+; AVX2-NEXT: shldq $63, %rax, %rcx
+; AVX2-NEXT: vmovq %rcx, %xmm0
+; AVX2-NEXT: vmovq %r11, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vmovq %r10, %xmm1
+; AVX2-NEXT: vmovq %r9, %xmm2
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovq %xmm0, %rsi
+; AVX512-NEXT: vpextrq $1, %xmm0, %r9
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %r10
+; AVX512-NEXT: vpextrq $1, %xmm0, %r11
+; AVX512-NEXT: vmovq %xmm1, %rdi
+; AVX512-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX512-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512-NEXT: vmovq %xmm0, %r8
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: addq %r11, %rax
+; AVX512-NEXT: setb %dl
+; AVX512-NEXT: xorl %r11d, %r11d
+; AVX512-NEXT: addq %r10, %r8
+; AVX512-NEXT: setb %r11b
+; AVX512-NEXT: xorl %r10d, %r10d
+; AVX512-NEXT: addq %r9, %rcx
+; AVX512-NEXT: setb %r10b
+; AVX512-NEXT: xorl %r9d, %r9d
+; AVX512-NEXT: addq %rsi, %rdi
+; AVX512-NEXT: setb %r9b
+; AVX512-NEXT: shldq $63, %rdi, %r9
+; AVX512-NEXT: shldq $63, %rcx, %r10
+; AVX512-NEXT: shldq $63, %r8, %r11
+; AVX512-NEXT: shldq $63, %rax, %rdx
+; AVX512-NEXT: vmovq %rdx, %xmm0
+; AVX512-NEXT: vmovq %r11, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vmovq %r10, %xmm1
+; AVX512-NEXT: vmovq %r9, %xmm2
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <4 x i64> %a0 to <4 x i128>
+ %x1 = zext <4 x i64> %a1 to <4 x i128>
+ %sum = add <4 x i128> %x0, %x1
+ %shift = lshr <4 x i128> %sum, <i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <4 x i128> %shift to <4 x i64>
+ ret <4 x i64> %res
+}
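+; Note: in test_ext_v4i64 the <4 x i128> arithmetic is scalarized through
+; GPRs: each lane is an addq whose carry is captured with setb, and
+; shldq $63 then shifts the 65-bit (carry:sum) value right by one, i.e.
+;   avg = ((carry << 64) | sum) >> 1
+; truncated back to 64 bits.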
+
+;
+; 512-bit vectors
+;
+
+define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE-LABEL: test_fixed_v64i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm9
+; SSE-NEXT: pand %xmm7, %xmm9
+; SSE-NEXT: movdqa %xmm2, %xmm10
+; SSE-NEXT: pand %xmm6, %xmm10
+; SSE-NEXT: movdqa %xmm1, %xmm11
+; SSE-NEXT: pand %xmm5, %xmm11
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlw $1, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: paddb %xmm9, %xmm3
+; SSE-NEXT: psrlw $1, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: paddb %xmm10, %xmm2
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: paddb %xmm11, %xmm1
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: paddb %xmm8, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm6
+; AVX1-NEXT: vpand %xmm3, %xmm6, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
+; AVX1-NEXT: vpaddb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm6, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: vpaddb %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %and = and <64 x i8> %a0, %a1
+ %xor = xor <64 x i8> %a0, %a1
+ %shift = lshr <64 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = add <64 x i8> %and, %shift
+ ret <64 x i8> %res
+}
+
+define <64 x i8> @test_ext_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE2-LABEL: test_ext_v64i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: movdqa %xmm3, %xmm10
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm11
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm8[8],xmm11[9],xmm8[9],xmm11[10],xmm8[10],xmm11[11],xmm8[11],xmm11[12],xmm8[12],xmm11[13],xmm8[13],xmm11[14],xmm8[14],xmm11[15],xmm8[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm12
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm8[8],xmm12[9],xmm8[9],xmm12[10],xmm8[10],xmm12[11],xmm8[11],xmm12[12],xmm8[12],xmm12[13],xmm8[13],xmm12[14],xmm8[14],xmm12[15],xmm8[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm13
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm8[8],xmm13[9],xmm8[9],xmm13[10],xmm8[10],xmm13[11],xmm8[11],xmm13[12],xmm8[12],xmm13[13],xmm8[13],xmm13[14],xmm8[14],xmm13[15],xmm8[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; SSE2-NEXT: movdqa %xmm7, %xmm9
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm8[8],xmm9[9],xmm8[9],xmm9[10],xmm8[10],xmm9[11],xmm8[11],xmm9[12],xmm8[12],xmm9[13],xmm8[13],xmm9[14],xmm8[14],xmm9[15],xmm8[15]
+; SSE2-NEXT: paddw %xmm10, %xmm9
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; SSE2-NEXT: paddw %xmm7, %xmm3
+; SSE2-NEXT: movdqa %xmm6, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15]
+; SSE2-NEXT: paddw %xmm11, %xmm7
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
+; SSE2-NEXT: paddw %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
+; SSE2-NEXT: paddw %xmm12, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3],xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
+; SSE2-NEXT: paddw %xmm5, %xmm1
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15]
+; SSE2-NEXT: paddw %xmm13, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
+; SSE2-NEXT: paddw %xmm4, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm9
+; SSE2-NEXT: psrlw $1, %xmm3
+; SSE2-NEXT: packuswb %xmm9, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm7
+; SSE2-NEXT: psrlw $1, %xmm2
+; SSE2-NEXT: packuswb %xmm7, %xmm2
+; SSE2-NEXT: psrlw $1, %xmm6
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: packuswb %xmm6, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm5
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: packuswb %xmm5, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v64i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm3, %xmm9
+; SSE4-NEXT: movdqa %xmm2, %xmm10
+; SSE4-NEXT: movdqa %xmm1, %xmm11
+; SSE4-NEXT: movdqa %xmm0, %xmm8
+; SSE4-NEXT: pxor %xmm13, %xmm13
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm13[8],xmm9[9],xmm13[9],xmm9[10],xmm13[10],xmm9[11],xmm13[11],xmm9[12],xmm13[12],xmm9[13],xmm13[13],xmm9[14],xmm13[14],xmm9[15],xmm13[15]
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm14 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm13[8],xmm10[9],xmm13[9],xmm10[10],xmm13[10],xmm10[11],xmm13[11],xmm10[12],xmm13[12],xmm10[13],xmm13[13],xmm10[14],xmm13[14],xmm10[15],xmm13[15]
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm15 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm13[8],xmm11[9],xmm13[9],xmm11[10],xmm13[10],xmm11[11],xmm13[11],xmm11[12],xmm13[12],xmm11[13],xmm13[13],xmm11[14],xmm13[14],xmm11[15],xmm13[15]
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm12 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero,xmm8[4],zero,xmm8[5],zero,xmm8[6],zero,xmm8[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm13[8],xmm8[9],xmm13[9],xmm8[10],xmm13[10],xmm8[11],xmm13[11],xmm8[12],xmm13[12],xmm8[13],xmm13[13],xmm8[14],xmm13[14],xmm8[15],xmm13[15]
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm13[8],xmm7[9],xmm13[9],xmm7[10],xmm13[10],xmm7[11],xmm13[11],xmm7[12],xmm13[12],xmm7[13],xmm13[13],xmm7[14],xmm13[14],xmm7[15],xmm13[15]
+; SSE4-NEXT: paddw %xmm9, %xmm7
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm13[8],xmm6[9],xmm13[9],xmm6[10],xmm13[10],xmm6[11],xmm13[11],xmm6[12],xmm13[12],xmm6[13],xmm13[13],xmm6[14],xmm13[14],xmm6[15],xmm13[15]
+; SSE4-NEXT: paddw %xmm10, %xmm6
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm13[8],xmm5[9],xmm13[9],xmm5[10],xmm13[10],xmm5[11],xmm13[11],xmm5[12],xmm13[12],xmm5[13],xmm13[13],xmm5[14],xmm13[14],xmm5[15],xmm13[15]
+; SSE4-NEXT: paddw %xmm11, %xmm5
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm13[8],xmm4[9],xmm13[9],xmm4[10],xmm13[10],xmm4[11],xmm13[11],xmm4[12],xmm13[12],xmm4[13],xmm13[13],xmm4[14],xmm13[14],xmm4[15],xmm13[15]
+; SSE4-NEXT: paddw %xmm8, %xmm4
+; SSE4-NEXT: paddw {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE4-NEXT: paddw %xmm14, %xmm2
+; SSE4-NEXT: paddw %xmm15, %xmm1
+; SSE4-NEXT: paddw %xmm12, %xmm0
+; SSE4-NEXT: psrlw $1, %xmm7
+; SSE4-NEXT: psrlw $1, %xmm6
+; SSE4-NEXT: psrlw $1, %xmm5
+; SSE4-NEXT: psrlw $1, %xmm4
+; SSE4-NEXT: psrlw $1, %xmm3
+; SSE4-NEXT: packuswb %xmm7, %xmm3
+; SSE4-NEXT: psrlw $1, %xmm2
+; SSE4-NEXT: packuswb %xmm6, %xmm2
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: packuswb %xmm5, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm0
+; SSE4-NEXT: packuswb %xmm4, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm8 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm9
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm9[8],xmm4[8],xmm9[9],xmm4[9],xmm9[10],xmm4[10],xmm9[11],xmm4[11],xmm9[12],xmm4[12],xmm9[13],xmm4[13],xmm9[14],xmm4[14],xmm9[15],xmm4[15]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm9 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero,xmm9[4],zero,xmm9[5],zero,xmm9[6],zero,xmm9[7],zero
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; AVX1-NEXT: vpaddw %xmm5, %xmm11, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm11
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm11[8],xmm4[8],xmm11[9],xmm4[9],xmm11[10],xmm4[10],xmm11[11],xmm4[11],xmm11[12],xmm4[12],xmm11[13],xmm4[13],xmm11[14],xmm4[14],xmm11[15],xmm4[15]
+; AVX1-NEXT: vpaddw %xmm7, %xmm12, %xmm7
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; AVX1-NEXT: vpaddw %xmm12, %xmm8, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm12
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm12[8],xmm4[8],xmm12[9],xmm4[9],xmm12[10],xmm4[10],xmm12[11],xmm4[11],xmm12[12],xmm4[12],xmm12[13],xmm4[13],xmm12[14],xmm4[14],xmm12[15],xmm4[15]
+; AVX1-NEXT: vpaddw %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm11[0],zero,xmm11[1],zero,xmm11[2],zero,xmm11[3],zero,xmm11[4],zero,xmm11[5],zero,xmm11[6],zero,xmm11[7],zero
+; AVX1-NEXT: vpaddw %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm12[0],zero,xmm12[1],zero,xmm12[2],zero,xmm12[3],zero,xmm12[4],zero,xmm12[5],zero,xmm12[6],zero,xmm12[7],zero
+; AVX1-NEXT: vpaddw %xmm2, %xmm9, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrlw $1, %xmm7, %xmm6
+; AVX1-NEXT: vpsrlw $1, %xmm8, %xmm7
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero,xmm6[8],zero,xmm6[9],zero,xmm6[10],zero,xmm6[11],zero,xmm6[12],zero,xmm6[13],zero,xmm6[14],zero,xmm6[15],zero
+; AVX2-NEXT: vpaddw %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX2-NEXT: vpaddw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX2-NEXT: vpaddw %ymm3, %ymm5, %ymm3
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm4, %ymm2
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm3, %ymm2
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512-NEXT: vpaddw %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm2, %zmm1
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = zext <64 x i8> %a0 to <64 x i16>
+ %x1 = zext <64 x i8> %a1 to <64 x i16>
+ %sum = add <64 x i16> %x0, %x1
+ %shift = lshr <64 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <64 x i16> %shift to <64 x i8>
+ ret <64 x i8> %res
+}
+
+define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE-LABEL: test_fixed_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pand %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: pand %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlw $1, %xmm3
+; SSE-NEXT: paddw %xmm8, %xmm3
+; SSE-NEXT: psrlw $1, %xmm2
+; SSE-NEXT: paddw %xmm9, %xmm2
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: paddw %xmm10, %xmm1
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: paddw %xmm11, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpaddw %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpaddw %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %and = and <32 x i16> %a0, %a1
+ %xor = xor <32 x i16> %a1, %a0
+ %shift = lshr <32 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <32 x i16> %and, %shift
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE2-LABEL: test_ext_v32i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: movdqa %xmm0, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm11
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm8[4],xmm11[5],xmm8[5],xmm11[6],xmm8[6],xmm11[7],xmm8[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
+; SSE2-NEXT: movdqa %xmm2, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm8[4],xmm12[5],xmm8[5],xmm12[6],xmm8[6],xmm12[7],xmm8[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; SSE2-NEXT: movdqa %xmm3, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; SSE2-NEXT: movdqa %xmm4, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
+; SSE2-NEXT: paddd %xmm9, %xmm10
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm5, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; SSE2-NEXT: paddd %xmm11, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3]
+; SSE2-NEXT: paddd %xmm5, %xmm1
+; SSE2-NEXT: movdqa %xmm6, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
+; SSE2-NEXT: paddd %xmm12, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3]
+; SSE2-NEXT: paddd %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm7, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
+; SSE2-NEXT: paddd %xmm13, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; SSE2-NEXT: paddd %xmm7, %xmm3
+; SSE2-NEXT: pslld $15, %xmm10
+; SSE2-NEXT: psrad $16, %xmm10
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm10, %xmm0
+; SSE2-NEXT: pslld $15, %xmm9
+; SSE2-NEXT: psrad $16, %xmm9
+; SSE2-NEXT: pslld $15, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm9, %xmm1
+; SSE2-NEXT: pslld $15, %xmm5
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: pslld $15, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: packssdw %xmm5, %xmm2
+; SSE2-NEXT: pslld $15, %xmm4
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: pslld $15, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: packssdw %xmm4, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v32i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm3, %xmm9
+; SSE4-NEXT: movdqa %xmm2, %xmm10
+; SSE4-NEXT: movdqa %xmm1, %xmm11
+; SSE4-NEXT: movdqa %xmm0, %xmm8
+; SSE4-NEXT: pxor %xmm13, %xmm13
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm14 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm13[4],xmm10[5],xmm13[5],xmm10[6],xmm13[6],xmm10[7],xmm13[7]
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm15 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm13[4],xmm11[5],xmm13[5],xmm11[6],xmm13[6],xmm11[7],xmm13[7]
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm12 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm13[4],xmm8[5],xmm13[5],xmm8[6],xmm13[6],xmm8[7],xmm13[7]
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
+; SSE4-NEXT: paddd %xmm9, %xmm7
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
+; SSE4-NEXT: paddd %xmm10, %xmm6
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm13[4],xmm5[5],xmm13[5],xmm5[6],xmm13[6],xmm5[7],xmm13[7]
+; SSE4-NEXT: paddd %xmm11, %xmm5
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
+; SSE4-NEXT: paddd %xmm8, %xmm4
+; SSE4-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE4-NEXT: paddd %xmm14, %xmm2
+; SSE4-NEXT: paddd %xmm15, %xmm1
+; SSE4-NEXT: paddd %xmm12, %xmm0
+; SSE4-NEXT: psrld $1, %xmm7
+; SSE4-NEXT: psrld $1, %xmm6
+; SSE4-NEXT: psrld $1, %xmm5
+; SSE4-NEXT: psrld $1, %xmm4
+; SSE4-NEXT: psrld $1, %xmm3
+; SSE4-NEXT: packusdw %xmm7, %xmm3
+; SSE4-NEXT: psrld $1, %xmm2
+; SSE4-NEXT: packusdw %xmm6, %xmm2
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: packusdw %xmm5, %xmm1
+; SSE4-NEXT: psrld $1, %xmm0
+; SSE4-NEXT: packusdw %xmm4, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm9
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm10 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm9 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm11 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; AVX1-NEXT: vpaddd %xmm5, %xmm11, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm11
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
+; AVX1-NEXT: vpaddd %xmm7, %xmm12, %xmm7
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX1-NEXT: vpaddd %xmm12, %xmm8, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm12
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; AVX1-NEXT: vpaddd %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm11[0],zero,xmm11[1],zero,xmm11[2],zero,xmm11[3],zero
+; AVX1-NEXT: vpaddd %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm12[0],zero,xmm12[1],zero,xmm12[2],zero,xmm12[3],zero
+; AVX1-NEXT: vpaddd %xmm2, %xmm9, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrld $1, %xmm7, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm8, %xmm7
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpackusdw %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; AVX2-NEXT: vpaddd %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX2-NEXT: vpaddd %ymm3, %ymm5, %ymm3
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm4, %ymm2
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm3, %ymm2
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpackusdw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
+; AVX512-NEXT: vpaddd %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm2, %zmm1
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = zext <32 x i16> %a0 to <32 x i32>
+ %x1 = zext <32 x i16> %a1 to <32 x i32>
+ %sum = add <32 x i32> %x0, %x1
+ %shift = lshr <32 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <32 x i32> %shift to <32 x i16>
+ ret <32 x i16> %res
+}
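+;
+; test_ext_v32i16 (and the other test_ext_* functions) spell the floor
+; average out in its widened reference form, trunc((zext(a) + zext(b)) >> 1):
+; doubling the element width makes the intermediate add wrap-free. The checks
+; record how literally each subtarget still legalises that widening compared
+; with the test_fixed_* identity. A minimal scalar sketch of the pattern
+; (illustrative only; @avg_ext_sketch is not part of the generated checks):
+;
+;   define i16 @avg_ext_sketch(i16 %a, i16 %b) {
+;     %za = zext i16 %a to i32
+;     %zb = zext i16 %b to i32
+;     %sum = add i32 %za, %zb
+;     %half = lshr i32 %sum, 1
+;     %avg = trunc i32 %half to i16
+;     ret i16 %avg
+;   }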
+
+define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE-LABEL: test_fixed_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pand %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: pand %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrld $1, %xmm3
+; SSE-NEXT: paddd %xmm8, %xmm3
+; SSE-NEXT: psrld $1, %xmm2
+; SSE-NEXT: paddd %xmm9, %xmm2
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: paddd %xmm10, %xmm1
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: paddd %xmm11, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpaddd %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpaddd %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxord %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddd %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %and = and <16 x i32> %a0, %a1
+ %xor = xor <16 x i32> %a1, %a0
+ %shift = lshr <16 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = add <16 x i32> %and, %shift
+ ret <16 x i32> %res
+}
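+;
+; The test_fixed_* variants avoid widening entirely via the identity
+;   avg_floor(a, b) = (a & b) + ((a ^ b) >> 1)
+; which holds because a + b = 2*(a & b) + (a ^ b): the AND carries the common
+; bits (weight 2) and the shifted XOR contributes half of the differing bits,
+; so nothing ever exceeds the element width. A scalar sketch of the same
+; identity (illustrative only; @avg_fixed_sketch is not part of the checks):
+;
+;   define i32 @avg_fixed_sketch(i32 %a, i32 %b) {
+;     %and = and i32 %a, %b
+;     %xor = xor i32 %a, %b
+;     %half = lshr i32 %xor, 1
+;     %avg = add i32 %and, %half
+;     ret i32 %avg
+;   }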
+
+define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE2-LABEL: test_ext_v16i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: movdqa %xmm3, %xmm10
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm10 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
+; SSE2-NEXT: movdqa %xmm2, %xmm11
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm11 = xmm11[2],xmm8[2],xmm11[3],xmm8[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm12
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm8[2],xmm12[3],xmm8[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm13
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm13 = xmm13[2],xmm8[2],xmm13[3],xmm8[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
+; SSE2-NEXT: movdqa %xmm7, %xmm9
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
+; SSE2-NEXT: paddq %xmm10, %xmm9
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
+; SSE2-NEXT: paddq %xmm7, %xmm3
+; SSE2-NEXT: movdqa %xmm6, %xmm7
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; SSE2-NEXT: paddq %xmm11, %xmm7
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
+; SSE2-NEXT: paddq %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm8[2],xmm6[3],xmm8[3]
+; SSE2-NEXT: paddq %xmm12, %xmm6
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
+; SSE2-NEXT: paddq %xmm5, %xmm1
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm8[2],xmm5[3],xmm8[3]
+; SSE2-NEXT: paddq %xmm13, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
+; SSE2-NEXT: paddq %xmm4, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm9
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm9[0,2]
+; SSE2-NEXT: psrlq $1, %xmm7
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm7[0,2]
+; SSE2-NEXT: psrlq $1, %xmm6
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm6[0,2]
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm3, %xmm9
+; SSE4-NEXT: movdqa %xmm2, %xmm10
+; SSE4-NEXT: movdqa %xmm1, %xmm11
+; SSE4-NEXT: movdqa %xmm0, %xmm8
+; SSE4-NEXT: pxor %xmm13, %xmm13
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero
+; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm13[2],xmm9[3],xmm13[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm14 = xmm2[0],zero,xmm2[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm10 = xmm10[2],xmm13[2],xmm10[3],xmm13[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm15 = xmm1[0],zero,xmm1[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm11 = xmm11[2],xmm13[2],xmm11[3],xmm13[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm12 = xmm8[0],zero,xmm8[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm13[2],xmm8[3],xmm13[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm7[0],zero,xmm7[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm13[2],xmm7[3],xmm13[3]
+; SSE4-NEXT: paddq %xmm9, %xmm7
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm13[2],xmm6[3],xmm13[3]
+; SSE4-NEXT: paddq %xmm10, %xmm6
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm13[2],xmm5[3],xmm13[3]
+; SSE4-NEXT: paddq %xmm11, %xmm5
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm13[2],xmm4[3],xmm13[3]
+; SSE4-NEXT: paddq %xmm8, %xmm4
+; SSE4-NEXT: paddq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE4-NEXT: paddq %xmm14, %xmm2
+; SSE4-NEXT: paddq %xmm15, %xmm1
+; SSE4-NEXT: paddq %xmm12, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm7
+; SSE4-NEXT: psrlq $1, %xmm6
+; SSE4-NEXT: psrlq $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm7[0,2]
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[0,2]
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm8
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm9 = xmm8[2],xmm5[2],xmm8[3],xmm5[3]
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm10 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm8 = xmm8[0],zero,xmm8[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm11
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm12 = xmm11[2],xmm5[2],xmm11[3],xmm5[3]
+; AVX1-NEXT: vpaddq %xmm6, %xmm12, %xmm6
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm12 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX1-NEXT: vpaddq %xmm7, %xmm12, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm12
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm13 = xmm12[2],xmm5[2],xmm12[3],xmm5[3]
+; AVX1-NEXT: vpaddq %xmm13, %xmm9, %xmm9
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; AVX1-NEXT: vpaddq %xmm5, %xmm10, %xmm5
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm11[0],zero,xmm11[1],zero
+; AVX1-NEXT: vpaddq %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm12[0],zero,xmm12[1],zero
+; AVX1-NEXT: vpaddq %xmm3, %xmm8, %xmm3
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm6, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm7, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm9, %xmm7
+; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm6, %ymm2
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX2-NEXT: vpaddq %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm6
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX2-NEXT: vpaddq %ymm6, %ymm5, %ymm5
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm5[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],ymm4[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero
+; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm2, %zmm1
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = zext <16 x i32> %a0 to <16 x i64>
+ %x1 = zext <16 x i32> %a1 to <16 x i64>
+ %sum = add <16 x i64> %x0, %x1
+ %shift = lshr <16 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <16 x i64> %shift to <16 x i32>
+ ret <16 x i32> %res
+}
+
+define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE-LABEL: test_fixed_v8i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pand %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: pand %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlq $1, %xmm3
+; SSE-NEXT: paddq %xmm8, %xmm3
+; SSE-NEXT: psrlq $1, %xmm2
+; SSE-NEXT: paddq %xmm9, %xmm2
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: paddq %xmm10, %xmm1
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: paddq %xmm11, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpaddq %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpaddq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddq %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %and = and <8 x i64> %a0, %a1
+ %xor = xor <8 x i64> %a1, %a0
+ %shift = lshr <8 x i64> %xor, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = add <8 x i64> %and, %shift
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_ext_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE2-LABEL: test_ext_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: .cfi_offset %rbx, -56
+; SSE2-NEXT: .cfi_offset %r12, -48
+; SSE2-NEXT: .cfi_offset %r13, -40
+; SSE2-NEXT: .cfi_offset %r14, -32
+; SSE2-NEXT: .cfi_offset %r15, -24
+; SSE2-NEXT: .cfi_offset %rbp, -16
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm3, %rbx
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm3, %r12
+; SSE2-NEXT: movq %xmm2, %rbp
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %r13
+; SSE2-NEXT: movq %xmm1, %r15
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %r14
+; SSE2-NEXT: movq %xmm0, %r11
+; SSE2-NEXT: movq %xmm7, %r10
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r9
+; SSE2-NEXT: movq %xmm6, %r8
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdi
+; SSE2-NEXT: movq %xmm5, %rsi
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdx
+; SSE2-NEXT: movq %xmm4, %rax
+; SSE2-NEXT: xorl %ecx, %ecx
+; SSE2-NEXT: addq %r11, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: setb %cl
+; SSE2-NEXT: xorl %r11d, %r11d
+; SSE2-NEXT: addq %r14, %rdx
+; SSE2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: setb %r11b
+; SSE2-NEXT: xorl %r14d, %r14d
+; SSE2-NEXT: addq %r15, %rsi
+; SSE2-NEXT: setb %r14b
+; SSE2-NEXT: xorl %r15d, %r15d
+; SSE2-NEXT: addq %r13, %rdi
+; SSE2-NEXT: setb %r15b
+; SSE2-NEXT: xorl %r13d, %r13d
+; SSE2-NEXT: addq %rbp, %r8
+; SSE2-NEXT: setb %r13b
+; SSE2-NEXT: xorl %ebp, %ebp
+; SSE2-NEXT: addq %r12, %r9
+; SSE2-NEXT: setb %bpl
+; SSE2-NEXT: xorl %r12d, %r12d
+; SSE2-NEXT: addq %rbx, %r10
+; SSE2-NEXT: movq %xmm8, %rdx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
+; SSE2-NEXT: setb %r12b
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: xorl %ebx, %ebx
+; SSE2-NEXT: addq %rdx, %rax
+; SSE2-NEXT: setb %bl
+; SSE2-NEXT: shldq $63, %rax, %rbx
+; SSE2-NEXT: shldq $63, %r10, %r12
+; SSE2-NEXT: shldq $63, %r9, %rbp
+; SSE2-NEXT: shldq $63, %r8, %r13
+; SSE2-NEXT: shldq $63, %rdi, %r15
+; SSE2-NEXT: shldq $63, %rsi, %r14
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE2-NEXT: shldq $63, %rax, %r11
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE2-NEXT: shldq $63, %rax, %rcx
+; SSE2-NEXT: movq %rcx, %xmm0
+; SSE2-NEXT: movq %r11, %xmm4
+; SSE2-NEXT: movq %r14, %xmm1
+; SSE2-NEXT: movq %r15, %xmm5
+; SSE2-NEXT: movq %r13, %xmm2
+; SSE2-NEXT: movq %rbp, %xmm6
+; SSE2-NEXT: movq %r12, %xmm3
+; SSE2-NEXT: movq %rbx, %xmm7
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pushq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: pushq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: pushq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: pushq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: pushq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: pushq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: .cfi_offset %rbx, -56
+; SSE4-NEXT: .cfi_offset %r12, -48
+; SSE4-NEXT: .cfi_offset %r13, -40
+; SSE4-NEXT: .cfi_offset %r14, -32
+; SSE4-NEXT: .cfi_offset %r15, -24
+; SSE4-NEXT: .cfi_offset %rbp, -16
+; SSE4-NEXT: pextrq $1, %xmm3, %r14
+; SSE4-NEXT: movq %xmm2, %r13
+; SSE4-NEXT: pextrq $1, %xmm2, %rbp
+; SSE4-NEXT: movq %xmm1, %r12
+; SSE4-NEXT: pextrq $1, %xmm1, %r15
+; SSE4-NEXT: movq %xmm0, %rbx
+; SSE4-NEXT: pextrq $1, %xmm0, %r11
+; SSE4-NEXT: pextrq $1, %xmm7, %r10
+; SSE4-NEXT: movq %xmm6, %r9
+; SSE4-NEXT: pextrq $1, %xmm6, %r8
+; SSE4-NEXT: movq %xmm5, %rdi
+; SSE4-NEXT: pextrq $1, %xmm5, %rsi
+; SSE4-NEXT: movq %xmm4, %rdx
+; SSE4-NEXT: pextrq $1, %xmm4, %rax
+; SSE4-NEXT: xorl %ecx, %ecx
+; SSE4-NEXT: addq %r11, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: setb %cl
+; SSE4-NEXT: xorl %r11d, %r11d
+; SSE4-NEXT: addq %rbx, %rdx
+; SSE4-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: setb %r11b
+; SSE4-NEXT: xorl %ebx, %ebx
+; SSE4-NEXT: addq %r15, %rsi
+; SSE4-NEXT: setb %bl
+; SSE4-NEXT: xorl %r15d, %r15d
+; SSE4-NEXT: addq %r12, %rdi
+; SSE4-NEXT: setb %r15b
+; SSE4-NEXT: xorl %r12d, %r12d
+; SSE4-NEXT: addq %rbp, %r8
+; SSE4-NEXT: setb %r12b
+; SSE4-NEXT: xorl %ebp, %ebp
+; SSE4-NEXT: addq %r13, %r9
+; SSE4-NEXT: setb %bpl
+; SSE4-NEXT: xorl %r13d, %r13d
+; SSE4-NEXT: addq %r14, %r10
+; SSE4-NEXT: movq %xmm3, %rdx
+; SSE4-NEXT: setb %r13b
+; SSE4-NEXT: movq %xmm7, %rax
+; SSE4-NEXT: xorl %r14d, %r14d
+; SSE4-NEXT: addq %rdx, %rax
+; SSE4-NEXT: setb %r14b
+; SSE4-NEXT: shldq $63, %rax, %r14
+; SSE4-NEXT: shldq $63, %r10, %r13
+; SSE4-NEXT: shldq $63, %r9, %rbp
+; SSE4-NEXT: shldq $63, %r8, %r12
+; SSE4-NEXT: shldq $63, %rdi, %r15
+; SSE4-NEXT: shldq $63, %rsi, %rbx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE4-NEXT: shldq $63, %rax, %r11
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE4-NEXT: shldq $63, %rax, %rcx
+; SSE4-NEXT: movq %rcx, %xmm4
+; SSE4-NEXT: movq %r11, %xmm0
+; SSE4-NEXT: movq %rbx, %xmm5
+; SSE4-NEXT: movq %r15, %xmm1
+; SSE4-NEXT: movq %r12, %xmm6
+; SSE4-NEXT: movq %rbp, %xmm2
+; SSE4-NEXT: movq %r13, %xmm7
+; SSE4-NEXT: movq %r14, %xmm3
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE4-NEXT: popq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: popq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: popq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: popq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: popq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 8
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: vpextrq $1, %xmm1, %rbx
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vmovq %xmm4, %r15
+; AVX1-NEXT: vpextrq $1, %xmm4, %rbp
+; AVX1-NEXT: vmovq %xmm0, %r13
+; AVX1-NEXT: vpextrq $1, %xmm0, %r12
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %r14
+; AVX1-NEXT: vpextrq $1, %xmm0, %r11
+; AVX1-NEXT: vpextrq $1, %xmm3, %r10
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %r9
+; AVX1-NEXT: vpextrq $1, %xmm0, %r8
+; AVX1-NEXT: vmovq %xmm2, %rdi
+; AVX1-NEXT: vpextrq $1, %xmm2, %rsi
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %rdx
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: addq %r11, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: setb %cl
+; AVX1-NEXT: xorl %r11d, %r11d
+; AVX1-NEXT: addq %r14, %rdx
+; AVX1-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: setb %r11b
+; AVX1-NEXT: xorl %r14d, %r14d
+; AVX1-NEXT: addq %r12, %rsi
+; AVX1-NEXT: setb %r14b
+; AVX1-NEXT: xorl %r12d, %r12d
+; AVX1-NEXT: addq %r13, %rdi
+; AVX1-NEXT: setb %r12b
+; AVX1-NEXT: xorl %r13d, %r13d
+; AVX1-NEXT: addq %rbp, %r8
+; AVX1-NEXT: setb %r13b
+; AVX1-NEXT: xorl %ebp, %ebp
+; AVX1-NEXT: addq %r15, %r9
+; AVX1-NEXT: setb %bpl
+; AVX1-NEXT: xorl %r15d, %r15d
+; AVX1-NEXT: addq %rbx, %r10
+; AVX1-NEXT: vmovq %xmm1, %rdx
+; AVX1-NEXT: setb %r15b
+; AVX1-NEXT: vmovq %xmm3, %rax
+; AVX1-NEXT: xorl %ebx, %ebx
+; AVX1-NEXT: addq %rdx, %rax
+; AVX1-NEXT: setb %bl
+; AVX1-NEXT: shldq $63, %rax, %rbx
+; AVX1-NEXT: shldq $63, %r10, %r15
+; AVX1-NEXT: shldq $63, %r9, %rbp
+; AVX1-NEXT: shldq $63, %r8, %r13
+; AVX1-NEXT: shldq $63, %rdi, %r12
+; AVX1-NEXT: shldq $63, %rsi, %r14
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX1-NEXT: shldq $63, %rax, %r11
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX1-NEXT: shldq $63, %rax, %rcx
+; AVX1-NEXT: vmovq %rcx, %xmm0
+; AVX1-NEXT: vmovq %r11, %xmm1
+; AVX1-NEXT: vmovq %r14, %xmm2
+; AVX1-NEXT: vmovq %r12, %xmm3
+; AVX1-NEXT: vmovq %r13, %xmm4
+; AVX1-NEXT: vmovq %rbp, %xmm5
+; AVX1-NEXT: vmovq %r15, %xmm6
+; AVX1-NEXT: vmovq %rbx, %xmm7
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 8
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: vpextrq $1, %xmm1, %rbx
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vmovq %xmm4, %r15
+; AVX2-NEXT: vpextrq $1, %xmm4, %rbp
+; AVX2-NEXT: vmovq %xmm0, %r13
+; AVX2-NEXT: vpextrq $1, %xmm0, %r12
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %r14
+; AVX2-NEXT: vpextrq $1, %xmm0, %r11
+; AVX2-NEXT: vpextrq $1, %xmm3, %r10
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %r9
+; AVX2-NEXT: vpextrq $1, %xmm0, %r8
+; AVX2-NEXT: vmovq %xmm2, %rdi
+; AVX2-NEXT: vpextrq $1, %xmm2, %rsi
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rdx
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: addq %r11, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: setb %cl
+; AVX2-NEXT: xorl %r11d, %r11d
+; AVX2-NEXT: addq %r14, %rdx
+; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: setb %r11b
+; AVX2-NEXT: xorl %r14d, %r14d
+; AVX2-NEXT: addq %r12, %rsi
+; AVX2-NEXT: setb %r14b
+; AVX2-NEXT: xorl %r12d, %r12d
+; AVX2-NEXT: addq %r13, %rdi
+; AVX2-NEXT: setb %r12b
+; AVX2-NEXT: xorl %r13d, %r13d
+; AVX2-NEXT: addq %rbp, %r8
+; AVX2-NEXT: setb %r13b
+; AVX2-NEXT: xorl %ebp, %ebp
+; AVX2-NEXT: addq %r15, %r9
+; AVX2-NEXT: setb %bpl
+; AVX2-NEXT: xorl %r15d, %r15d
+; AVX2-NEXT: addq %rbx, %r10
+; AVX2-NEXT: vmovq %xmm1, %rdx
+; AVX2-NEXT: setb %r15b
+; AVX2-NEXT: vmovq %xmm3, %rax
+; AVX2-NEXT: xorl %ebx, %ebx
+; AVX2-NEXT: addq %rdx, %rax
+; AVX2-NEXT: setb %bl
+; AVX2-NEXT: shldq $63, %rax, %rbx
+; AVX2-NEXT: shldq $63, %r10, %r15
+; AVX2-NEXT: shldq $63, %r9, %rbp
+; AVX2-NEXT: shldq $63, %r8, %r13
+; AVX2-NEXT: shldq $63, %rdi, %r12
+; AVX2-NEXT: shldq $63, %rsi, %r14
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT: shldq $63, %rax, %r11
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT: shldq $63, %rax, %rcx
+; AVX2-NEXT: vmovq %rcx, %xmm0
+; AVX2-NEXT: vmovq %r11, %xmm1
+; AVX2-NEXT: vmovq %r14, %xmm2
+; AVX2-NEXT: vmovq %r12, %xmm3
+; AVX2-NEXT: vmovq %r13, %xmm4
+; AVX2-NEXT: vmovq %rbp, %xmm5
+; AVX2-NEXT: vmovq %r15, %xmm6
+; AVX2-NEXT: vmovq %rbx, %xmm7
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 8
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: .cfi_offset %rbx, -56
+; AVX512-NEXT: .cfi_offset %r12, -48
+; AVX512-NEXT: .cfi_offset %r13, -40
+; AVX512-NEXT: .cfi_offset %r14, -32
+; AVX512-NEXT: .cfi_offset %r15, -24
+; AVX512-NEXT: .cfi_offset %rbp, -16
+; AVX512-NEXT: vpextrq $1, %xmm0, %r10
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512-NEXT: vpextrq $1, %xmm2, %r13
+; AVX512-NEXT: vmovq %xmm2, %r15
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vmovq %xmm2, %rbp
+; AVX512-NEXT: vpextrq $1, %xmm2, %r12
+; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %r14
+; AVX512-NEXT: vpextrq $1, %xmm2, %rbx
+; AVX512-NEXT: vpextrq $1, %xmm1, %r9
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %r8
+; AVX512-NEXT: vpextrq $1, %xmm2, %rdx
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512-NEXT: vpextrq $1, %xmm2, %rcx
+; AVX512-NEXT: vmovq %xmm2, %r11
+; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %rdi
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: xorl %esi, %esi
+; AVX512-NEXT: addq %rbx, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: setb %sil
+; AVX512-NEXT: xorl %ebx, %ebx
+; AVX512-NEXT: addq %r14, %rdi
+; AVX512-NEXT: setb %bl
+; AVX512-NEXT: xorl %r14d, %r14d
+; AVX512-NEXT: addq %r12, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: setb %r14b
+; AVX512-NEXT: xorl %r12d, %r12d
+; AVX512-NEXT: addq %rbp, %r11
+; AVX512-NEXT: setb %r12b
+; AVX512-NEXT: xorl %ebp, %ebp
+; AVX512-NEXT: addq %r13, %rdx
+; AVX512-NEXT: setb %bpl
+; AVX512-NEXT: xorl %r13d, %r13d
+; AVX512-NEXT: addq %r15, %r8
+; AVX512-NEXT: setb %r13b
+; AVX512-NEXT: xorl %r15d, %r15d
+; AVX512-NEXT: addq %r10, %r9
+; AVX512-NEXT: vmovq %xmm0, %rcx
+; AVX512-NEXT: setb %r15b
+; AVX512-NEXT: vmovq %xmm1, %rax
+; AVX512-NEXT: xorl %r10d, %r10d
+; AVX512-NEXT: addq %rcx, %rax
+; AVX512-NEXT: setb %r10b
+; AVX512-NEXT: shldq $63, %rax, %r10
+; AVX512-NEXT: shldq $63, %r9, %r15
+; AVX512-NEXT: shldq $63, %r8, %r13
+; AVX512-NEXT: shldq $63, %rdx, %rbp
+; AVX512-NEXT: shldq $63, %r11, %r12
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: shldq $63, %rax, %r14
+; AVX512-NEXT: shldq $63, %rdi, %rbx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: shldq $63, %rax, %rsi
+; AVX512-NEXT: vmovq %rsi, %xmm0
+; AVX512-NEXT: vmovq %rbx, %xmm1
+; AVX512-NEXT: vmovq %r14, %xmm2
+; AVX512-NEXT: vmovq %r12, %xmm3
+; AVX512-NEXT: vmovq %rbp, %xmm4
+; AVX512-NEXT: vmovq %r13, %xmm5
+; AVX512-NEXT: vmovq %r15, %xmm6
+; AVX512-NEXT: vmovq %r10, %xmm7
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 8
+; AVX512-NEXT: retq
+ %x0 = zext <8 x i64> %a0 to <8 x i128>
+ %x1 = zext <8 x i64> %a1 to <8 x i128>
+ %sum = add <8 x i128> %x0, %x1
+ %shift = lshr <8 x i128> %sum, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <8 x i128> %shift to <8 x i64>
+ ret <8 x i64> %res
+}
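+;
+; v8i128 is not a legal type, so the v8i64 "ext" case is fully scalarised:
+; each lane keeps its 65-bit sum as a carry flag plus a 64-bit register
+; (addq + setb), and "shldq $63, %sum, %carry" then shifts the carry into
+; bit 63 while discarding the low bit, i.e. it computes (carry:sum) >> 1
+; without ever materialising a 128-bit integer.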
+
diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
index 34ef23d..234c7a0a 100644
--- a/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
+++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
@@ -553,8 +553,8 @@ define i8 @v8i32_or_select(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm2, %ymm3, %ymm1
-; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vorps %ymm0, %ymm3, %ymm0
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vmovmskps %ymm0, %eax
; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
@@ -571,8 +571,8 @@ define i8 @v8i32_or_select(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm0, %ymm2
; AVX2-NEXT: .LBB7_3:
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm1
-; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpor %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/callbr-asm-kill.mir b/llvm/test/CodeGen/X86/callbr-asm-kill.mir
index 86c58c4..0dded37 100644
--- a/llvm/test/CodeGen/X86/callbr-asm-kill.mir
+++ b/llvm/test/CodeGen/X86/callbr-asm-kill.mir
@@ -45,6 +45,7 @@ liveins:
- { reg: '$rsi', virtual-reg: '%3' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
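+  # adjustsStack is now stated explicitly; the machine verifier appears to
+  # require it alongside hasCalls once call-frame pseudos are present.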
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/CodeGen/X86/cmov.ll b/llvm/test/CodeGen/X86/cmov.ll
index 374e759..a8c068f 100644
--- a/llvm/test/CodeGen/X86/cmov.ll
+++ b/llvm/test/CodeGen/X86/cmov.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -disable-cgp-select2branch -x86-cmov-converter=false | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -disable-cgp-select2branch -x86-cmov-converter=false -mattr=+ndd --show-mc-encoding | FileCheck %s --check-prefix=NDD
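+; The NDD run exercises Intel APX "new data destination" encodings: EVEX
+; three-operand forms such as "cmovnel %esi, %edx, %eax" write their result
+; to a register distinct from both sources, and the "EVEX TO LEGACY
+; Compression" remarks flag instructions the encoder shrank back to the
+; two-operand legacy form because the destination coincides with a source.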
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
define i32 @test1(i32 %x, i32 %n, i32 %w, ptr %vp) nounwind readnone {
@@ -9,6 +10,13 @@ define i32 @test1(i32 %x, i32 %n, i32 %w, ptr %vp) nounwind readnone {
; CHECK-NEXT: movl $12, %eax
; CHECK-NEXT: cmovael (%rcx), %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test1:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: btl %esi, %edi # encoding: [0x0f,0xa3,0xf7]
+; NDD-NEXT: movl $12, %eax # encoding: [0xb8,0x0c,0x00,0x00,0x00]
+; NDD-NEXT: cmovael (%rcx), %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0x01]
+; NDD-NEXT: retq # encoding: [0xc3]
entry:
%0 = lshr i32 %x, %n
%1 = and i32 %0, 1
@@ -25,6 +33,13 @@ define i32 @test2(i32 %x, i32 %n, i32 %w, ptr %vp) nounwind readnone {
; CHECK-NEXT: movl $12, %eax
; CHECK-NEXT: cmovbl (%rcx), %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test2:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: btl %esi, %edi # encoding: [0x0f,0xa3,0xf7]
+; NDD-NEXT: movl $12, %eax # encoding: [0xb8,0x0c,0x00,0x00,0x00]
+; NDD-NEXT: cmovbl (%rcx), %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0x01]
+; NDD-NEXT: retq # encoding: [0xc3]
entry:
%0 = lshr i32 %x, %n
%1 = and i32 %0, 1
@@ -50,6 +65,16 @@ define void @test3(i64 %a, i64 %b, i1 %p) nounwind {
; CHECK-NEXT: callq bar@PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test3:
+; NDD: # %bb.0:
+; NDD-NEXT: pushq %rax # encoding: [0x50]
+; NDD-NEXT: testb $1, %dl # encoding: [0xf6,0xc2,0x01]
+; NDD-NEXT: cmovel %esi, %edi # EVEX TO LEGACY Compression encoding: [0x0f,0x44,0xfe]
+; NDD-NEXT: callq bar@PLT # encoding: [0xe8,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 1, value: bar@PLT-4, kind: FK_PCRel_4
+; NDD-NEXT: popq %rax # encoding: [0x58]
+; NDD-NEXT: retq # encoding: [0xc3]
%c = trunc i64 %a to i32
%d = trunc i64 %b to i32
%e = select i1 %p, i32 %c, i32 %d
@@ -114,6 +139,54 @@ define i1 @test4() nounwind {
; CHECK-NEXT: movl %ebx, %eax
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test4:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movsbl g_3(%rip), %eax # encoding: [0x0f,0xbe,0x05,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 3, value: g_3-4, kind: reloc_riprel_4byte
+; NDD-NEXT: movzbl %al, %ecx # encoding: [0x0f,0xb6,0xc8]
+; NDD-NEXT: shrl $7, %ecx # EVEX TO LEGACY Compression encoding: [0xc1,0xe9,0x07]
+; NDD-NEXT: xorb $1, %cl # EVEX TO LEGACY Compression encoding: [0x80,0xf1,0x01]
+; NDD-NEXT: sarl %cl, %eax, %ecx # encoding: [0x62,0xf4,0x74,0x18,0xd3,0xf8]
+; NDD-NEXT: movzbl g_96(%rip), %eax # encoding: [0x0f,0xb6,0x05,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 3, value: g_96-4, kind: reloc_riprel_4byte
+; NDD-NEXT: testb %al, %al # encoding: [0x84,0xc0]
+; NDD-NEXT: je .LBB3_2 # encoding: [0x74,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB3_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %bb.i.i.i
+; NDD-NEXT: movzbl g_100(%rip), %edx # encoding: [0x0f,0xb6,0x15,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 3, value: g_100-4, kind: reloc_riprel_4byte
+; NDD-NEXT: .LBB3_2: # %func_4.exit.i
+; NDD-NEXT: pushq %rbx # encoding: [0x53]
+; NDD-NEXT: xorl %edx, %edx # encoding: [0x31,0xd2]
+; NDD-NEXT: testb %cl, %cl # encoding: [0x84,0xc9]
+; NDD-NEXT: setne %bl # encoding: [0x0f,0x95,0xc3]
+; NDD-NEXT: movzbl %al, %ecx # encoding: [0x0f,0xb6,0xc8]
+; NDD-NEXT: cmovnel %edx, %ecx # EVEX TO LEGACY Compression encoding: [0x0f,0x45,0xca]
+; NDD-NEXT: testb %al, %al # encoding: [0x84,0xc0]
+; NDD-NEXT: je .LBB3_5 # encoding: [0x74,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB3_5-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.3: # %func_4.exit.i
+; NDD-NEXT: testb %bl, %bl # encoding: [0x84,0xdb]
+; NDD-NEXT: jne .LBB3_5 # encoding: [0x75,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB3_5-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.4: # %bb.i.i
+; NDD-NEXT: movzbl g_100(%rip), %ecx # encoding: [0x0f,0xb6,0x0d,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 3, value: g_100-4, kind: reloc_riprel_4byte
+; NDD-NEXT: xorl %ebx, %ebx # encoding: [0x31,0xdb]
+; NDD-NEXT: movl %eax, %ecx # encoding: [0x89,0xc1]
+; NDD-NEXT: .LBB3_5: # %func_1.exit
+; NDD-NEXT: movb %cl, g_96(%rip) # encoding: [0x88,0x0d,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 2, value: g_96-4, kind: reloc_riprel_4byte
+; NDD-NEXT: movzbl %cl, %esi # encoding: [0x0f,0xb6,0xf1]
+; NDD-NEXT: movl $_2E_str, %edi # encoding: [0xbf,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 1, value: _2E_str, kind: FK_Data_4
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: callq printf@PLT # encoding: [0xe8,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 1, value: printf@PLT-4, kind: FK_PCRel_4
+; NDD-NEXT: movl %ebx, %eax # encoding: [0x89,0xd8]
+; NDD-NEXT: popq %rbx # encoding: [0x5b]
+; NDD-NEXT: retq # encoding: [0xc3]
entry:
%0 = load i8, ptr @g_3, align 1
%1 = sext i8 %0 to i32
@@ -163,6 +236,14 @@ define i32 @test5(ptr nocapture %P) nounwind readonly {
; CHECK-NEXT: setge %al
; CHECK-NEXT: orl $-2, %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test5:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: cmpl $42, (%rdi) # encoding: [0x83,0x3f,0x2a]
+; NDD-NEXT: setge %al # encoding: [0x0f,0x9d,0xc0]
+; NDD-NEXT: orl $-2, %eax # EVEX TO LEGACY Compression encoding: [0x83,0xc8,0xfe]
+; NDD-NEXT: retq # encoding: [0xc3]
entry:
%0 = load i32, ptr %P, align 4
%1 = icmp sgt i32 %0, 41
@@ -178,6 +259,14 @@ define i32 @test6(ptr nocapture %P) nounwind readonly {
; CHECK-NEXT: setl %al
; CHECK-NEXT: leal 4(%rax,%rax,8), %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test6:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: cmpl $42, (%rdi) # encoding: [0x83,0x3f,0x2a]
+; NDD-NEXT: setl %al # encoding: [0x0f,0x9c,0xc0]
+; NDD-NEXT: leal 4(%rax,%rax,8), %eax # encoding: [0x8d,0x44,0xc0,0x04]
+; NDD-NEXT: retq # encoding: [0xc3]
entry:
%0 = load i32, ptr %P, align 4
%1 = icmp sgt i32 %0, 41
@@ -194,6 +283,13 @@ define i8 @test7(i1 inreg %c, i8 inreg %a, i8 inreg %b) nounwind {
; CHECK-NEXT: cmovel %edx, %eax
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test7:
+; NDD: # %bb.0:
+; NDD-NEXT: testb $1, %dil # encoding: [0x40,0xf6,0xc7,0x01]
+; NDD-NEXT: cmovnel %esi, %edx, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x45,0xd6]
+; NDD-NEXT: # kill: def $al killed $al killed $eax
+; NDD-NEXT: retq # encoding: [0xc3]
%d = select i1 %c, i8 %a, i8 %b
ret i8 %d
}
@@ -205,6 +301,13 @@ define i64 @test8(i64 %0, i64 %1, i64 %2) {
; CHECK-NEXT: cmpq $-2147483648, %rdi # imm = 0x80000000
; CHECK-NEXT: cmovlq %rdx, %rax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test8:
+; NDD: # %bb.0:
+; NDD-NEXT: cmpq $-2147483648, %rdi # encoding: [0x48,0x81,0xff,0x00,0x00,0x00,0x80]
+; NDD-NEXT: # imm = 0x80000000
+; NDD-NEXT: cmovgeq %rsi, %rdx, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x4d,0xd6]
+; NDD-NEXT: retq # encoding: [0xc3]
%4 = icmp sgt i64 %0, -2147483649
%5 = select i1 %4, i64 %1, i64 %2
ret i64 %5
@@ -218,6 +321,14 @@ define i32 @smin(i32 %x) {
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: cmovnsl %edi, %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: smin:
+; NDD: # %bb.0:
+; NDD-NEXT: notl %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0xf7,0xd7]
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: movl $-1, %ecx # encoding: [0xb9,0xff,0xff,0xff,0xff]
+; NDD-NEXT: cmovsl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x48,0xc1]
+; NDD-NEXT: retq # encoding: [0xc3]
%not_x = xor i32 %x, -1
%1 = icmp slt i32 %not_x, -1
%sel = select i1 %1, i32 %not_x, i32 -1
@@ -231,6 +342,13 @@ define i32 @pr47049_1(i32 %0) {
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: cmovlel %edi, %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: pr47049_1:
+; NDD: # %bb.0:
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00]
+; NDD-NEXT: cmovlel %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x4e,0xc7]
+; NDD-NEXT: retq # encoding: [0xc3]
%2 = icmp slt i32 %0, 1
%3 = select i1 %2, i32 %0, i32 1
ret i32 %3
@@ -243,6 +361,13 @@ define i32 @pr47049_2(i32 %0) {
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: cmovnsl %edi, %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: pr47049_2:
+; NDD: # %bb.0:
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: movl $-1, %eax # encoding: [0xb8,0xff,0xff,0xff,0xff]
+; NDD-NEXT: cmovnsl %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x49,0xc7]
+; NDD-NEXT: retq # encoding: [0xc3]
%2 = icmp sgt i32 %0, -1
%3 = select i1 %2, i32 %0, i32 -1
ret i32 %3
@@ -255,6 +380,13 @@ define i32 @pr47049_3(i32 %0) {
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: cmovgl %edi, %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: pr47049_3:
+; NDD: # %bb.0:
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00]
+; NDD-NEXT: cmovgl %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x4f,0xc7]
+; NDD-NEXT: retq # encoding: [0xc3]
%2 = icmp sgt i32 %0, 1
%3 = select i1 %2, i32 %0, i32 1
ret i32 %3
@@ -267,6 +399,13 @@ define i32 @pr47049_4(i32 %0) {
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: cmovnel %edi, %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: pr47049_4:
+; NDD: # %bb.0:
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00]
+; NDD-NEXT: cmovnel %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x45,0xc7]
+; NDD-NEXT: retq # encoding: [0xc3]
%2 = icmp ugt i32 %0, 1
%3 = select i1 %2, i32 %0, i32 1
ret i32 %3
diff --git a/llvm/test/CodeGen/X86/cmp.ll b/llvm/test/CodeGen/X86/cmp.ll
index cd1953b..30e52f0 100644
--- a/llvm/test/CodeGen/X86/cmp.ll
+++ b/llvm/test/CodeGen/X86/cmp.ll
@@ -416,9 +416,8 @@ define i32 @test13(i32 %mask, i32 %base, i32 %intra) {
;
; NDD-LABEL: test13:
; NDD: # %bb.0:
-; NDD-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; NDD-NEXT: testb $8, %dil # encoding: [0x40,0xf6,0xc7,0x08]
-; NDD-NEXT: cmovnel %edx, %eax # encoding: [0x0f,0x45,0xc2]
+; NDD-NEXT: cmovnel %edx, %esi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x45,0xf2]
; NDD-NEXT: retq # encoding: [0xc3]
%and = and i32 %mask, 8
%tobool = icmp ne i32 %and, 0
@@ -436,9 +435,8 @@ define i32 @test14(i32 %mask, i32 %base, i32 %intra) {
;
; NDD-LABEL: test14:
; NDD: # %bb.0:
-; NDD-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
-; NDD-NEXT: shrl $7, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0xc1,0xef,0x07]
-; NDD-NEXT: cmovnsl %edx, %eax # encoding: [0x0f,0x49,0xc2]
+; NDD-NEXT: shrl $7, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0xc1,0xef,0x07]
+; NDD-NEXT: cmovnsl %edx, %esi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x49,0xf2]
; NDD-NEXT: retq # encoding: [0xc3]
%s = lshr i32 %mask, 7
%tobool = icmp sgt i32 %s, -1
@@ -1100,9 +1098,8 @@ define { i64, i64 } @pr39968(i64, i64, i32) {
; NDD: # %bb.0:
; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; NDD-NEXT: testb $64, %dl # encoding: [0xf6,0xc2,0x40]
-; NDD-NEXT: cmovneq %rdi, %rsi # encoding: [0x48,0x0f,0x45,0xf7]
-; NDD-NEXT: cmovneq %rdi, %rax # encoding: [0x48,0x0f,0x45,0xc7]
-; NDD-NEXT: movq %rsi, %rdx # encoding: [0x48,0x89,0xf2]
+; NDD-NEXT: cmovneq %rdi, %rsi, %rdx # encoding: [0x62,0xf4,0xec,0x18,0x45,0xf7]
+; NDD-NEXT: cmovneq %rdi, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x45,0xc7]
; NDD-NEXT: retq # encoding: [0xc3]
%4 = and i32 %2, 64
%5 = icmp ne i32 %4, 0
diff --git a/llvm/test/CodeGen/X86/combine-pavg.ll b/llvm/test/CodeGen/X86/combine-pavg.ll
index 0743592..7a8ddf5 100644
--- a/llvm/test/CodeGen/X86/combine-pavg.ll
+++ b/llvm/test/CodeGen/X86/combine-pavg.ll
@@ -18,6 +18,22 @@ define <16 x i8> @combine_pavgb_self(<16 x i8> %a0) {
ret <16 x i8> %1
}
+define <16 x i8> @combine_pavgb_zero(<16 x i8> %a0) {
+; SSE-LABEL: combine_pavgb_zero:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pavgb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pavgb_zero:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> zeroinitializer, <16 x i8> %a0)
+ ret <16 x i8> %1
+}
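+;
+; PAVGB computes the rounding-up average (a + b + 1) >> 1, so a zero operand
+; degenerates to (x + 1) >> 1; the checks above pin down the current codegen,
+; in which the zero is still materialised with pxor and fed to pavgb.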
+
define <16 x i8> @combine_pavgw_knownbits(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
; SSE-LABEL: combine_pavgw_knownbits:
; SSE: # %bb.0:
@@ -64,3 +80,33 @@ define <16 x i8> @combine_pavgw_knownbits(<8 x i16> %a0, <8 x i16> %a1, <8 x i16
%trunc = trunc <16 x i16> %shuffle to <16 x i8>
ret <16 x i8> %trunc
}
+
+define <8 x i16> @combine_pavgw_demandedelts(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: combine_pavgw_demandedelts:
+; SSE: # %bb.0:
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,8,9,8,9,12,13,12,13]
+; SSE-NEXT: pavgw %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: combine_pavgw_demandedelts:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,8,9,8,9,12,13,12,13]
+; AVX1-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_pavgw_demandedelts:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
+; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
+; AVX2-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+ %s0 = shufflevector <8 x i16> %a0, <8 x i16> poison, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ %avg = tail call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %s0, <8 x i16> %a1)
+ %shuffle = shufflevector <8 x i16> %avg, <8 x i16> poison, <8 x i32> zeroinitializer
+ ret <8 x i16> %shuffle
+}
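+;
+; Only element 0 of the pavgw result survives the final splat shuffle, so
+; demanded-elements simplification may rewrite the feeding shuffles as plain
+; broadcasts of lane 0, which is what the AVX2 output shows: two vpbroadcastw
+; feeding a single vpavgw, with no shuffle needed afterwards.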
+
diff --git a/llvm/test/CodeGen/X86/combine-sra.ll b/llvm/test/CodeGen/X86/combine-sra.ll
index 0675ced..7eee418 100644
--- a/llvm/test/CodeGen/X86/combine-sra.ll
+++ b/llvm/test/CodeGen/X86/combine-sra.ll
@@ -521,3 +521,276 @@ define <4 x i32> @combine_vec_ashr_positive_splat(<4 x i32> %x, <4 x i32> %y) {
%2 = ashr <4 x i32> %1, <i32 10, i32 10, i32 10, i32 10>
ret <4 x i32> %2
}
+
+define <8 x i16> @combine_vec8i16_ashr_clamped(<8 x i16> %x, <8 x i16> %y) {
+; SSE2-LABEL: combine_vec8i16_ashr_clamped:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psubusw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE2-NEXT: psubw %xmm2, %xmm1
+; SSE2-NEXT: psllw $12, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psraw $15, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm0, %xmm3
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: paddw %xmm1, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psraw $15, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm0, %xmm3
+; SSE2-NEXT: psraw $4, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: paddw %xmm1, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psraw $15, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm0, %xmm3
+; SSE2-NEXT: psraw $2, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: paddw %xmm1, %xmm1
+; SSE2-NEXT: psraw $15, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm0, %xmm2
+; SSE2-NEXT: psraw $1, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec8i16_ashr_clamped:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psllw $12, %xmm0
+; SSE41-NEXT: psllw $4, %xmm1
+; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: paddw %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psraw $8, %xmm3
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psraw $4, %xmm3
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psraw $2, %xmm3
+; SSE41-NEXT: paddw %xmm1, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psraw $1, %xmm3
+; SSE41-NEXT: paddw %xmm1, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX2-LABEL: combine_vec8i16_ashr_clamped:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: combine_vec8i16_ashr_clamped:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsravw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %1 = tail call <8 x i16> @llvm.umin.v8i16(<8 x i16> %y, <8 x i16> <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>)
+ %2 = ashr <8 x i16> %x, %1
+ ret <8 x i16> %2
+}
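+;
+; Clamping the variable shift amounts to bitwidth-1 with umin makes the ashr
+; well defined for every input, and x86's variable arithmetic shifts already
+; saturate oversized counts to an all-sign-bits result, so a target with
+; native vpsravw (AVX512BW above) can drop the clamp and shift directly;
+; AVX2 lacks vpsravw and instead widens the v8i16 case to 32-bit vpsravd.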
+
+define <4 x i32> @combine_vec4i32_ashr_clamped(<4 x i32> %x, <4 x i32> %y) {
+; SSE2-LABEL: combine_vec4i32_ashr_clamped:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm1, %xmm3
+; SSE2-NEXT: psrld $27, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrad %xmm1, %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad %xmm4, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrad %xmm3, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: psrad %xmm2, %xmm0
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec4i32_ashr_clamped:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrad %xmm2, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: psrad %xmm4, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrad %xmm1, %xmm3
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: psrad %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: combine_vec4i32_ashr_clamped:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsravd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %y, <4 x i32> <i32 31, i32 31, i32 31, i32 31>)
+ %2 = ashr <4 x i32> %x, %1
+ ret <4 x i32> %2
+}
+
+define <4 x i64> @combine_vec4i64_ashr_clamped(<4 x i64> %x, <4 x i64> %y) {
+; SSE2-LABEL: combine_vec4i64_ashr_clamped:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm5, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483711,2147483711,2147483711,2147483711]
+; SSE2-NEXT: movdqa %xmm7, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm8
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: pcmpeqd %xmm5, %xmm4
+; SSE2-NEXT: pand %xmm8, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [63,63]
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pandn %xmm6, %xmm4
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm5, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: pcmpeqd %xmm5, %xmm3
+; SSE2-NEXT: pand %xmm7, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pandn %xmm6, %xmm3
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: psrlq %xmm3, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,3,2,3]
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: psrlq %xmm6, %xmm7
+; SSE2-NEXT: movsd {{.*#+}} xmm7 = xmm5[0],xmm7[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: psrlq %xmm3, %xmm5
+; SSE2-NEXT: psrlq %xmm6, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
+; SSE2-NEXT: xorpd %xmm7, %xmm0
+; SSE2-NEXT: psubq %xmm7, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: psrlq %xmm4, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
+; SSE2-NEXT: psrlq %xmm5, %xmm2
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: psrlq %xmm4, %xmm3
+; SSE2-NEXT: psrlq %xmm5, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
+; SSE2-NEXT: xorpd %xmm2, %xmm1
+; SSE2-NEXT: psubq %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec4i64_ashr_clamped:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm7, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259519,9223372039002259519]
+; SSE41-NEXT: movdqa %xmm8, %xmm6
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483711,2147483711,2147483711,2147483711]
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: pcmpgtd %xmm9, %xmm0
+; SSE41-NEXT: pand %xmm6, %xmm0
+; SSE41-NEXT: movapd {{.*#+}} xmm9 = [63,63]
+; SSE41-NEXT: movapd %xmm9, %xmm6
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm6
+; SSE41-NEXT: pxor %xmm2, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm7, %xmm8
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
+; SSE41-NEXT: pand %xmm8, %xmm5
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm9
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrlq %xmm9, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm9[2,3,2,3]
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: psrlq %xmm3, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: movdqa %xmm4, %xmm2
+; SSE41-NEXT: psrlq %xmm9, %xmm2
+; SSE41-NEXT: psrlq %xmm3, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1,2,3],xmm4[4,5,6,7]
+; SSE41-NEXT: pxor %xmm5, %xmm4
+; SSE41-NEXT: psubq %xmm5, %xmm4
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrlq %xmm6, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
+; SSE41-NEXT: psrlq %xmm3, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrlq %xmm6, %xmm2
+; SSE41-NEXT: psrlq %xmm3, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: psubq %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm4, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX2-LABEL: combine_vec4i64_ashr_clamped:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [9223372036854775870,9223372036854775870,9223372036854775870,9223372036854775870]
+; AVX2-NEXT: vpcmpgtq %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm4 = [63,63,63,63]
+; AVX2-NEXT: vblendvpd %ymm3, %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: combine_vec4i64_ashr_clamped:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsravq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %1 = tail call <4 x i64> @llvm.umin.v4i64(<4 x i64> %y, <4 x i64> <i64 63, i64 63, i64 63, i64 63>)
+ %2 = ashr <4 x i64> %x, %1
+ ret <4 x i64> %2
+}
diff --git a/llvm/test/CodeGen/X86/dagcombine-shifts.ll b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
index 42b325d..734abfe 100644
--- a/llvm/test/CodeGen/X86/dagcombine-shifts.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
@@ -322,5 +322,132 @@ define void @g(i32 %a) nounwind {
ret void
}
+define i32 @shift_zext_shl(i8 zeroext %x) {
+; X86-LABEL: shift_zext_shl:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $64, %eax
+; X86-NEXT: shll $9, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shift_zext_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $64, %eax
+; X64-NEXT: shll $9, %eax
+; X64-NEXT: retq
+ %a = and i8 %x, 64
+ %b = zext i8 %a to i16
+ %c = shl i16 %b, 9
+ %d = zext i16 %c to i32
+ ret i32 %d
+}
+
+define i32 @shift_zext_shl2(i8 zeroext %x) {
+; X86-LABEL: shift_zext_shl2:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $64, %eax
+; X86-NEXT: shll $9, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shift_zext_shl2:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $64, %eax
+; X64-NEXT: shll $9, %eax
+; X64-NEXT: retq
+ %a = and i8 %x, 64
+ %b = zext i8 %a to i32
+ %c = shl i32 %b, 9
+ ret i32 %c
+}
+
+define <4 x i32> @shift_zext_shl_vec(<4 x i8> %x) nounwind {
+; X86-LABEL: shift_zext_shl_vec:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $64, %ecx
+; X86-NEXT: shll $9, %ecx
+; X86-NEXT: andl $63, %edx
+; X86-NEXT: shll $8, %edx
+; X86-NEXT: andl $31, %esi
+; X86-NEXT: shll $7, %esi
+; X86-NEXT: andl $23, %edi
+; X86-NEXT: shll $6, %edi
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: movl %esi, 8(%eax)
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+;
+; X64-LABEL: shift_zext_shl_vec:
+; X64: # %bb.0:
+; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: pxor %xmm1, %xmm1
+; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X64-NEXT: retq
+ %a = and <4 x i8> %x, <i8 64, i8 63, i8 31, i8 23>
+ %b = zext <4 x i8> %a to <4 x i16>
+ %c = shl <4 x i16> %b, <i16 9, i16 8, i16 7, i16 6>
+ %d = zext <4 x i16> %c to <4 x i32>
+ ret <4 x i32> %d
+}
+
+define <4 x i32> @shift_zext_shl2_vec(<4 x i8> %x) nounwind {
+; X86-LABEL: shift_zext_shl2_vec:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: andl $23, %edi
+; X86-NEXT: andl $31, %esi
+; X86-NEXT: andl $63, %edx
+; X86-NEXT: andl $64, %ecx
+; X86-NEXT: shll $9, %ecx
+; X86-NEXT: shll $8, %edx
+; X86-NEXT: shll $7, %esi
+; X86-NEXT: shll $6, %edi
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: movl %esi, 8(%eax)
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+;
+; X64-LABEL: shift_zext_shl2_vec:
+; X64: # %bb.0:
+; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: pxor %xmm1, %xmm1
+; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-NEXT: retq
+ %a = and <4 x i8> %x, <i8 64, i8 63, i8 31, i8 23>
+ %b = zext <4 x i8> %a to <4 x i32>
+ %c = shl <4 x i32> %b, <i32 9, i32 8, i32 7, i32 6>
+ ret <4 x i32> %c
+}
+
declare dso_local void @f(i64)
diff --git a/llvm/test/CodeGen/X86/extractelement-load.ll b/llvm/test/CodeGen/X86/extractelement-load.ll
index 9d573ef..022b25a 100644
--- a/llvm/test/CodeGen/X86/extractelement-load.ll
+++ b/llvm/test/CodeGen/X86/extractelement-load.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=X64,X64-SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX2
@@ -7,23 +7,16 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define i32 @t(ptr %val) nounwind {
-; X32-SSE2-LABEL: t:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,2,3]
-; X32-SSE2-NEXT: movd %xmm0, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl 8(%eax), %eax
+; X86-SSE2-NEXT: retl
;
-; X64-SSSE3-LABEL: t:
-; X64-SSSE3: # %bb.0:
-; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,2,3]
-; X64-SSSE3-NEXT: movd %xmm0, %eax
-; X64-SSSE3-NEXT: retq
-;
-; X64-AVX-LABEL: t:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: movl 8(%rdi), %eax
-; X64-AVX-NEXT: retq
+; X64-LABEL: t:
+; X64: # %bb.0:
+; X64-NEXT: movl 8(%rdi), %eax
+; X64-NEXT: retq
%tmp2 = load <2 x i64>, ptr %val, align 16 ; <<2 x i64>> [#uses=1]
%tmp3 = bitcast <2 x i64> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp3, i32 2 ; <i32> [#uses=1]
@@ -33,9 +26,9 @@ define i32 @t(ptr %val) nounwind {
; Case where extractelement of load ends up as undef.
; (Making sure this doesn't crash.)
define i32 @t2(ptr %xp) {
-; X32-SSE2-LABEL: t2:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t2:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: retl
;
; X64-LABEL: t2:
; X64: # %bb.0:
@@ -51,12 +44,12 @@ define i32 @t2(ptr %xp) {
; narrow load.
define void @t3(ptr %a0) {
-; X32-SSE2-LABEL: t3:
-; X32-SSE2: # %bb.0: # %bb
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movups (%eax), %xmm0
-; X32-SSE2-NEXT: movhps %xmm0, (%eax)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t3:
+; X86-SSE2: # %bb.0: # %bb
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movups (%eax), %xmm0
+; X86-SSE2-NEXT: movhps %xmm0, (%eax)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t3:
; X64-SSSE3: # %bb.0: # %bb
@@ -81,14 +74,12 @@ bb:
; This is testing for an assertion - the extraction was assuming that the undef
; second shuffle operand was a post-bitcast type instead of a pre-bitcast type.
define i64 @t4(ptr %a) {
-; X32-SSE2-LABEL: t4:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movdqa (%eax), %xmm0
-; X32-SSE2-NEXT: movd %xmm0, %eax
-; X32-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-SSE2-NEXT: movd %xmm0, %edx
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t4:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl (%ecx), %eax
+; X86-SSE2-NEXT: movl 4(%ecx), %edx
+; X86-SSE2-NEXT: retl
;
; X64-LABEL: t4:
; X64: # %bb.0:
@@ -103,13 +94,13 @@ define i64 @t4(ptr %a) {
; Don't extract from a volatile.
define void @t5(ptr%a0, ptr%a1) {
-; X32-SSE2-LABEL: t5:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movaps (%ecx), %xmm0
-; X32-SSE2-NEXT: movhps %xmm0, (%eax)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t5:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movaps (%ecx), %xmm0
+; X86-SSE2-NEXT: movhps %xmm0, (%eax)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t5:
; X64-SSSE3: # %bb.0:
@@ -130,24 +121,24 @@ define void @t5(ptr%a0, ptr%a1) {
; Check for multiuse.
define float @t6(ptr%a0) {
-; X32-SSE2-LABEL: t6:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pushl %eax
-; X32-SSE2-NEXT: .cfi_def_cfa_offset 8
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movaps (%eax), %xmm0
-; X32-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-SSE2-NEXT: xorps %xmm1, %xmm1
-; X32-SSE2-NEXT: cmpeqss %xmm0, %xmm1
-; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
-; X32-SSE2-NEXT: andps %xmm1, %xmm2
-; X32-SSE2-NEXT: andnps %xmm0, %xmm1
-; X32-SSE2-NEXT: orps %xmm2, %xmm1
-; X32-SSE2-NEXT: movss %xmm1, (%esp)
-; X32-SSE2-NEXT: flds (%esp)
-; X32-SSE2-NEXT: popl %eax
-; X32-SSE2-NEXT: .cfi_def_cfa_offset 4
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t6:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: .cfi_def_cfa_offset 8
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movaps (%eax), %xmm0
+; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE2-NEXT: xorps %xmm1, %xmm1
+; X86-SSE2-NEXT: cmpeqss %xmm0, %xmm1
+; X86-SSE2-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE2-NEXT: andps %xmm1, %xmm2
+; X86-SSE2-NEXT: andnps %xmm0, %xmm1
+; X86-SSE2-NEXT: orps %xmm2, %xmm1
+; X86-SSE2-NEXT: movss %xmm1, (%esp)
+; X86-SSE2-NEXT: flds (%esp)
+; X86-SSE2-NEXT: popl %eax
+; X86-SSE2-NEXT: .cfi_def_cfa_offset 4
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t6:
; X64-SSSE3: # %bb.0:
@@ -184,20 +175,20 @@ define float @t6(ptr%a0) {
}
define void @PR43971(ptr%a0, ptr%a1) {
-; X32-SSE2-LABEL: PR43971:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movaps 16(%ecx), %xmm0
-; X32-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; X32-SSE2-NEXT: xorps %xmm1, %xmm1
-; X32-SSE2-NEXT: cmpltss %xmm0, %xmm1
-; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-SSE2-NEXT: andps %xmm1, %xmm2
-; X32-SSE2-NEXT: andnps %xmm0, %xmm1
-; X32-SSE2-NEXT: orps %xmm2, %xmm1
-; X32-SSE2-NEXT: movss %xmm1, (%eax)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: PR43971:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movaps 16(%ecx), %xmm0
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE2-NEXT: xorps %xmm1, %xmm1
+; X86-SSE2-NEXT: cmpltss %xmm0, %xmm1
+; X86-SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: andps %xmm1, %xmm2
+; X86-SSE2-NEXT: andnps %xmm0, %xmm1
+; X86-SSE2-NEXT: orps %xmm2, %xmm1
+; X86-SSE2-NEXT: movss %xmm1, (%eax)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: PR43971:
; X64-SSSE3: # %bb.0: # %entry
@@ -231,22 +222,22 @@ entry:
}
define float @PR43971_1(ptr%a0) nounwind {
-; X32-SSE2-LABEL: PR43971_1:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: pushl %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movaps (%eax), %xmm0
-; X32-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-SSE2-NEXT: xorps %xmm1, %xmm1
-; X32-SSE2-NEXT: cmpeqss %xmm0, %xmm1
-; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
-; X32-SSE2-NEXT: andps %xmm1, %xmm2
-; X32-SSE2-NEXT: andnps %xmm0, %xmm1
-; X32-SSE2-NEXT: orps %xmm2, %xmm1
-; X32-SSE2-NEXT: movss %xmm1, (%esp)
-; X32-SSE2-NEXT: flds (%esp)
-; X32-SSE2-NEXT: popl %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: PR43971_1:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movaps (%eax), %xmm0
+; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE2-NEXT: xorps %xmm1, %xmm1
+; X86-SSE2-NEXT: cmpeqss %xmm0, %xmm1
+; X86-SSE2-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE2-NEXT: andps %xmm1, %xmm2
+; X86-SSE2-NEXT: andnps %xmm0, %xmm1
+; X86-SSE2-NEXT: orps %xmm2, %xmm1
+; X86-SSE2-NEXT: movss %xmm1, (%esp)
+; X86-SSE2-NEXT: flds (%esp)
+; X86-SSE2-NEXT: popl %eax
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: PR43971_1:
; X64-SSSE3: # %bb.0: # %entry
@@ -283,17 +274,48 @@ entry:
ret float %cond
}
+define i32 @PR85419(ptr %p0) {
+; X86-SSE2-LABEL: PR85419:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl (%ecx), %edx
+; X86-SSE2-NEXT: xorl %eax, %eax
+; X86-SSE2-NEXT: orl 4(%ecx), %edx
+; X86-SSE2-NEXT: je .LBB8_2
+; X86-SSE2-NEXT: # %bb.1:
+; X86-SSE2-NEXT: movl 8(%ecx), %eax
+; X86-SSE2-NEXT: .LBB8_2:
+; X86-SSE2-NEXT: retl
+;
+; X64-LABEL: PR85419:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq $0, (%rdi)
+; X64-NEXT: je .LBB8_2
+; X64-NEXT: # %bb.1:
+; X64-NEXT: movl 8(%rdi), %eax
+; X64-NEXT: .LBB8_2:
+; X64-NEXT: retq
+ %load = load <2 x i64>, ptr %p0, align 16
+ %vecext.i = extractelement <2 x i64> %load, i64 0
+ %cmp = icmp eq i64 %vecext.i, 0
+ %.cast = bitcast <2 x i64> %load to <4 x i32>
+ %vecext.i2 = extractelement <4 x i32> %.cast, i64 2
+ %retval.0 = select i1 %cmp, i32 0, i32 %vecext.i2
+ ret i32 %retval.0
+}
+
; Test for bad extractions from a VBROADCAST_LOAD of the <2 x i16> non-uniform constant bitcast as <4 x i32>.
define void @subextract_broadcast_load_constant(ptr nocapture %0, ptr nocapture %1, ptr nocapture %2) nounwind {
-; X32-SSE2-LABEL: subextract_broadcast_load_constant:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-SSE2-NEXT: movl $-1583308898, (%edx) # imm = 0xA1A09F9E
-; X32-SSE2-NEXT: movw $-24674, (%ecx) # imm = 0x9F9E
-; X32-SSE2-NEXT: movw $-24160, (%eax) # imm = 0xA1A0
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: subextract_broadcast_load_constant:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: movl $-1583308898, (%edx) # imm = 0xA1A09F9E
+; X86-SSE2-NEXT: movw $-24674, (%ecx) # imm = 0x9F9E
+; X86-SSE2-NEXT: movw $-24160, (%eax) # imm = 0xA1A0
+; X86-SSE2-NEXT: retl
;
; X64-LABEL: subextract_broadcast_load_constant:
; X64: # %bb.0:
@@ -319,15 +341,15 @@ define void @subextract_broadcast_load_constant(ptr nocapture %0, ptr nocapture
; A scalar load is favored over an XMM->GPR register transfer in this example.
define i32 @multi_use_load_scalarization(ptr %p) nounwind {
-; X32-SSE2-LABEL: multi_use_load_scalarization:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl (%ecx), %eax
-; X32-SSE2-NEXT: movdqu (%ecx), %xmm0
-; X32-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; X32-SSE2-NEXT: psubd %xmm1, %xmm0
-; X32-SSE2-NEXT: movdqa %xmm0, (%ecx)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: multi_use_load_scalarization:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl (%ecx), %eax
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-SSE2-NEXT: psubd %xmm1, %xmm0
+; X86-SSE2-NEXT: movdqa %xmm0, (%ecx)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: multi_use_load_scalarization:
; X64-SSSE3: # %bb.0:
@@ -354,15 +376,15 @@ define i32 @multi_use_load_scalarization(ptr %p) nounwind {
}
define i32 @multi_use_volatile_load_scalarization(ptr %p) nounwind {
-; X32-SSE2-LABEL: multi_use_volatile_load_scalarization:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movdqu (%ecx), %xmm0
-; X32-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; X32-SSE2-NEXT: movd %xmm0, %eax
-; X32-SSE2-NEXT: psubd %xmm1, %xmm0
-; X32-SSE2-NEXT: movdqa %xmm0, (%ecx)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: multi_use_volatile_load_scalarization:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-SSE2-NEXT: movd %xmm0, %eax
+; X86-SSE2-NEXT: psubd %xmm1, %xmm0
+; X86-SSE2-NEXT: movdqa %xmm0, (%ecx)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: multi_use_volatile_load_scalarization:
; X64-SSSE3: # %bb.0:
@@ -398,41 +420,41 @@ define i32 @multi_use_volatile_load_scalarization(ptr %p) nounwind {
@zero = internal unnamed_addr global <8 x i32> zeroinitializer, align 32
define i32 @main() nounwind {
-; X32-SSE2-LABEL: main:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pushl %ebp
-; X32-SSE2-NEXT: movl %esp, %ebp
-; X32-SSE2-NEXT: pushl %esi
-; X32-SSE2-NEXT: andl $-32, %esp
-; X32-SSE2-NEXT: subl $64, %esp
-; X32-SSE2-NEXT: movdqa zero, %xmm0
-; X32-SSE2-NEXT: movaps n1+16, %xmm1
-; X32-SSE2-NEXT: movaps n1, %xmm2
-; X32-SSE2-NEXT: movaps %xmm2, zero
-; X32-SSE2-NEXT: movaps %xmm1, zero+16
-; X32-SSE2-NEXT: movaps {{.*#+}} xmm1 = [2,2,2,2]
-; X32-SSE2-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
-; X32-SSE2-NEXT: movaps %xmm1, (%esp)
-; X32-SSE2-NEXT: movdqa (%esp), %xmm1
-; X32-SSE2-NEXT: movaps {{[0-9]+}}(%esp), %xmm2
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X32-SSE2-NEXT: movd %xmm2, %eax
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; X32-SSE2-NEXT: movd %xmm2, %ecx
-; X32-SSE2-NEXT: xorl %edx, %edx
-; X32-SSE2-NEXT: divl %ecx
-; X32-SSE2-NEXT: movl %eax, %ecx
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-SSE2-NEXT: movd %xmm0, %eax
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; X32-SSE2-NEXT: movd %xmm0, %esi
-; X32-SSE2-NEXT: xorl %edx, %edx
-; X32-SSE2-NEXT: divl %esi
-; X32-SSE2-NEXT: addl %ecx, %eax
-; X32-SSE2-NEXT: leal -4(%ebp), %esp
-; X32-SSE2-NEXT: popl %esi
-; X32-SSE2-NEXT: popl %ebp
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: main:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: pushl %edi
+; X86-SSE2-NEXT: pushl %esi
+; X86-SSE2-NEXT: andl $-32, %esp
+; X86-SSE2-NEXT: subl $64, %esp
+; X86-SSE2-NEXT: movaps n1+16, %xmm0
+; X86-SSE2-NEXT: movaps n1, %xmm1
+; X86-SSE2-NEXT: movl zero+4, %ecx
+; X86-SSE2-NEXT: movl zero+8, %eax
+; X86-SSE2-NEXT: movaps %xmm1, zero
+; X86-SSE2-NEXT: movaps %xmm0, zero+16
+; X86-SSE2-NEXT: movaps {{.*#+}} xmm0 = [2,2,2,2]
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movaps %xmm0, (%esp)
+; X86-SSE2-NEXT: movdqa (%esp), %xmm0
+; X86-SSE2-NEXT: movaps {{[0-9]+}}(%esp), %xmm1
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; X86-SSE2-NEXT: movd %xmm1, %esi
+; X86-SSE2-NEXT: xorl %edx, %edx
+; X86-SSE2-NEXT: divl %esi
+; X86-SSE2-NEXT: movl %eax, %esi
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE2-NEXT: movd %xmm0, %edi
+; X86-SSE2-NEXT: movl %ecx, %eax
+; X86-SSE2-NEXT: xorl %edx, %edx
+; X86-SSE2-NEXT: divl %edi
+; X86-SSE2-NEXT: addl %esi, %eax
+; X86-SSE2-NEXT: leal -8(%ebp), %esp
+; X86-SSE2-NEXT: popl %esi
+; X86-SSE2-NEXT: popl %edi
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: main:
; X64-SSSE3: # %bb.0:
@@ -440,31 +462,29 @@ define i32 @main() nounwind {
; X64-SSSE3-NEXT: movq %rsp, %rbp
; X64-SSSE3-NEXT: andq $-32, %rsp
; X64-SSSE3-NEXT: subq $64, %rsp
-; X64-SSSE3-NEXT: movdqa zero(%rip), %xmm0
; X64-SSSE3-NEXT: movq n1@GOTPCREL(%rip), %rax
-; X64-SSSE3-NEXT: movaps (%rax), %xmm1
-; X64-SSSE3-NEXT: movaps 16(%rax), %xmm2
-; X64-SSSE3-NEXT: movaps %xmm1, zero(%rip)
-; X64-SSSE3-NEXT: movaps %xmm2, zero+16(%rip)
-; X64-SSSE3-NEXT: movaps {{.*#+}} xmm1 = [2,2,2,2]
-; X64-SSSE3-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
-; X64-SSSE3-NEXT: movaps %xmm1, (%rsp)
-; X64-SSSE3-NEXT: movdqa (%rsp), %xmm1
-; X64-SSSE3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm2
-; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-SSSE3-NEXT: movd %xmm2, %eax
-; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; X64-SSSE3-NEXT: movd %xmm2, %ecx
+; X64-SSSE3-NEXT: movaps (%rax), %xmm0
+; X64-SSSE3-NEXT: movaps 16(%rax), %xmm1
+; X64-SSSE3-NEXT: movl zero+4(%rip), %ecx
+; X64-SSSE3-NEXT: movl zero+8(%rip), %eax
+; X64-SSSE3-NEXT: movaps %xmm0, zero(%rip)
+; X64-SSSE3-NEXT: movaps %xmm1, zero+16(%rip)
+; X64-SSSE3-NEXT: movaps {{.*#+}} xmm0 = [2,2,2,2]
+; X64-SSSE3-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSSE3-NEXT: movaps %xmm0, (%rsp)
+; X64-SSSE3-NEXT: movdqa (%rsp), %xmm0
+; X64-SSSE3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
+; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; X64-SSSE3-NEXT: movd %xmm1, %esi
; X64-SSSE3-NEXT: xorl %edx, %edx
-; X64-SSSE3-NEXT: divl %ecx
-; X64-SSSE3-NEXT: movl %eax, %ecx
+; X64-SSSE3-NEXT: divl %esi
+; X64-SSSE3-NEXT: movl %eax, %esi
; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X64-SSSE3-NEXT: movd %xmm0, %eax
-; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; X64-SSSE3-NEXT: movd %xmm0, %esi
+; X64-SSSE3-NEXT: movd %xmm0, %edi
+; X64-SSSE3-NEXT: movl %ecx, %eax
; X64-SSSE3-NEXT: xorl %edx, %edx
-; X64-SSSE3-NEXT: divl %esi
-; X64-SSSE3-NEXT: addl %ecx, %eax
+; X64-SSSE3-NEXT: divl %edi
+; X64-SSSE3-NEXT: addl %esi, %eax
; X64-SSSE3-NEXT: movq %rbp, %rsp
; X64-SSSE3-NEXT: popq %rbp
; X64-SSSE3-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir b/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir
index 56cbe3f..37a90a2 100644
--- a/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir
+++ b/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir
@@ -119,6 +119,7 @@
name: foo
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
stack:
- { id: 0, name: a.addr, size: 4, alignment: 4, debug-info-variable: '!11',
diff --git a/llvm/test/CodeGen/X86/heap-alloc-markers.mir b/llvm/test/CodeGen/X86/heap-alloc-markers.mir
index 0bf8365..6e0dc50 100644
--- a/llvm/test/CodeGen/X86/heap-alloc-markers.mir
+++ b/llvm/test/CodeGen/X86/heap-alloc-markers.mir
@@ -34,6 +34,7 @@ name: test
# CHECK-LABEL: {{^}}test:
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.entry:
diff --git a/llvm/test/CodeGen/X86/huge-stack-offset.ll b/llvm/test/CodeGen/X86/huge-stack-offset.ll
index 68dcfa7..e825328 100644
--- a/llvm/test/CodeGen/X86/huge-stack-offset.ll
+++ b/llvm/test/CodeGen/X86/huge-stack-offset.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=x86_64-linux-unknown | FileCheck %s --check-prefix=CHECK-64
-; RUN: llc < %s -mtriple=i386-linux-unknown | FileCheck %s --check-prefix=CHECK-32
+; RUN: llc < %s -mtriple=x86_64-linux-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-64
+; RUN: llc < %s -mtriple=i386-linux-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-32
; Test that a large stack offset uses a single add/sub instruction to
; adjust the stack pointer.
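For reference, a minimal sketch of the kind of input this test exercises (hypothetical function name and frame size; the actual test body is elided from this hunk):

define void @huge_frame() nounwind {
  ; With a frame this large, the prologue should still adjust the stack
  ; pointer with a single add/sub of the full offset rather than splitting
  ; the adjustment into multiple instructions.
  %buf = alloca [2000000000 x i8], align 16
  call void @escape(ptr %buf)
  ret void
}
declare void @escape(ptr)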
diff --git a/llvm/test/CodeGen/X86/huge-stack-offset2.ll b/llvm/test/CodeGen/X86/huge-stack-offset2.ll
index 3bf0260..053643eb 100644
--- a/llvm/test/CodeGen/X86/huge-stack-offset2.ll
+++ b/llvm/test/CodeGen/X86/huge-stack-offset2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=CHECK
+; RUN: llc < %s -mtriple=x86_64-linux -verify-machineinstrs | FileCheck %s --check-prefix=CHECK
; Test how we handle pathologically large stack frames when RAX is live through
; the prologue and epilogue.
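Similarly, a minimal sketch of the scenario being re-checked under -verify-machineinstrs (hypothetical body; the real test is elided from this hunk): the i64 return value keeps RAX live across the epilogue while the huge frame is torn down.

define i64 @huge_frame_rax_live(i64 %x) nounwind {
  ; The returned value occupies RAX through the epilogue, so the stack
  ; adjustment cannot use RAX as a scratch register.
  %buf = alloca [4294967296 x i8], align 16
  store volatile i8 0, ptr %buf
  ret i64 %x
}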
diff --git a/llvm/test/CodeGen/X86/insertelement-var-index.ll b/llvm/test/CodeGen/X86/insertelement-var-index.ll
index 8ed8495..5420e6b 100644
--- a/llvm/test/CodeGen/X86/insertelement-var-index.ll
+++ b/llvm/test/CodeGen/X86/insertelement-var-index.ll
@@ -1009,18 +1009,19 @@ define <2 x i64> @arg_i64_v2i64(<2 x i64> %v, i64 %x, i32 %y) nounwind {
; X86AVX2-NEXT: pushl %esi
; X86AVX2-NEXT: andl $-16, %esp
; X86AVX2-NEXT: subl $48, %esp
-; X86AVX2-NEXT: movl 8(%ebp), %eax
-; X86AVX2-NEXT: movl 12(%ebp), %ecx
-; X86AVX2-NEXT: movl 16(%ebp), %edx
+; X86AVX2-NEXT: movl 8(%ebp), %edx
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: movl 16(%ebp), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
-; X86AVX2-NEXT: leal (%edx,%edx), %esi
+; X86AVX2-NEXT: addl %ecx, %ecx
+; X86AVX2-NEXT: movl %ecx, %esi
; X86AVX2-NEXT: andl $3, %esi
-; X86AVX2-NEXT: movl %eax, (%esp,%esi,4)
+; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: leal 1(%edx,%edx), %eax
-; X86AVX2-NEXT: andl $3, %eax
-; X86AVX2-NEXT: movl %ecx, 16(%esp,%eax,4)
+; X86AVX2-NEXT: incl %ecx
+; X86AVX2-NEXT: andl $3, %ecx
+; X86AVX2-NEXT: movl %eax, 16(%esp,%ecx,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %xmm0
; X86AVX2-NEXT: leal -4(%ebp), %esp
; X86AVX2-NEXT: popl %esi
@@ -1362,12 +1363,13 @@ define <2 x i64> @load_i64_v2i64(<2 x i64> %v, ptr %p, i32 %y) nounwind {
; X86AVX2-NEXT: movl (%ecx), %edx
; X86AVX2-NEXT: movl 4(%ecx), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
-; X86AVX2-NEXT: leal (%eax,%eax), %esi
+; X86AVX2-NEXT: addl %eax, %eax
+; X86AVX2-NEXT: movl %eax, %esi
; X86AVX2-NEXT: andl $3, %esi
; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: leal 1(%eax,%eax), %eax
+; X86AVX2-NEXT: incl %eax
; X86AVX2-NEXT: andl $3, %eax
; X86AVX2-NEXT: movl %ecx, 16(%esp,%eax,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %xmm0
@@ -1742,18 +1744,19 @@ define <4 x i64> @arg_i64_v4i64(<4 x i64> %v, i64 %x, i32 %y) nounwind {
; X86AVX2-NEXT: pushl %esi
; X86AVX2-NEXT: andl $-32, %esp
; X86AVX2-NEXT: subl $96, %esp
-; X86AVX2-NEXT: movl 8(%ebp), %eax
-; X86AVX2-NEXT: movl 12(%ebp), %ecx
-; X86AVX2-NEXT: movl 16(%ebp), %edx
+; X86AVX2-NEXT: movl 8(%ebp), %edx
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: movl 16(%ebp), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
-; X86AVX2-NEXT: leal (%edx,%edx), %esi
+; X86AVX2-NEXT: addl %ecx, %ecx
+; X86AVX2-NEXT: movl %ecx, %esi
; X86AVX2-NEXT: andl $7, %esi
-; X86AVX2-NEXT: movl %eax, (%esp,%esi,4)
+; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: leal 1(%edx,%edx), %eax
-; X86AVX2-NEXT: andl $7, %eax
-; X86AVX2-NEXT: movl %ecx, 32(%esp,%eax,4)
+; X86AVX2-NEXT: incl %ecx
+; X86AVX2-NEXT: andl $7, %ecx
+; X86AVX2-NEXT: movl %eax, 32(%esp,%ecx,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %ymm0
; X86AVX2-NEXT: leal -4(%ebp), %esp
; X86AVX2-NEXT: popl %esi
@@ -2128,12 +2131,13 @@ define <4 x i64> @load_i64_v4i64(<4 x i64> %v, ptr %p, i32 %y) nounwind {
; X86AVX2-NEXT: movl (%ecx), %edx
; X86AVX2-NEXT: movl 4(%ecx), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
-; X86AVX2-NEXT: leal (%eax,%eax), %esi
+; X86AVX2-NEXT: addl %eax, %eax
+; X86AVX2-NEXT: movl %eax, %esi
; X86AVX2-NEXT: andl $7, %esi
; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: leal 1(%eax,%eax), %eax
+; X86AVX2-NEXT: incl %eax
; X86AVX2-NEXT: andl $7, %eax
; X86AVX2-NEXT: movl %ecx, 32(%esp,%eax,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %ymm0
diff --git a/llvm/test/CodeGen/X86/instr-symbols.mir b/llvm/test/CodeGen/X86/instr-symbols.mir
index a900288..7af6ca8 100644
--- a/llvm/test/CodeGen/X86/instr-symbols.mir
+++ b/llvm/test/CodeGen/X86/instr-symbols.mir
@@ -23,6 +23,7 @@ name: test
# CHECK-LABEL: {{^}}test:
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.entry:
diff --git a/llvm/test/CodeGen/X86/int-to-fp-demanded.ll b/llvm/test/CodeGen/X86/int-to-fp-demanded.ll
new file mode 100644
index 0000000..cdde03f
--- /dev/null
+++ b/llvm/test/CodeGen/X86/int-to-fp-demanded.ll
@@ -0,0 +1,382 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
+
+declare void @use.i1(i1)
+declare void @use.i32(i32)
+define i32 @sitofp_signbit_only(i32 %i_in) nounwind {
+; X86-LABEL: sitofp_signbit_only:
+; X86: # %bb.0:
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: fildl (%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_signbit_only:
+; X64: # %bb.0:
+; X64-NEXT: cvtsi2ss %edi, %xmm0
+; X64-NEXT: movmskps %xmm0, %eax
+; X64-NEXT: shll $31, %eax
+; X64-NEXT: retq
+ %f = sitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define i32 @sitofp_signbit_only_okay_width(i16 %i_in) nounwind {
+; X86-LABEL: sitofp_signbit_only_okay_width:
+; X86: # %bb.0:
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; X86-NEXT: filds {{[0-9]+}}(%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_signbit_only_okay_width:
+; X64: # %bb.0:
+; X64-NEXT: movswl %di, %eax
+; X64-NEXT: cvtsi2ss %eax, %xmm0
+; X64-NEXT: movmskps %xmm0, %eax
+; X64-NEXT: shll $31, %eax
+; X64-NEXT: retq
+ %f = sitofp i16 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define i32 @sitofp_signbit_only_fail_bad_width1(i64 %i_in) nounwind {
+; X86-LABEL: sitofp_signbit_only_fail_bad_width1:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT: andl (%esp), %eax
+; X86-NEXT: popl %ecx
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_signbit_only_fail_bad_width1:
+; X64: # %bb.0:
+; X64-NEXT: cvtsi2ss %rdi, %xmm0
+; X64-NEXT: movmskps %xmm0, %eax
+; X64-NEXT: shll $31, %eax
+; X64-NEXT: retq
+ %f = sitofp i64 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define <2 x i16> @sitofp_signbit_only_fail_bad_width2(i32 %i_in) nounwind {
+; X86-LABEL: sitofp_signbit_only_fail_bad_width2:
+; X86: # %bb.0:
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: fildl (%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: shrl $16, %edx
+; X86-NEXT: andl $32768, %eax # imm = 0x8000
+; X86-NEXT: andl $32768, %edx # imm = 0x8000
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NEXT: # kill: def $dx killed $dx killed $edx
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_signbit_only_fail_bad_width2:
+; X64: # %bb.0:
+; X64-NEXT: cvtsi2ss %edi, %xmm0
+; X64-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: retq
+ %f = sitofp i32 %i_in to float
+ %i2xi16 = bitcast float %f to <2 x i16>
+ %r = and <2 x i16> %i2xi16, <i16 32768, i16 32768>
+ ret <2 x i16> %r
+}
+
+define i32 @sitofp_many_bits_fail(i32 %i_in) nounwind {
+; X86-LABEL: sitofp_many_bits_fail:
+; X86: # %bb.0:
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: fildl (%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movl $-2147483647, %eax # imm = 0x80000001
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_many_bits_fail:
+; X64: # %bb.0:
+; X64-NEXT: cvtsi2ss %edi, %xmm0
+; X64-NEXT: movd %xmm0, %eax
+; X64-NEXT: andl $-2147483647, %eax # imm = 0x80000001
+; X64-NEXT: retq
+ %f = sitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483649
+ ret i32 %r
+}
+
+define i32 @sitofp_multiuse_fail(i32 %i_in) nounwind {
+; X86-LABEL: sitofp_multiuse_fail:
+; X86: # %bb.0:
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: fildl {{[0-9]+}}(%esp)
+; X86-NEXT: fsts {{[0-9]+}}(%esp)
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll use.i32@PLT
+; X86-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_multiuse_fail:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbx
+; X64-NEXT: cvtsi2ss %edi, %xmm0
+; X64-NEXT: movd %xmm0, %ebx
+; X64-NEXT: movl %ebx, %edi
+; X64-NEXT: callq use.i32@PLT
+; X64-NEXT: andl $-2147483648, %ebx # imm = 0x80000000
+; X64-NEXT: movl %ebx, %eax
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+ %f = sitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ call void @use.i32(i32 %i)
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define i32 @sitofp_multiuse_okay(i32 %i_in) nounwind {
+; X86-LABEL: sitofp_multiuse_okay:
+; X86: # %bb.0:
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: fildl {{[0-9]+}}(%esp)
+; X86-NEXT: fsts {{[0-9]+}}(%esp)
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll use.i1@PLT
+; X86-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_multiuse_okay:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbx
+; X64-NEXT: cvtsi2ss %edi, %xmm0
+; X64-NEXT: movd %xmm0, %ebx
+; X64-NEXT: movl %ebx, %edi
+; X64-NEXT: callq use.i1@PLT
+; X64-NEXT: andl $-2147483648, %ebx # imm = 0x80000000
+; X64-NEXT: movl %ebx, %eax
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+ %f = sitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ %cmp = icmp slt i32 %i, 0
+ call void @use.i1(i32 %i)
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define i32 @uitofp_signbit_only(i32 %i_in) nounwind {
+; X86-LABEL: uitofp_signbit_only:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: uitofp_signbit_only:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
+ %f = uitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define i32 @uitofp_signbit_only_okay_width(i16 %i_in) nounwind {
+; X86-LABEL: uitofp_signbit_only_okay_width:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: uitofp_signbit_only_okay_width:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
+ %f = uitofp i16 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define i32 @uitofp_signbit_only_okay_width1(i64 %i_in) nounwind {
+; X86-LABEL: uitofp_signbit_only_okay_width1:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: uitofp_signbit_only_okay_width1:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
+ %f = uitofp i64 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define <2 x i16> @uitofp_signbit_only_fail_bad_width2(i32 %i_in) nounwind {
+; X86-LABEL: uitofp_signbit_only_fail_bad_width2:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movl $32768, %eax # imm = 0x8000
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: uitofp_signbit_only_fail_bad_width2:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cvtsi2ss %rax, %xmm0
+; X64-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: retq
+ %f = uitofp i32 %i_in to float
+ %i2xi16 = bitcast float %f to <2 x i16>
+ %r = and <2 x i16> %i2xi16, <i16 32768, i16 32768>
+ ret <2 x i16> %r
+}
+
+define i32 @uitofp_many_bits_fail(i32 %i_in) nounwind {
+; X86-LABEL: uitofp_many_bits_fail:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $1, %eax
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: uitofp_many_bits_fail:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cvtsi2ss %rax, %xmm0
+; X64-NEXT: movd %xmm0, %eax
+; X64-NEXT: andl $1, %eax
+; X64-NEXT: retq
+ %f = uitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483649
+ ret i32 %r
+}
+
+define i32 @uitofp_multiuse_fail(i32 %i_in) nounwind {
+; X86-LABEL: uitofp_multiuse_fail:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll use.i32@PLT
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: uitofp_multiuse_fail:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cvtsi2ss %rax, %xmm0
+; X64-NEXT: movd %xmm0, %edi
+; X64-NEXT: callq use.i32@PLT
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %f = uitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ call void @use.i32(i32 %i)
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define i32 @uitofp_multiuse_okay(i32 %i_in) nounwind {
+; X86-LABEL: uitofp_multiuse_okay:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll use.i1@PLT
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: uitofp_multiuse_okay:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cvtsi2ss %rax, %xmm0
+; X64-NEXT: movd %xmm0, %edi
+; X64-NEXT: callq use.i1@PLT
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %f = uitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ %cmp = icmp slt i32 %i, 0
+ call void @use.i1(i32 %i)
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
diff --git a/llvm/test/CodeGen/X86/isel-select-cmov.ll b/llvm/test/CodeGen/X86/isel-select-cmov.ll
index 0e5293c..39a20bf 100644
--- a/llvm/test/CodeGen/X86/isel-select-cmov.ll
+++ b/llvm/test/CodeGen/X86/isel-select-cmov.ll
@@ -13,6 +13,8 @@
; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=i686-apple-darwin10 -verify-machineinstrs | FileCheck %s --check-prefix=GISEL-X86
; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=i686-apple-darwin10 -verify-machineinstrs -mattr=+cmov | FileCheck %s --check-prefix=GISEL-X86-CMOV
+; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=x86_64-apple-darwin10 -verify-machineinstrs -mattr=+ndd | FileCheck %s --check-prefix=NDD
+
; Test conditional move for the supported types (i16, i32, and i64) and
; condition input (argument or cmp).
; When cmov is not available (i8 type or X86), the branch is expected.
@@ -114,6 +116,16 @@ define zeroext i8 @select_cmov_i8(i1 zeroext %cond, i8 zeroext %a, i8 zeroext %b
; GISEL-X86-CMOV-NEXT: cmovnew %dx, %ax
; GISEL-X86-CMOV-NEXT: ## kill: def $al killed $al killed $eax
; GISEL-X86-CMOV-NEXT: retl
+;
+; NDD-LABEL: select_cmov_i8:
+; NDD: ## %bb.0:
+; NDD-NEXT: testb $1, %dil
+; NDD-NEXT: jne LBB0_2
+; NDD-NEXT: ## %bb.1:
+; NDD-NEXT: movl %edx, %esi
+; NDD-NEXT: LBB0_2:
+; NDD-NEXT: movzbl %sil, %eax
+; NDD-NEXT: retq
%1 = select i1 %cond, i8 %a, i8 %b
ret i8 %1
}
@@ -207,6 +219,13 @@ define zeroext i16 @select_cmov_i16(i1 zeroext %cond, i16 zeroext %a, i16 zeroex
; GISEL-X86-CMOV-NEXT: cmovnew %dx, %ax
; GISEL-X86-CMOV-NEXT: ## kill: def $ax killed $ax killed $eax
; GISEL-X86-CMOV-NEXT: retl
+;
+; NDD-LABEL: select_cmov_i16:
+; NDD: ## %bb.0:
+; NDD-NEXT: testb $1, %dil
+; NDD-NEXT: cmovnew %si, %dx, %ax
+; NDD-NEXT: movzwl %ax, %eax
+; NDD-NEXT: retq
%1 = select i1 %cond, i16 %a, i16 %b
ret i16 %1
}
@@ -305,6 +324,13 @@ define zeroext i16 @select_cmp_cmov_i16(i16 zeroext %a, i16 zeroext %b) {
; GISEL-X86-CMOV-NEXT: cmovew %cx, %ax
; GISEL-X86-CMOV-NEXT: ## kill: def $ax killed $ax killed $eax
; GISEL-X86-CMOV-NEXT: retl
+;
+; NDD-LABEL: select_cmp_cmov_i16:
+; NDD: ## %bb.0:
+; NDD-NEXT: cmpw %si, %di
+; NDD-NEXT: cmovbw %di, %si, %ax
+; NDD-NEXT: movzwl %ax, %eax
+; NDD-NEXT: retq
%1 = icmp ult i16 %a, %b
%2 = select i1 %1, i16 %a, i16 %b
ret i16 %2
@@ -391,6 +417,12 @@ define i32 @select_cmov_i32(i1 zeroext %cond, i32 %a, i32 %b) {
; GISEL-X86-CMOV-NEXT: testl %ecx, %ecx
; GISEL-X86-CMOV-NEXT: cmovnel {{[0-9]+}}(%esp), %eax
; GISEL-X86-CMOV-NEXT: retl
+;
+; NDD-LABEL: select_cmov_i32:
+; NDD: ## %bb.0:
+; NDD-NEXT: testb $1, %dil
+; NDD-NEXT: cmovnel %esi, %edx, %eax
+; NDD-NEXT: retq
%1 = select i1 %cond, i32 %a, i32 %b
ret i32 %1
}
@@ -482,6 +514,12 @@ define i32 @select_cmp_cmov_i32(i32 %a, i32 %b) {
; GISEL-X86-CMOV-NEXT: andl $1, %edx
; GISEL-X86-CMOV-NEXT: cmovel %ecx, %eax
; GISEL-X86-CMOV-NEXT: retl
+;
+; NDD-LABEL: select_cmp_cmov_i32:
+; NDD: ## %bb.0:
+; NDD-NEXT: cmpl %esi, %edi
+; NDD-NEXT: cmovbl %edi, %esi, %eax
+; NDD-NEXT: retq
%1 = icmp ult i32 %a, %b
%2 = select i1 %1, i32 %a, i32 %b
ret i32 %2
@@ -584,6 +622,12 @@ define i64 @select_cmov_i64(i1 zeroext %cond, i64 %a, i64 %b) {
; GISEL-X86-CMOV-NEXT: cmovnel {{[0-9]+}}(%esp), %eax
; GISEL-X86-CMOV-NEXT: cmovnel {{[0-9]+}}(%esp), %edx
; GISEL-X86-CMOV-NEXT: retl
+;
+; NDD-LABEL: select_cmov_i64:
+; NDD: ## %bb.0:
+; NDD-NEXT: testb $1, %dil
+; NDD-NEXT: cmovneq %rsi, %rdx, %rax
+; NDD-NEXT: retq
%1 = select i1 %cond, i64 %a, i64 %b
ret i64 %1
}
@@ -754,6 +798,12 @@ define i64 @select_cmp_cmov_i64(i64 %a, i64 %b) nounwind {
; GISEL-X86-CMOV-NEXT: popl %ebx
; GISEL-X86-CMOV-NEXT: popl %ebp
; GISEL-X86-CMOV-NEXT: retl
+;
+; NDD-LABEL: select_cmp_cmov_i64:
+; NDD: ## %bb.0:
+; NDD-NEXT: cmpq %rsi, %rdi
+; NDD-NEXT: cmovbq %rdi, %rsi, %rax
+; NDD-NEXT: retq
%1 = icmp ult i64 %a, %b
%2 = select i1 %1, i64 %a, i64 %b
ret i64 %2
diff --git a/llvm/test/CodeGen/X86/isel-traps.ll b/llvm/test/CodeGen/X86/isel-traps.ll
new file mode 100644
index 0000000..c207387
--- /dev/null
+++ b/llvm/test/CodeGen/X86/isel-traps.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=ALL,X64
+; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=ALL,X64
+; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=ALL,GISEL-X64
+; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=ALL,X86
+; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=ALL,X86
+; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=ALL,GISEL-X86
+
+declare void @llvm.trap()
+
+define void @test_trap() {
+; ALL-LABEL: test_trap:
+; ALL: # %bb.0:
+; ALL-NEXT: ud2
+; ALL-NEXT: ret{{[l|q]}}
+ tail call void @llvm.trap()
+ ret void
+}
+
+define void @test_debugtrap() {
+; ALL-LABEL: test_debugtrap:
+; ALL: # %bb.0:
+; ALL-NEXT: int3
+; ALL-NEXT: ret{{[l|q]}}
+ tail call void @llvm.debugtrap()
+ ret void
+}
+
+define void @test_ubsantrap() {
+; ALL-LABEL: test_ubsantrap:
+; ALL: # %bb.0:
+; ALL-NEXT: ud1l 12(%eax), %eax
+; ALL-NEXT: ret{{[l|q]}}
+ call void @llvm.ubsantrap(i8 12)
+ ret void
+}
+
+define void @test_ubsantrap_custom() nounwind {
+; X64-LABEL: test_ubsantrap_custom:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $42, %edi
+; X64-NEXT: callq guide@PLT
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+;
+; GISEL-X64-LABEL: test_ubsantrap_custom:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: pushq %rax
+; GISEL-X64-NEXT: movl $42, %edi
+; GISEL-X64-NEXT: callq guide
+; GISEL-X64-NEXT: popq %rax
+; GISEL-X64-NEXT: retq
+;
+; X86-LABEL: test_ubsantrap_custom:
+; X86: # %bb.0:
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: movl $42, (%esp)
+; X86-NEXT: calll guide
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: retl
+;
+; GISEL-X86-LABEL: test_ubsantrap_custom:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: subl $12, %esp
+; GISEL-X86-NEXT: movl $42, %eax
+; GISEL-X86-NEXT: movl %eax, (%esp)
+; GISEL-X86-NEXT: calll guide
+; GISEL-X86-NEXT: addl $12, %esp
+; GISEL-X86-NEXT: retl
+ call void @llvm.ubsantrap(i8 42) "trap-func-name"="guide"
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/known-never-zero.ll b/llvm/test/CodeGen/X86/known-never-zero.ll
index cc98627..39d02f9 100644
--- a/llvm/test/CodeGen/X86/known-never-zero.ll
+++ b/llvm/test/CodeGen/X86/known-never-zero.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=CHECK
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64
;; Use cttz to test if we properly prove never-zero. There is a very
;; simple transform from cttz -> cttz_zero_undef if its operand is
@@ -9,50 +10,82 @@ declare i32 @llvm.uadd.sat.i32(i32, i32)
declare i32 @llvm.umax.i32(i32, i32)
declare i32 @llvm.umin.i32(i32, i32)
declare i32 @llvm.smin.i32(i32, i32)
+declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
declare i32 @llvm.smax.i32(i32, i32)
+declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
declare i32 @llvm.bswap.i32(i32)
declare i32 @llvm.bitreverse.i32(i32)
declare i32 @llvm.ctpop.i32(i32)
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
declare i32 @llvm.abs.i32(i32, i1)
declare i32 @llvm.fshl.i32(i32, i32, i32)
declare i32 @llvm.fshr.i32(i32, i32, i32)
define i32 @or_known_nonzero(i32 %x) {
-; CHECK-LABEL: or_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: or_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%z = or i32 %x, 1
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @or_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: or_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl %esi, %edi
-; CHECK-NEXT: je .LBB1_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB1_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: or_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: je .LBB1_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB1_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: orl %esi, %edi
+; X64-NEXT: je .LBB1_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB1_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = or i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @select_known_nonzero(i1 %c, i32 %x) {
-; CHECK-LABEL: select_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %esi
-; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: movl $122, %eax
-; CHECK-NEXT: cmovnel %esi, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: select_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $122, %ecx
+; X86-NEXT: cmovnel %eax, %ecx
+; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: select_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %esi
+; X64-NEXT: testb $1, %dil
+; X64-NEXT: movl $122, %eax
+; X64-NEXT: cmovnel %esi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%y = or i32 %x, 1
%z = select i1 %c, i32 %y, i32 122
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -60,20 +93,36 @@ define i32 @select_known_nonzero(i1 %c, i32 %x) {
}
define i32 @select_maybe_zero(i1 %c, i32 %x) {
-; CHECK-LABEL: select_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %esi
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: cmovnel %esi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB3_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB3_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: select_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: orl $1, %ecx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X86-NEXT: cmovnel %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB3_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB3_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: select_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %esi
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: testb $1, %dil
+; X64-NEXT: cmovnel %esi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB3_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB3_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %x, 1
%z = select i1 %c, i32 %y, i32 0
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -81,28 +130,45 @@ define i32 @select_maybe_zero(i1 %c, i32 %x) {
}
define i32 @shl_known_nonzero_1s_bit_set(i32 %x) {
-; CHECK-LABEL: shl_known_nonzero_1s_bit_set:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $123, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_known_nonzero_1s_bit_set:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $123, %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_known_nonzero_1s_bit_set:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $123, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = shl i32 123, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @shl_known_nonzero_nsw(i32 %x, i32 %yy) {
-; CHECK-LABEL: shl_known_nonzero_nsw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_known_nonzero_nsw:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_known_nonzero_nsw:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = shl nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -110,14 +176,23 @@ define i32 @shl_known_nonzero_nsw(i32 %x, i32 %yy) {
}
define i32 @shl_known_nonzero_nuw(i32 %x, i32 %yy) {
-; CHECK-LABEL: shl_known_nonzero_nuw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_known_nonzero_nuw:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_known_nonzero_nuw:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = shl nuw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -125,67 +200,116 @@ define i32 @shl_known_nonzero_nuw(i32 %x, i32 %yy) {
}
define i32 @shl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: shl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB7_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB7_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB7_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB7_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB7_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB7_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = shl nuw nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @uaddsat_known_nonzero(i32 %x) {
-; CHECK-LABEL: uaddsat_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: incl %edi
-; CHECK-NEXT: movl $-1, %eax
-; CHECK-NEXT: cmovnel %edi, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: uaddsat_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: incl %eax
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: cmovnel %eax, %ecx
+; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: uaddsat_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: incl %edi
+; X64-NEXT: movl $-1, %eax
+; X64-NEXT: cmovnel %edi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.uadd.sat.i32(i32 %x, i32 1)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @uaddsat_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: uaddsat_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addl %esi, %edi
-; CHECK-NEXT: movl $-1, %eax
-; CHECK-NEXT: cmovael %edi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB9_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB9_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: uaddsat_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $-1, %eax
+; X86-NEXT: cmovael %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB9_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB9_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: uaddsat_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: addl %esi, %edi
+; X64-NEXT: movl $-1, %eax
+; X64-NEXT: cmovael %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB9_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB9_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @umax_known_nonzero(i32 %x, i32 %y) {
-; CHECK-LABEL: umax_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: cmpl %eax, %edi
-; CHECK-NEXT: cmoval %edi, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umax_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: cmpl %edx, %eax
+; X86-NEXT: cmoval %eax, %edx
+; X86-NEXT: rep bsfl %edx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umax_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: cmpl %eax, %edi
+; X64-NEXT: cmoval %edi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%yy = shl nuw i32 4, %y
%z = call i32 @llvm.umax.i32(i32 %x, i32 %yy)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -193,35 +317,62 @@ define i32 @umax_known_nonzero(i32 %x, i32 %y) {
}
define i32 @umax_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: umax_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl %esi, %edi
-; CHECK-NEXT: cmoval %edi, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB11_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB11_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umax_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: cmoval %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB11_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB11_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umax_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl %esi, %edi
+; X64-NEXT: cmoval %edi, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB11_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB11_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.umax.i32(i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @umin_known_nonzero(i32 %xx, i32 %yy) {
-; CHECK-LABEL: umin_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: addl $4, %esi
-; CHECK-NEXT: cmpl %esi, %eax
-; CHECK-NEXT: cmovbl %eax, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umin_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: addl $4, %eax
+; X86-NEXT: cmpl %eax, %edx
+; X86-NEXT: cmovbl %edx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umin_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: addl $4, %esi
+; X64-NEXT: cmpl %esi, %eax
+; X64-NEXT: cmovbl %eax, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%x = shl nuw i32 4, %xx
%y = add nuw nsw i32 %yy, 4
%z = call i32 @llvm.umin.i32(i32 %x, i32 %y)
@@ -230,36 +381,63 @@ define i32 @umin_known_nonzero(i32 %xx, i32 %yy) {
}
define i32 @umin_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: umin_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl $54, %edi
-; CHECK-NEXT: movl $54, %eax
-; CHECK-NEXT: cmovbl %edi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB13_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB13_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umin_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl $54, %ecx
+; X86-NEXT: movl $54, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB13_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB13_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umin_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $54, %edi
+; X64-NEXT: movl $54, %eax
+; X64-NEXT: cmovbl %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB13_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB13_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.umin.i32(i32 %x, i32 54)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @smin_known_nonzero(i32 %xx, i32 %yy) {
-; CHECK-LABEL: smin_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: addl $4, %esi
-; CHECK-NEXT: cmpl %esi, %eax
-; CHECK-NEXT: cmovll %eax, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smin_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: addl $4, %eax
+; X86-NEXT: cmpl %eax, %edx
+; X86-NEXT: cmovll %edx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smin_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: addl $4, %esi
+; X64-NEXT: cmpl %esi, %eax
+; X64-NEXT: cmovll %eax, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%x = shl nuw i32 4, %xx
%y = add nuw nsw i32 %yy, 4
%z = call i32 @llvm.smin.i32(i32 %x, i32 %y)
@@ -267,37 +445,120 @@ define i32 @smin_known_nonzero(i32 %xx, i32 %yy) {
ret i32 %r
}
+define i32 @smin_known_zero(i32 %x, i32 %y) {
+; X86-LABEL: smin_known_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $-54, %eax
+; X86-NEXT: movl $-54, %ecx
+; X86-NEXT: cmovll %eax, %ecx
+; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smin_known_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $-54, %edi
+; X64-NEXT: movl $-54, %eax
+; X64-NEXT: cmovll %edi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+ %z = call i32 @llvm.smin.i32(i32 %x, i32 -54)
+ %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
+ ret i32 %r
+}
+
+define <4 x i32> @smin_known_zero_vec(<4 x i32> %x, <4 x i32> %y) {
+; X86-LABEL: smin_known_zero_vec:
+; X86: # %bb.0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294967242,4294967273,4294967284,4294967295]
+; X86-NEXT: movdqa %xmm1, %xmm2
+; X86-NEXT: pcmpgtd %xmm0, %xmm2
+; X86-NEXT: pand %xmm2, %xmm0
+; X86-NEXT: pandn %xmm1, %xmm2
+; X86-NEXT: por %xmm2, %xmm0
+; X86-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-NEXT: paddd %xmm0, %xmm1
+; X86-NEXT: pand %xmm1, %xmm0
+; X86-NEXT: pxor %xmm1, %xmm1
+; X86-NEXT: pcmpeqd %xmm1, %xmm0
+; X86-NEXT: psrld $31, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: smin_known_zero_vec:
+; X64: # %bb.0:
+; X64-NEXT: vpminsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X64-NEXT: vpaddd %xmm1, %xmm0, %xmm1
+; X64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpsrld $31, %xmm0, %xmm0
+; X64-NEXT: retq
+ %z = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %x, <4 x i32> <i32 -54, i32 -23, i32 -12, i32 -1>)
+ %r = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %z)
+ %3 = icmp eq <4 x i32> %r, <i32 1, i32 1, i32 1, i32 1>
+ %ret = zext <4 x i1> %3 to <4 x i32>
+ ret <4 x i32> %ret
+}
+
define i32 @smin_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: smin_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl $54, %edi
-; CHECK-NEXT: movl $54, %eax
-; CHECK-NEXT: cmovll %edi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB15_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB15_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smin_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl $54, %ecx
+; X86-NEXT: movl $54, %eax
+; X86-NEXT: cmovll %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB17_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB17_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smin_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $54, %edi
+; X64-NEXT: movl $54, %eax
+; X64-NEXT: cmovll %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB17_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB17_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.smin.i32(i32 %x, i32 54)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @smax_known_nonzero(i32 %xx, i32 %yy) {
-; CHECK-LABEL: smax_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: addl $4, %esi
-; CHECK-NEXT: cmpl %esi, %eax
-; CHECK-NEXT: cmovgl %eax, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smax_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: addl $4, %eax
+; X86-NEXT: cmpl %eax, %edx
+; X86-NEXT: cmovgl %edx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smax_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: addl $4, %esi
+; X64-NEXT: cmpl %esi, %eax
+; X64-NEXT: cmovgl %eax, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%x = shl nuw i32 4, %xx
%y = add nuw nsw i32 %yy, 4
%z = call i32 @llvm.smax.i32(i32 %x, i32 %y)
@@ -306,35 +567,125 @@ define i32 @smax_known_nonzero(i32 %xx, i32 %yy) {
}
define i32 @smax_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: smax_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl $55, %edi
-; CHECK-NEXT: movl $54, %eax
-; CHECK-NEXT: cmovgel %edi, %eax
-; CHECK-NEXT: bsfl %eax, %ecx
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: cmovnel %ecx, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smax_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $55, %eax
+; X86-NEXT: movl $54, %ecx
+; X86-NEXT: cmovgel %eax, %ecx
+; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smax_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $55, %edi
+; X64-NEXT: movl $54, %eax
+; X64-NEXT: cmovgel %edi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.smax.i32(i32 %x, i32 54)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
+define <4 x i32> @smax_known_zero_vec(<4 x i32> %x, <4 x i32> %y) {
+; X86-LABEL: smax_known_zero_vec:
+; X86: # %bb.0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [54,23,12,1]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pcmpgtd %xmm1, %xmm2
+; X86-NEXT: pand %xmm2, %xmm0
+; X86-NEXT: pandn %xmm1, %xmm2
+; X86-NEXT: por %xmm2, %xmm0
+; X86-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-NEXT: paddd %xmm0, %xmm1
+; X86-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT: pxor %xmm1, %xmm0
+; X86-NEXT: pcmpgtd %xmm1, %xmm0
+; X86-NEXT: psrld $31, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: smax_known_zero_vec:
+; X64: # %bb.0:
+; X64-NEXT: vpmaxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X64-NEXT: vpaddd %xmm1, %xmm0, %xmm1
+; X64-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpminud %xmm1, %xmm0, %xmm1
+; X64-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: retq
+ %z = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %x, <4 x i32> <i32 54, i32 23, i32 12, i32 1>)
+ %r = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %z)
+ %3 = icmp eq <4 x i32> %r, <i32 1, i32 1, i32 1, i32 1>
+ %ret = zext <4 x i1> %3 to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+define i32 @smax_known_zero(i32 %x, i32 %y) {
+; X86-LABEL: smax_known_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: testl %ecx, %ecx
+; X86-NEXT: movl $-1, %eax
+; X86-NEXT: cmovnsl %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB21_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB21_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smax_known_zero:
+; X64: # %bb.0:
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: movl $-1, %eax
+; X64-NEXT: cmovnsl %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB21_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB21_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
+ %z = call i32 @llvm.smax.i32(i32 %x, i32 -1)
+ %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
+ ret i32 %r
+}
+
define i32 @rotr_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotr_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB18_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB18_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB22_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB22_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB22_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB22_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%shr = lshr i32 %x, %y
%sub = sub i32 32, %y
@@ -345,19 +696,33 @@ define i32 @rotr_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotr_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotr_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB19_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB19_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB23_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB23_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB23_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB23_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%shr = lshr i32 %x, %y
%sub = sub i32 32, %y
%shl = shl i32 %x, %sub
@@ -367,14 +732,23 @@ define i32 @rotr_maybe_zero(i32 %x, i32 %y) {
}
define i32 @rotr_with_fshr_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotr_with_fshr_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_with_fshr_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_with_fshr_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%z = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -382,39 +756,68 @@ define i32 @rotr_with_fshr_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotr_with_fshr_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotr_with_fshr_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB21_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB21_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_with_fshr_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB25_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB25_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_with_fshr_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB25_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB25_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @rotl_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotl_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB22_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB22_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB26_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB26_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB26_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB26_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%shl = shl i32 %x, %y
%sub = sub i32 32, %y
@@ -425,19 +828,33 @@ define i32 @rotl_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB23_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB23_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB27_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB27_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB27_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB27_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%shl = shl i32 %x, %y
%sub = sub i32 32, %y
%shr = lshr i32 %x, %sub
@@ -447,14 +864,23 @@ define i32 @rotl_maybe_zero(i32 %x, i32 %y) {
}
define i32 @rotl_with_fshl_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotl_with_fshl_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_with_fshl_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_with_fshl_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%z = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -462,47 +888,78 @@ define i32 @rotl_with_fshl_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotl_with_fshl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotl_with_fshl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB25_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB25_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_with_fshl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB29_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB29_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_with_fshl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB29_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB29_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sra_known_nonzero_sign_bit_set(i32 %x) {
-; CHECK-LABEL: sra_known_nonzero_sign_bit_set:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: sarl %cl, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sra_known_nonzero_sign_bit_set:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X86-NEXT: sarl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sra_known_nonzero_sign_bit_set:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: sarl %cl, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = ashr i32 2147606891, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sra_known_nonzero_exact(i32 %x, i32 %yy) {
-; CHECK-LABEL: sra_known_nonzero_exact:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: sarl %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sra_known_nonzero_exact:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: sarl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sra_known_nonzero_exact:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: sarl %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = ashr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -510,47 +967,78 @@ define i32 @sra_known_nonzero_exact(i32 %x, i32 %yy) {
}
define i32 @sra_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: sra_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: sarl %cl, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB28_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB28_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sra_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: sarl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB32_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB32_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sra_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: sarl %cl, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB32_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB32_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = ashr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @srl_known_nonzero_sign_bit_set(i32 %x) {
-; CHECK-LABEL: srl_known_nonzero_sign_bit_set:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shrl %cl, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: srl_known_nonzero_sign_bit_set:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: srl_known_nonzero_sign_bit_set:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shrl %cl, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = lshr i32 2147606891, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @srl_known_nonzero_exact(i32 %x, i32 %yy) {
-; CHECK-LABEL: srl_known_nonzero_exact:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shrl %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: srl_known_nonzero_exact:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: srl_known_nonzero_exact:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shrl %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = lshr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -558,33 +1046,56 @@ define i32 @srl_known_nonzero_exact(i32 %x, i32 %yy) {
}
define i32 @srl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: srl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shrl %cl, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB31_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB31_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: srl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB35_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB35_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: srl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shrl %cl, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB35_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB35_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = lshr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @udiv_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: udiv_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %esi
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: udiv_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $64, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: divl {{[0-9]+}}(%esp)
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: udiv_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %esi
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 64
%z = udiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -592,33 +1103,56 @@ define i32 @udiv_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @udiv_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: udiv_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %esi
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB33_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB33_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: udiv_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: divl {{[0-9]+}}(%esp)
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB37_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB37_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: udiv_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %esi
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB37_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB37_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = udiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sdiv_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: sdiv_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %esi
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sdiv_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $64, %eax
+; X86-NEXT: cltd
+; X86-NEXT: idivl {{[0-9]+}}(%esp)
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sdiv_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: cltd
+; X64-NEXT: idivl %esi
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 64
%z = sdiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -626,31 +1160,53 @@ define i32 @sdiv_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @sdiv_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: sdiv_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %esi
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB35_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB35_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sdiv_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cltd
+; X86-NEXT: idivl {{[0-9]+}}(%esp)
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB39_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB39_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sdiv_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cltd
+; X64-NEXT: idivl %esi
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB39_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB39_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = sdiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @add_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: add_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %edi
-; CHECK-NEXT: addl %esi, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: add_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: add_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %edi
+; X64-NEXT: addl %esi, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 1
%z = add nuw i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -658,17 +1214,30 @@ define i32 @add_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @add_maybe_zero(i32 %xx, i32 %y) {
-; CHECK-LABEL: add_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %edi
-; CHECK-NEXT: addl %esi, %edi
-; CHECK-NEXT: je .LBB37_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB37_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: add_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: je .LBB41_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB41_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: add_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %edi
+; X64-NEXT: addl %esi, %edi
+; X64-NEXT: je .LBB41_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB41_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 1
%z = add nsw i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -676,15 +1245,24 @@ define i32 @add_maybe_zero(i32 %xx, i32 %y) {
}
define i32 @sub_known_nonzero_neg_case(i32 %xx) {
-; CHECK-LABEL: sub_known_nonzero_neg_case:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $256, %eax # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: negl %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_known_nonzero_neg_case:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_known_nonzero_neg_case:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $256, %eax # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: negl %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = shl nuw nsw i32 256, %xx
%z = sub i32 0, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -692,14 +1270,24 @@ define i32 @sub_known_nonzero_neg_case(i32 %xx) {
}
define i32 @sub_known_nonzero_ne_case(i32 %xx, i32 %yy) {
-; CHECK-LABEL: sub_known_nonzero_ne_case:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: andl $-65, %edi
-; CHECK-NEXT: subl %eax, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_known_nonzero_ne_case:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: orl $64, %ecx
+; X86-NEXT: andl $-65, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_known_nonzero_ne_case:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: andl $-65, %edi
+; X64-NEXT: subl %eax, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 64
%y = and i32 %xx, -65
%z = sub i32 %y, %x
@@ -708,18 +1296,32 @@ define i32 @sub_known_nonzero_ne_case(i32 %xx, i32 %yy) {
}
define i32 @sub_maybe_zero(i32 %x) {
-; CHECK-LABEL: sub_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: subl %edi, %eax
-; CHECK-NEXT: je .LBB40_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB40_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: orl $64, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: je .LBB44_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB44_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: subl %edi, %eax
+; X64-NEXT: je .LBB44_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB44_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %x, 64
%z = sub i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -727,34 +1329,60 @@ define i32 @sub_maybe_zero(i32 %x) {
}
define i32 @sub_maybe_zero2(i32 %x) {
-; CHECK-LABEL: sub_maybe_zero2:
-; CHECK: # %bb.0:
-; CHECK-NEXT: negl %edi
-; CHECK-NEXT: je .LBB41_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB41_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_maybe_zero2:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: je .LBB45_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB45_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_maybe_zero2:
+; X64: # %bb.0:
+; X64-NEXT: negl %edi
+; X64-NEXT: je .LBB45_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB45_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = sub i32 0, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @mul_known_nonzero_nsw(i32 %x, i32 %yy) {
-; CHECK-LABEL: mul_known_nonzero_nsw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: imull %edi, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB42_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB42_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: mul_known_nonzero_nsw:
+; X86: # %bb.0:
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB46_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB46_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_known_nonzero_nsw:
+; X64: # %bb.0:
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: imull %edi, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB46_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB46_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = mul nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -762,18 +1390,32 @@ define i32 @mul_known_nonzero_nsw(i32 %x, i32 %yy) {
}
define i32 @mul_known_nonzero_nuw(i32 %x, i32 %yy) {
-; CHECK-LABEL: mul_known_nonzero_nuw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: imull %edi, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB43_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB43_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: mul_known_nonzero_nuw:
+; X86: # %bb.0:
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB47_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB47_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_known_nonzero_nuw:
+; X64: # %bb.0:
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: imull %edi, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB47_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB47_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = mul nuw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -781,36 +1423,63 @@ define i32 @mul_known_nonzero_nuw(i32 %x, i32 %yy) {
}
define i32 @mul_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: mul_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: imull %esi, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB44_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB44_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: mul_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB48_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB48_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: imull %esi, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB48_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB48_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = mul nuw nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @bitcast_known_nonzero(<2 x i16> %xx) {
-; CHECK-LABEL: bitcast_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; CHECK-NEXT: pslld $23, %xmm0
-; CHECK-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-NEXT: cvttps2dq %xmm0, %xmm0
-; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; CHECK-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: bsfl %eax, %ecx
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: cmovnel %ecx, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: bitcast_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; X86-NEXT: pslld $23, %xmm0
+; X86-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: cvttps2dq %xmm0, %xmm0
+; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; X86-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: bsfl %eax, %ecx
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: cmovnel %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: bitcast_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; X64-NEXT: vpslld $23, %xmm0, %xmm0
+; X64-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vcvttps2dq %xmm0, %xmm0
+; X64-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; X64-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vmovd %xmm0, %eax
+; X64-NEXT: bsfl %eax, %ecx
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: cmovnel %ecx, %eax
+; X64-NEXT: retq
%x = shl nuw nsw <2 x i16> <i16 256, i16 256>, %xx
%z = bitcast <2 x i16> %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -818,49 +1487,83 @@ define i32 @bitcast_known_nonzero(<2 x i16> %xx) {
}
define i32 @bitcast_maybe_zero(<2 x i16> %x) {
-; CHECK-LABEL: bitcast_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB46_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB46_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: bitcast_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB50_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB50_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: bitcast_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: vmovd %xmm0, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB50_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB50_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = bitcast <2 x i16> %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @bitcast_from_float(float %x) {
-; CHECK-LABEL: bitcast_from_float:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB47_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB47_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: bitcast_from_float:
+; X86: # %bb.0:
+; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB51_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB51_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: bitcast_from_float:
+; X64: # %bb.0:
+; X64-NEXT: vmovd %xmm0, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB51_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB51_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = bitcast float %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @zext_known_nonzero(i16 %xx) {
-; CHECK-LABEL: zext_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $256, %eax # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: zext_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movzwl %ax, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: zext_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $256, %eax # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = shl nuw nsw i16 256, %xx
%z = zext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -868,32 +1571,54 @@ define i32 @zext_known_nonzero(i16 %xx) {
}
define i32 @zext_maybe_zero(i16 %x) {
-; CHECK-LABEL: zext_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: testw %di, %di
-; CHECK-NEXT: je .LBB49_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: movzwl %di, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB49_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: zext_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testw %ax, %ax
+; X86-NEXT: je .LBB53_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: movzwl %ax, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB53_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: zext_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: testw %di, %di
+; X64-NEXT: je .LBB53_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: movzwl %di, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB53_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = zext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sext_known_nonzero(i16 %xx) {
-; CHECK-LABEL: sext_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $256, %eax # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: cwtl
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sext_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: cwtl
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sext_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $256, %eax # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: cwtl
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = shl nuw nsw i16 256, %xx
%z = sext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -901,17 +1626,29 @@ define i32 @sext_known_nonzero(i16 %xx) {
}
define i32 @sext_maybe_zero(i16 %x) {
-; CHECK-LABEL: sext_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: testw %di, %di
-; CHECK-NEXT: je .LBB51_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: movswl %di, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB51_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sext_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB55_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB55_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sext_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: testw %di, %di
+; X64-NEXT: je .LBB55_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: movswl %di, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB55_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = sext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
diff --git a/llvm/test/CodeGen/X86/late-remat-update.mir b/llvm/test/CodeGen/X86/late-remat-update.mir
index 84a78f8..dd4e99c 100644
--- a/llvm/test/CodeGen/X86/late-remat-update.mir
+++ b/llvm/test/CodeGen/X86/late-remat-update.mir
@@ -66,6 +66,7 @@ registers:
liveins:
- { reg: '$edi', virtual-reg: '%0' }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.entry:
diff --git a/llvm/test/CodeGen/X86/limit-split-cost.mir b/llvm/test/CodeGen/X86/limit-split-cost.mir
index 6f5329e..7ec0404 100644
--- a/llvm/test/CodeGen/X86/limit-split-cost.mir
+++ b/llvm/test/CodeGen/X86/limit-split-cost.mir
@@ -86,6 +86,7 @@ registers:
liveins:
- { reg: '$edi', virtual-reg: '%0' }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.entry:
diff --git a/llvm/test/CodeGen/X86/masked_store.ll b/llvm/test/CodeGen/X86/masked_store.ll
index 898b34e..6aa0a81 100644
--- a/llvm/test/CodeGen/X86/masked_store.ll
+++ b/llvm/test/CodeGen/X86/masked_store.ll
@@ -12,7 +12,7 @@
; vXf64
;
-define void @store_v1f64_v1i64(<1 x i64> %trigger, ptr %addr, <1 x double> %val) {
+define void @store_v1f64_v1i64(<1 x i64> %trigger, ptr %addr, <1 x double> %val) nounwind {
; SSE-LABEL: store_v1f64_v1i64:
; SSE: ## %bb.0:
; SSE-NEXT: testq %rdi, %rdi
@@ -46,7 +46,7 @@ define void @store_v1f64_v1i64(<1 x i64> %trigger, ptr %addr, <1 x double> %val)
ret void
}
-define void @store_v2f64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x double> %val) {
+define void @store_v2f64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x double> %val) nounwind {
; SSE-LABEL: store_v2f64_v2i64:
; SSE: ## %bb.0:
; SSE-NEXT: movmskpd %xmm0, %eax
@@ -106,7 +106,7 @@ define void @store_v2f64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x double> %val)
ret void
}
-define void @store_v4f64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x double> %val) {
+define void @store_v4f64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x double> %val) nounwind {
; SSE2-LABEL: store_v4f64_v4i64:
; SSE2: ## %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -222,7 +222,7 @@ define void @store_v4f64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x double> %val)
; vXf32
;
-define void @store_v2f32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x float> %val) {
+define void @store_v2f32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x float> %val) nounwind {
; SSE2-LABEL: store_v2f32_v2i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -314,7 +314,7 @@ define void @store_v2f32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x float> %val)
ret void
}
-define void @store_v4f32_v4i32(<4 x float> %x, ptr %ptr, <4 x float> %y, <4 x i32> %mask) {
+define void @store_v4f32_v4i32(<4 x float> %x, ptr %ptr, <4 x float> %y, <4 x i32> %mask) nounwind {
; SSE2-LABEL: store_v4f32_v4i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: movmskps %xmm2, %eax
@@ -425,7 +425,7 @@ define void @store_v4f32_v4i32(<4 x float> %x, ptr %ptr, <4 x float> %y, <4 x i3
ret void
}
-define void @store_v8f32_v8i32(<8 x float> %x, ptr %ptr, <8 x float> %y, <8 x i32> %mask) {
+define void @store_v8f32_v8i32(<8 x float> %x, ptr %ptr, <8 x float> %y, <8 x i32> %mask) nounwind {
; SSE2-LABEL: store_v8f32_v8i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: packssdw %xmm5, %xmm4
@@ -605,7 +605,7 @@ define void @store_v8f32_v8i32(<8 x float> %x, ptr %ptr, <8 x float> %y, <8 x i3
ret void
}
-define void @store_v16f32_v16i32(<16 x float> %x, ptr %ptr, <16 x float> %y, <16 x i32> %mask) {
+define void @store_v16f32_v16i32(<16 x float> %x, ptr %ptr, <16 x float> %y, <16 x i32> %mask) nounwind {
; SSE2-LABEL: store_v16f32_v16i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4
@@ -914,7 +914,7 @@ define void @store_v16f32_v16i32(<16 x float> %x, ptr %ptr, <16 x float> %y, <16
; vXi64
;
-define void @store_v2i64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x i64> %val) {
+define void @store_v2i64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x i64> %val) nounwind {
; SSE2-LABEL: store_v2i64_v2i64:
; SSE2: ## %bb.0:
; SSE2-NEXT: movmskpd %xmm0, %eax
@@ -998,7 +998,7 @@ define void @store_v2i64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x i64> %val) {
ret void
}
-define void @store_v4i64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x i64> %val) {
+define void @store_v4i64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x i64> %val) nounwind {
; SSE2-LABEL: store_v4i64_v4i64:
; SSE2: ## %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -1122,7 +1122,7 @@ define void @store_v4i64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x i64> %val) {
; vXi32
;
-define void @store_v1i32_v1i32(<1 x i32> %trigger, ptr %addr, <1 x i32> %val) {
+define void @store_v1i32_v1i32(<1 x i32> %trigger, ptr %addr, <1 x i32> %val) nounwind {
; SSE-LABEL: store_v1i32_v1i32:
; SSE: ## %bb.0:
; SSE-NEXT: testl %edi, %edi
@@ -1156,7 +1156,7 @@ define void @store_v1i32_v1i32(<1 x i32> %trigger, ptr %addr, <1 x i32> %val) {
ret void
}
-define void @store_v2i32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x i32> %val) {
+define void @store_v2i32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x i32> %val) nounwind {
; SSE2-LABEL: store_v2i32_v2i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -1256,7 +1256,7 @@ define void @store_v2i32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x i32> %val) {
ret void
}
-define void @store_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) {
+define void @store_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) nounwind {
; SSE2-LABEL: store_v4i32_v4i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -1370,7 +1370,7 @@ define void @store_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) {
ret void
}
-define void @store_v8i32_v8i32(<8 x i32> %trigger, ptr %addr, <8 x i32> %val) {
+define void @store_v8i32_v8i32(<8 x i32> %trigger, ptr %addr, <8 x i32> %val) nounwind {
; SSE2-LABEL: store_v8i32_v8i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -1560,7 +1560,7 @@ define void @store_v8i32_v8i32(<8 x i32> %trigger, ptr %addr, <8 x i32> %val) {
; vXi16
;
-define void @store_v8i16_v8i16(<8 x i16> %trigger, ptr %addr, <8 x i16> %val) {
+define void @store_v8i16_v8i16(<8 x i16> %trigger, ptr %addr, <8 x i16> %val) nounwind {
; SSE2-LABEL: store_v8i16_v8i16:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -1907,7 +1907,7 @@ define void @store_v8i16_v8i16(<8 x i16> %trigger, ptr %addr, <8 x i16> %val) {
ret void
}
-define void @store_v16i16_v16i16(<16 x i16> %trigger, ptr %addr, <16 x i16> %val) {
+define void @store_v16i16_v16i16(<16 x i16> %trigger, ptr %addr, <16 x i16> %val) nounwind {
; SSE2-LABEL: store_v16i16_v16i16:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -2676,7 +2676,7 @@ define void @store_v16i16_v16i16(<16 x i16> %trigger, ptr %addr, <16 x i16> %val
; vXi8
;
-define void @store_v16i8_v16i8(<16 x i8> %trigger, ptr %addr, <16 x i8> %val) {
+define void @store_v16i8_v16i8(<16 x i8> %trigger, ptr %addr, <16 x i8> %val) nounwind {
; SSE2-LABEL: store_v16i8_v16i8:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -3273,7 +3273,7 @@ define void @store_v16i8_v16i8(<16 x i8> %trigger, ptr %addr, <16 x i8> %val) {
ret void
}
-define void @store_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %val) {
+define void @store_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %val) nounwind {
; SSE2-LABEL: store_v32i8_v32i8:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -4670,7 +4670,7 @@ define void @store_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %val) {
;;; Stores with Constant Masks
-define void @mstore_constmask_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) {
+define void @mstore_constmask_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) nounwind {
; SSE-LABEL: mstore_constmask_v4i32_v4i32:
; SSE: ## %bb.0:
; SSE-NEXT: movups %xmm1, (%rdi)
@@ -4693,7 +4693,7 @@ define void @mstore_constmask_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i3
; Make sure we are able to detect all ones constant mask after type legalization
; to avoid masked stores.
-define void @mstore_constmask_allones_split(<16 x i64> %trigger, ptr %addr, <16 x i64> %val) {
+define void @mstore_constmask_allones_split(<16 x i64> %trigger, ptr %addr, <16 x i64> %val) nounwind {
; SSE2-LABEL: mstore_constmask_allones_split:
; SSE2: ## %bb.0:
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0
@@ -4810,7 +4810,7 @@ define void @mstore_constmask_allones_split(<16 x i64> %trigger, ptr %addr, <16
; When only one element of the mask is set, reduce to a scalar store.
-define void @one_mask_bit_set1(ptr %addr, <4 x i32> %val) {
+define void @one_mask_bit_set1(ptr %addr, <4 x i32> %val) nounwind {
; SSE-LABEL: one_mask_bit_set1:
; SSE: ## %bb.0:
; SSE-NEXT: movss %xmm0, (%rdi)
@@ -4832,7 +4832,7 @@ define void @one_mask_bit_set1(ptr %addr, <4 x i32> %val) {
; Choose a different element to show that the correct address offset is produced.
-define void @one_mask_bit_set2(ptr %addr, <4 x float> %val) {
+define void @one_mask_bit_set2(ptr %addr, <4 x float> %val) nounwind {
; SSE2-LABEL: one_mask_bit_set2:
; SSE2: ## %bb.0:
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -4860,7 +4860,7 @@ define void @one_mask_bit_set2(ptr %addr, <4 x float> %val) {
; Choose a different scalar type and a high element of a 256-bit vector because AVX doesn't support those evenly.
-define void @one_mask_bit_set3(ptr %addr, <4 x i64> %val) {
+define void @one_mask_bit_set3(ptr %addr, <4 x i64> %val) nounwind {
; SSE-LABEL: one_mask_bit_set3:
; SSE: ## %bb.0:
; SSE-NEXT: movlps %xmm1, 16(%rdi)
@@ -4886,7 +4886,7 @@ define void @one_mask_bit_set3(ptr %addr, <4 x i64> %val) {
; Choose a different scalar type and a high element of a 256-bit vector because AVX doesn't support those evenly.
-define void @one_mask_bit_set4(ptr %addr, <4 x double> %val) {
+define void @one_mask_bit_set4(ptr %addr, <4 x double> %val) nounwind {
; SSE-LABEL: one_mask_bit_set4:
; SSE: ## %bb.0:
; SSE-NEXT: movhps %xmm1, 24(%rdi)
@@ -4912,7 +4912,7 @@ define void @one_mask_bit_set4(ptr %addr, <4 x double> %val) {
; Try a 512-bit vector to make sure AVX doesn't die and AVX512 works as expected.
-define void @one_mask_bit_set5(ptr %addr, <8 x double> %val) {
+define void @one_mask_bit_set5(ptr %addr, <8 x double> %val) nounwind {
; SSE-LABEL: one_mask_bit_set5:
; SSE: ## %bb.0:
; SSE-NEXT: movlps %xmm3, 48(%rdi)
@@ -4944,7 +4944,7 @@ define void @one_mask_bit_set5(ptr %addr, <8 x double> %val) {
}
; Try one elt in each half of a vector that needs to split
-define void @one_mask_bit_set6(ptr %addr, <16 x i64> %val) {
+define void @one_mask_bit_set6(ptr %addr, <16 x i64> %val) nounwind {
; SSE2-LABEL: one_mask_bit_set6:
; SSE2: ## %bb.0:
; SSE2-NEXT: movlps %xmm3, 48(%rdi)
@@ -4999,7 +4999,7 @@ define void @one_mask_bit_set6(ptr %addr, <16 x i64> %val) {
ret void
}
-define void @top_bits_unset_stack() {
+define void @top_bits_unset_stack() nounwind {
; SSE-LABEL: top_bits_unset_stack:
; SSE: ## %bb.0: ## %entry
; SSE-NEXT: xorps %xmm0, %xmm0
@@ -5047,7 +5047,6 @@ define void @top_bits_unset_stack() {
; X86-AVX512-LABEL: top_bits_unset_stack:
; X86-AVX512: ## %bb.0: ## %entry
; X86-AVX512-NEXT: subl $76, %esp
-; X86-AVX512-NEXT: .cfi_def_cfa_offset 80
; X86-AVX512-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; X86-AVX512-NEXT: movb $63, %al
; X86-AVX512-NEXT: kmovd %eax, %k1
@@ -5064,7 +5063,7 @@ entry:
; SimplifyDemandedBits eliminates an ashr here.
-define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, ptr %p, <4 x i32> %masksrc) {
+define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, ptr %p, <4 x i32> %masksrc) nounwind {
; SSE-LABEL: masked_store_bool_mask_demand_trunc_sext:
; SSE: ## %bb.0:
; SSE-NEXT: pslld $31, %xmm2
@@ -5160,7 +5159,7 @@ define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, ptr %p, <
; PR26697
-define void @one_mask_bit_set1_variable(ptr %addr, <4 x float> %val, <4 x i32> %mask) {
+define void @one_mask_bit_set1_variable(ptr %addr, <4 x float> %val, <4 x i32> %mask) nounwind {
; SSE2-LABEL: one_mask_bit_set1_variable:
; SSE2: ## %bb.0:
; SSE2-NEXT: movmskps %xmm1, %eax
@@ -5267,7 +5266,7 @@ define void @one_mask_bit_set1_variable(ptr %addr, <4 x float> %val, <4 x i32> %
; This needs to be widened to v4i32.
; This used to assert in type legalization. PR38436
; FIXME: The codegen for AVX512 should use KSHIFT to zero the upper bits of the mask.
-define void @widen_masked_store(<3 x i32> %v, ptr %p, <3 x i1> %mask) {
+define void @widen_masked_store(<3 x i32> %v, ptr %p, <3 x i1> %mask) nounwind {
; SSE2-LABEL: widen_masked_store:
; SSE2: ## %bb.0:
; SSE2-NEXT: andb $1, %sil
@@ -5448,7 +5447,7 @@ define void @widen_masked_store(<3 x i32> %v, ptr %p, <3 x i1> %mask) {
ret void
}
-define void @zero_mask(ptr %addr, <2 x double> %val) {
+define void @zero_mask(ptr %addr, <2 x double> %val) nounwind {
; SSE-LABEL: zero_mask:
; SSE: ## %bb.0:
; SSE-NEXT: retq
@@ -5464,7 +5463,7 @@ define void @zero_mask(ptr %addr, <2 x double> %val) {
ret void
}
-define void @PR11210(<4 x float> %x, ptr %ptr, <4 x float> %y, <2 x i64> %mask) {
+define void @PR11210(<4 x float> %x, ptr %ptr, <4 x float> %y, <2 x i64> %mask) nounwind {
; SSE2-LABEL: PR11210:
; SSE2: ## %bb.0:
; SSE2-NEXT: movmskps %xmm2, %eax
@@ -5638,492 +5637,248 @@ define void @PR11210(<4 x float> %x, ptr %ptr, <4 x float> %y, <2 x i64> %mask)
ret void
}
-define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigger.ptr, ptr %val.ptr, ptr %dst) {
-; SSE2-LABEL: store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts:
-; SSE2: ## %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm6
-; SSE2-NEXT: movdqa 32(%rdi), %xmm7
-; SSE2-NEXT: movdqa 64(%rdi), %xmm8
-; SSE2-NEXT: movl 80(%rsi), %eax
-; SSE2-NEXT: movl 64(%rsi), %r8d
-; SSE2-NEXT: movl 48(%rsi), %r9d
-; SSE2-NEXT: movl 32(%rsi), %r10d
-; SSE2-NEXT: movl 16(%rsi), %r11d
-; SSE2-NEXT: movdqa 80(%rsi), %xmm0
-; SSE2-NEXT: movdqa 64(%rsi), %xmm1
-; SSE2-NEXT: movdqa 48(%rsi), %xmm2
-; SSE2-NEXT: movdqa 32(%rsi), %xmm3
-; SSE2-NEXT: movdqa 16(%rsi), %xmm4
-; SSE2-NEXT: movdqa (%rsi), %xmm5
-; SSE2-NEXT: packssdw 48(%rdi), %xmm7
-; SSE2-NEXT: packssdw 16(%rdi), %xmm6
-; SSE2-NEXT: packsswb %xmm7, %xmm6
-; SSE2-NEXT: packssdw 80(%rdi), %xmm8
-; SSE2-NEXT: packsswb %xmm8, %xmm8
-; SSE2-NEXT: pmovmskb %xmm6, %edi
-; SSE2-NEXT: andl $21845, %edi ## imm = 0x5555
-; SSE2-NEXT: pmovmskb %xmm8, %ecx
-; SSE2-NEXT: andl $85, %ecx
-; SSE2-NEXT: shll $16, %ecx
-; SSE2-NEXT: orl %edi, %ecx
-; SSE2-NEXT: testb $1, %cl
-; SSE2-NEXT: jne LBB31_1
-; SSE2-NEXT: ## %bb.2: ## %else
-; SSE2-NEXT: testb $2, %cl
-; SSE2-NEXT: jne LBB31_3
-; SSE2-NEXT: LBB31_4: ## %else2
-; SSE2-NEXT: testb $4, %cl
-; SSE2-NEXT: jne LBB31_5
-; SSE2-NEXT: LBB31_6: ## %else4
-; SSE2-NEXT: testb $8, %cl
-; SSE2-NEXT: jne LBB31_7
-; SSE2-NEXT: LBB31_8: ## %else6
-; SSE2-NEXT: testb $16, %cl
-; SSE2-NEXT: jne LBB31_9
-; SSE2-NEXT: LBB31_10: ## %else8
-; SSE2-NEXT: testb $32, %cl
-; SSE2-NEXT: jne LBB31_11
-; SSE2-NEXT: LBB31_12: ## %else10
-; SSE2-NEXT: testb $64, %cl
-; SSE2-NEXT: jne LBB31_13
-; SSE2-NEXT: LBB31_14: ## %else12
-; SSE2-NEXT: testb %cl, %cl
-; SSE2-NEXT: js LBB31_15
-; SSE2-NEXT: LBB31_16: ## %else14
-; SSE2-NEXT: testl $256, %ecx ## imm = 0x100
-; SSE2-NEXT: jne LBB31_17
-; SSE2-NEXT: LBB31_18: ## %else16
-; SSE2-NEXT: testl $512, %ecx ## imm = 0x200
-; SSE2-NEXT: jne LBB31_19
-; SSE2-NEXT: LBB31_20: ## %else18
-; SSE2-NEXT: testl $1024, %ecx ## imm = 0x400
-; SSE2-NEXT: jne LBB31_21
-; SSE2-NEXT: LBB31_22: ## %else20
-; SSE2-NEXT: testl $2048, %ecx ## imm = 0x800
-; SSE2-NEXT: jne LBB31_23
-; SSE2-NEXT: LBB31_24: ## %else22
-; SSE2-NEXT: testl $4096, %ecx ## imm = 0x1000
-; SSE2-NEXT: jne LBB31_25
-; SSE2-NEXT: LBB31_26: ## %else24
-; SSE2-NEXT: testl $8192, %ecx ## imm = 0x2000
-; SSE2-NEXT: jne LBB31_27
-; SSE2-NEXT: LBB31_28: ## %else26
-; SSE2-NEXT: testl $16384, %ecx ## imm = 0x4000
-; SSE2-NEXT: jne LBB31_29
-; SSE2-NEXT: LBB31_30: ## %else28
-; SSE2-NEXT: testw %cx, %cx
-; SSE2-NEXT: js LBB31_31
-; SSE2-NEXT: LBB31_32: ## %else30
-; SSE2-NEXT: testl $65536, %ecx ## imm = 0x10000
-; SSE2-NEXT: jne LBB31_33
-; SSE2-NEXT: LBB31_34: ## %else32
-; SSE2-NEXT: testl $131072, %ecx ## imm = 0x20000
-; SSE2-NEXT: jne LBB31_35
-; SSE2-NEXT: LBB31_36: ## %else34
-; SSE2-NEXT: testl $262144, %ecx ## imm = 0x40000
-; SSE2-NEXT: jne LBB31_37
-; SSE2-NEXT: LBB31_38: ## %else36
-; SSE2-NEXT: testl $524288, %ecx ## imm = 0x80000
-; SSE2-NEXT: jne LBB31_39
-; SSE2-NEXT: LBB31_40: ## %else38
-; SSE2-NEXT: testl $1048576, %ecx ## imm = 0x100000
-; SSE2-NEXT: jne LBB31_41
-; SSE2-NEXT: LBB31_42: ## %else40
-; SSE2-NEXT: testl $2097152, %ecx ## imm = 0x200000
-; SSE2-NEXT: jne LBB31_43
-; SSE2-NEXT: LBB31_44: ## %else42
-; SSE2-NEXT: testl $4194304, %ecx ## imm = 0x400000
-; SSE2-NEXT: je LBB31_46
-; SSE2-NEXT: LBB31_45: ## %cond.store43
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: movl %eax, 88(%rdx)
-; SSE2-NEXT: LBB31_46: ## %else44
-; SSE2-NEXT: movb $1, %al
-; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne LBB31_48
-; SSE2-NEXT: ## %bb.47: ## %cond.store45
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: movl %eax, 92(%rdx)
-; SSE2-NEXT: LBB31_48: ## %else46
-; SSE2-NEXT: retq
-; SSE2-NEXT: LBB31_1: ## %cond.store
-; SSE2-NEXT: movl (%rsi), %esi
-; SSE2-NEXT: movl %esi, (%rdx)
-; SSE2-NEXT: testb $2, %cl
-; SSE2-NEXT: je LBB31_4
-; SSE2-NEXT: LBB31_3: ## %cond.store1
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,1,1]
-; SSE2-NEXT: movd %xmm6, %esi
-; SSE2-NEXT: movl %esi, 4(%rdx)
-; SSE2-NEXT: testb $4, %cl
-; SSE2-NEXT: je LBB31_6
-; SSE2-NEXT: LBB31_5: ## %cond.store3
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
-; SSE2-NEXT: movd %xmm6, %esi
-; SSE2-NEXT: movl %esi, 8(%rdx)
-; SSE2-NEXT: testb $8, %cl
-; SSE2-NEXT: je LBB31_8
-; SSE2-NEXT: LBB31_7: ## %cond.store5
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[3,3,3,3]
-; SSE2-NEXT: movd %xmm5, %esi
-; SSE2-NEXT: movl %esi, 12(%rdx)
-; SSE2-NEXT: testb $16, %cl
-; SSE2-NEXT: je LBB31_10
-; SSE2-NEXT: LBB31_9: ## %cond.store7
-; SSE2-NEXT: movl %r11d, 16(%rdx)
-; SSE2-NEXT: testb $32, %cl
-; SSE2-NEXT: je LBB31_12
-; SSE2-NEXT: LBB31_11: ## %cond.store9
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,1,1]
-; SSE2-NEXT: movd %xmm5, %esi
-; SSE2-NEXT: movl %esi, 20(%rdx)
-; SSE2-NEXT: testb $64, %cl
-; SSE2-NEXT: je LBB31_14
-; SSE2-NEXT: LBB31_13: ## %cond.store11
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
-; SSE2-NEXT: movd %xmm5, %esi
-; SSE2-NEXT: movl %esi, 24(%rdx)
-; SSE2-NEXT: testb %cl, %cl
-; SSE2-NEXT: jns LBB31_16
-; SSE2-NEXT: LBB31_15: ## %cond.store13
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[3,3,3,3]
-; SSE2-NEXT: movd %xmm4, %esi
-; SSE2-NEXT: movl %esi, 28(%rdx)
-; SSE2-NEXT: testl $256, %ecx ## imm = 0x100
-; SSE2-NEXT: je LBB31_18
-; SSE2-NEXT: LBB31_17: ## %cond.store15
-; SSE2-NEXT: movl %r10d, 32(%rdx)
-; SSE2-NEXT: testl $512, %ecx ## imm = 0x200
-; SSE2-NEXT: je LBB31_20
-; SSE2-NEXT: LBB31_19: ## %cond.store17
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,1,1]
-; SSE2-NEXT: movd %xmm4, %esi
-; SSE2-NEXT: movl %esi, 36(%rdx)
-; SSE2-NEXT: testl $1024, %ecx ## imm = 0x400
-; SSE2-NEXT: je LBB31_22
-; SSE2-NEXT: LBB31_21: ## %cond.store19
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
-; SSE2-NEXT: movd %xmm4, %esi
-; SSE2-NEXT: movl %esi, 40(%rdx)
-; SSE2-NEXT: testl $2048, %ecx ## imm = 0x800
-; SSE2-NEXT: je LBB31_24
-; SSE2-NEXT: LBB31_23: ## %cond.store21
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
-; SSE2-NEXT: movd %xmm3, %esi
-; SSE2-NEXT: movl %esi, 44(%rdx)
-; SSE2-NEXT: testl $4096, %ecx ## imm = 0x1000
-; SSE2-NEXT: je LBB31_26
-; SSE2-NEXT: LBB31_25: ## %cond.store23
-; SSE2-NEXT: movl %r9d, 48(%rdx)
-; SSE2-NEXT: testl $8192, %ecx ## imm = 0x2000
-; SSE2-NEXT: je LBB31_28
-; SSE2-NEXT: LBB31_27: ## %cond.store25
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,1,1]
-; SSE2-NEXT: movd %xmm3, %esi
-; SSE2-NEXT: movl %esi, 52(%rdx)
-; SSE2-NEXT: testl $16384, %ecx ## imm = 0x4000
-; SSE2-NEXT: je LBB31_30
-; SSE2-NEXT: LBB31_29: ## %cond.store27
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
-; SSE2-NEXT: movd %xmm3, %esi
-; SSE2-NEXT: movl %esi, 56(%rdx)
-; SSE2-NEXT: testw %cx, %cx
-; SSE2-NEXT: jns LBB31_32
-; SSE2-NEXT: LBB31_31: ## %cond.store29
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
-; SSE2-NEXT: movd %xmm2, %esi
-; SSE2-NEXT: movl %esi, 60(%rdx)
-; SSE2-NEXT: testl $65536, %ecx ## imm = 0x10000
-; SSE2-NEXT: je LBB31_34
-; SSE2-NEXT: LBB31_33: ## %cond.store31
-; SSE2-NEXT: movl %r8d, 64(%rdx)
-; SSE2-NEXT: testl $131072, %ecx ## imm = 0x20000
-; SSE2-NEXT: je LBB31_36
-; SSE2-NEXT: LBB31_35: ## %cond.store33
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
-; SSE2-NEXT: movd %xmm2, %esi
-; SSE2-NEXT: movl %esi, 68(%rdx)
-; SSE2-NEXT: testl $262144, %ecx ## imm = 0x40000
-; SSE2-NEXT: je LBB31_38
-; SSE2-NEXT: LBB31_37: ## %cond.store35
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE2-NEXT: movd %xmm2, %esi
-; SSE2-NEXT: movl %esi, 72(%rdx)
-; SSE2-NEXT: testl $524288, %ecx ## imm = 0x80000
-; SSE2-NEXT: je LBB31_40
-; SSE2-NEXT: LBB31_39: ## %cond.store37
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
-; SSE2-NEXT: movd %xmm1, %esi
-; SSE2-NEXT: movl %esi, 76(%rdx)
-; SSE2-NEXT: testl $1048576, %ecx ## imm = 0x100000
-; SSE2-NEXT: je LBB31_42
-; SSE2-NEXT: LBB31_41: ## %cond.store39
-; SSE2-NEXT: movl %eax, 80(%rdx)
-; SSE2-NEXT: testl $2097152, %ecx ## imm = 0x200000
-; SSE2-NEXT: je LBB31_44
-; SSE2-NEXT: LBB31_43: ## %cond.store41
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: movl %eax, 84(%rdx)
-; SSE2-NEXT: testl $4194304, %ecx ## imm = 0x400000
-; SSE2-NEXT: jne LBB31_45
-; SSE2-NEXT: jmp LBB31_46
-;
-; SSE4-LABEL: store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts:
-; SSE4: ## %bb.0:
-; SSE4-NEXT: pushq %rbp
-; SSE4-NEXT: .cfi_def_cfa_offset 16
-; SSE4-NEXT: pushq %r15
-; SSE4-NEXT: .cfi_def_cfa_offset 24
-; SSE4-NEXT: pushq %r14
-; SSE4-NEXT: .cfi_def_cfa_offset 32
-; SSE4-NEXT: pushq %r13
-; SSE4-NEXT: .cfi_def_cfa_offset 40
-; SSE4-NEXT: pushq %r12
-; SSE4-NEXT: .cfi_def_cfa_offset 48
-; SSE4-NEXT: pushq %rbx
-; SSE4-NEXT: .cfi_def_cfa_offset 56
-; SSE4-NEXT: .cfi_offset %rbx, -56
-; SSE4-NEXT: .cfi_offset %r12, -48
-; SSE4-NEXT: .cfi_offset %r13, -40
-; SSE4-NEXT: .cfi_offset %r14, -32
-; SSE4-NEXT: .cfi_offset %r15, -24
-; SSE4-NEXT: .cfi_offset %rbp, -16
-; SSE4-NEXT: movdqa (%rdi), %xmm1
-; SSE4-NEXT: movdqa 32(%rdi), %xmm2
-; SSE4-NEXT: movdqa 64(%rdi), %xmm0
-; SSE4-NEXT: movl 92(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 88(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 84(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 80(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 76(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 72(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 68(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 64(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 60(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 56(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 52(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: packssdw 48(%rdi), %xmm2
-; SSE4-NEXT: packssdw 16(%rdi), %xmm1
-; SSE4-NEXT: packsswb %xmm2, %xmm1
-; SSE4-NEXT: packssdw 80(%rdi), %xmm0
-; SSE4-NEXT: packsswb %xmm0, %xmm0
-; SSE4-NEXT: pmovmskb %xmm1, %eax
-; SSE4-NEXT: andl $21845, %eax ## imm = 0x5555
-; SSE4-NEXT: pmovmskb %xmm0, %edi
-; SSE4-NEXT: andl $85, %edi
-; SSE4-NEXT: shll $16, %edi
-; SSE4-NEXT: orl %eax, %edi
-; SSE4-NEXT: movl 48(%rsi), %r13d
-; SSE4-NEXT: testb $1, %dil
-; SSE4-NEXT: movl 44(%rsi), %eax
-; SSE4-NEXT: movl 40(%rsi), %ecx
-; SSE4-NEXT: movl 36(%rsi), %r8d
-; SSE4-NEXT: movl 32(%rsi), %r9d
-; SSE4-NEXT: movl 28(%rsi), %r10d
-; SSE4-NEXT: movl 24(%rsi), %r11d
-; SSE4-NEXT: movl 20(%rsi), %ebx
-; SSE4-NEXT: movl 16(%rsi), %ebp
-; SSE4-NEXT: movl 12(%rsi), %r14d
-; SSE4-NEXT: movl 8(%rsi), %r15d
-; SSE4-NEXT: movl 4(%rsi), %r12d
-; SSE4-NEXT: jne LBB31_1
-; SSE4-NEXT: ## %bb.2: ## %else
-; SSE4-NEXT: testb $2, %dil
-; SSE4-NEXT: jne LBB31_3
-; SSE4-NEXT: LBB31_4: ## %else2
-; SSE4-NEXT: testb $4, %dil
-; SSE4-NEXT: jne LBB31_5
-; SSE4-NEXT: LBB31_6: ## %else4
-; SSE4-NEXT: testb $8, %dil
-; SSE4-NEXT: jne LBB31_7
-; SSE4-NEXT: LBB31_8: ## %else6
-; SSE4-NEXT: testb $16, %dil
-; SSE4-NEXT: jne LBB31_9
-; SSE4-NEXT: LBB31_10: ## %else8
-; SSE4-NEXT: testb $32, %dil
-; SSE4-NEXT: jne LBB31_11
-; SSE4-NEXT: LBB31_12: ## %else10
-; SSE4-NEXT: testb $64, %dil
-; SSE4-NEXT: jne LBB31_13
-; SSE4-NEXT: LBB31_14: ## %else12
-; SSE4-NEXT: testb %dil, %dil
-; SSE4-NEXT: js LBB31_15
-; SSE4-NEXT: LBB31_16: ## %else14
-; SSE4-NEXT: testl $256, %edi ## imm = 0x100
-; SSE4-NEXT: jne LBB31_17
-; SSE4-NEXT: LBB31_18: ## %else16
-; SSE4-NEXT: testl $512, %edi ## imm = 0x200
-; SSE4-NEXT: jne LBB31_19
-; SSE4-NEXT: LBB31_20: ## %else18
-; SSE4-NEXT: testl $1024, %edi ## imm = 0x400
-; SSE4-NEXT: jne LBB31_21
-; SSE4-NEXT: LBB31_22: ## %else20
-; SSE4-NEXT: testl $2048, %edi ## imm = 0x800
-; SSE4-NEXT: jne LBB31_23
-; SSE4-NEXT: LBB31_24: ## %else22
-; SSE4-NEXT: testl $4096, %edi ## imm = 0x1000
-; SSE4-NEXT: jne LBB31_25
-; SSE4-NEXT: LBB31_26: ## %else24
-; SSE4-NEXT: testl $8192, %edi ## imm = 0x2000
-; SSE4-NEXT: jne LBB31_27
-; SSE4-NEXT: LBB31_28: ## %else26
-; SSE4-NEXT: testl $16384, %edi ## imm = 0x4000
-; SSE4-NEXT: jne LBB31_29
-; SSE4-NEXT: LBB31_30: ## %else28
-; SSE4-NEXT: testw %di, %di
-; SSE4-NEXT: js LBB31_31
-; SSE4-NEXT: LBB31_32: ## %else30
-; SSE4-NEXT: testl $65536, %edi ## imm = 0x10000
-; SSE4-NEXT: jne LBB31_33
-; SSE4-NEXT: LBB31_34: ## %else32
-; SSE4-NEXT: testl $131072, %edi ## imm = 0x20000
-; SSE4-NEXT: jne LBB31_35
-; SSE4-NEXT: LBB31_36: ## %else34
-; SSE4-NEXT: testl $262144, %edi ## imm = 0x40000
-; SSE4-NEXT: jne LBB31_37
-; SSE4-NEXT: LBB31_38: ## %else36
-; SSE4-NEXT: testl $524288, %edi ## imm = 0x80000
-; SSE4-NEXT: jne LBB31_39
-; SSE4-NEXT: LBB31_40: ## %else38
-; SSE4-NEXT: testl $1048576, %edi ## imm = 0x100000
-; SSE4-NEXT: jne LBB31_41
-; SSE4-NEXT: LBB31_42: ## %else40
-; SSE4-NEXT: testl $2097152, %edi ## imm = 0x200000
-; SSE4-NEXT: jne LBB31_43
-; SSE4-NEXT: LBB31_44: ## %else42
-; SSE4-NEXT: testl $4194304, %edi ## imm = 0x400000
-; SSE4-NEXT: je LBB31_46
-; SSE4-NEXT: LBB31_45: ## %cond.store43
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 88(%rdx)
-; SSE4-NEXT: LBB31_46: ## %else44
-; SSE4-NEXT: movb $1, %al
-; SSE4-NEXT: testb %al, %al
-; SSE4-NEXT: jne LBB31_48
-; SSE4-NEXT: ## %bb.47: ## %cond.store45
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 92(%rdx)
-; SSE4-NEXT: LBB31_48: ## %else46
-; SSE4-NEXT: popq %rbx
-; SSE4-NEXT: popq %r12
-; SSE4-NEXT: popq %r13
-; SSE4-NEXT: popq %r14
-; SSE4-NEXT: popq %r15
-; SSE4-NEXT: popq %rbp
-; SSE4-NEXT: retq
-; SSE4-NEXT: LBB31_1: ## %cond.store
-; SSE4-NEXT: movl (%rsi), %esi
-; SSE4-NEXT: movl %esi, (%rdx)
-; SSE4-NEXT: testb $2, %dil
-; SSE4-NEXT: je LBB31_4
-; SSE4-NEXT: LBB31_3: ## %cond.store1
-; SSE4-NEXT: movl %r12d, 4(%rdx)
-; SSE4-NEXT: testb $4, %dil
-; SSE4-NEXT: je LBB31_6
-; SSE4-NEXT: LBB31_5: ## %cond.store3
-; SSE4-NEXT: movl %r15d, 8(%rdx)
-; SSE4-NEXT: testb $8, %dil
-; SSE4-NEXT: je LBB31_8
-; SSE4-NEXT: LBB31_7: ## %cond.store5
-; SSE4-NEXT: movl %r14d, 12(%rdx)
-; SSE4-NEXT: testb $16, %dil
-; SSE4-NEXT: je LBB31_10
-; SSE4-NEXT: LBB31_9: ## %cond.store7
-; SSE4-NEXT: movl %ebp, 16(%rdx)
-; SSE4-NEXT: testb $32, %dil
-; SSE4-NEXT: je LBB31_12
-; SSE4-NEXT: LBB31_11: ## %cond.store9
-; SSE4-NEXT: movl %ebx, 20(%rdx)
-; SSE4-NEXT: testb $64, %dil
-; SSE4-NEXT: je LBB31_14
-; SSE4-NEXT: LBB31_13: ## %cond.store11
-; SSE4-NEXT: movl %r11d, 24(%rdx)
-; SSE4-NEXT: testb %dil, %dil
-; SSE4-NEXT: jns LBB31_16
-; SSE4-NEXT: LBB31_15: ## %cond.store13
-; SSE4-NEXT: movl %r10d, 28(%rdx)
-; SSE4-NEXT: testl $256, %edi ## imm = 0x100
-; SSE4-NEXT: je LBB31_18
-; SSE4-NEXT: LBB31_17: ## %cond.store15
-; SSE4-NEXT: movl %r9d, 32(%rdx)
-; SSE4-NEXT: testl $512, %edi ## imm = 0x200
-; SSE4-NEXT: je LBB31_20
-; SSE4-NEXT: LBB31_19: ## %cond.store17
-; SSE4-NEXT: movl %r8d, 36(%rdx)
-; SSE4-NEXT: testl $1024, %edi ## imm = 0x400
-; SSE4-NEXT: je LBB31_22
-; SSE4-NEXT: LBB31_21: ## %cond.store19
-; SSE4-NEXT: movl %ecx, 40(%rdx)
-; SSE4-NEXT: testl $2048, %edi ## imm = 0x800
-; SSE4-NEXT: je LBB31_24
-; SSE4-NEXT: LBB31_23: ## %cond.store21
-; SSE4-NEXT: movl %eax, 44(%rdx)
-; SSE4-NEXT: testl $4096, %edi ## imm = 0x1000
-; SSE4-NEXT: je LBB31_26
-; SSE4-NEXT: LBB31_25: ## %cond.store23
-; SSE4-NEXT: movl %r13d, 48(%rdx)
-; SSE4-NEXT: testl $8192, %edi ## imm = 0x2000
-; SSE4-NEXT: je LBB31_28
-; SSE4-NEXT: LBB31_27: ## %cond.store25
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 52(%rdx)
-; SSE4-NEXT: testl $16384, %edi ## imm = 0x4000
-; SSE4-NEXT: je LBB31_30
-; SSE4-NEXT: LBB31_29: ## %cond.store27
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 56(%rdx)
-; SSE4-NEXT: testw %di, %di
-; SSE4-NEXT: jns LBB31_32
-; SSE4-NEXT: LBB31_31: ## %cond.store29
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 60(%rdx)
-; SSE4-NEXT: testl $65536, %edi ## imm = 0x10000
-; SSE4-NEXT: je LBB31_34
-; SSE4-NEXT: LBB31_33: ## %cond.store31
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 64(%rdx)
-; SSE4-NEXT: testl $131072, %edi ## imm = 0x20000
-; SSE4-NEXT: je LBB31_36
-; SSE4-NEXT: LBB31_35: ## %cond.store33
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 68(%rdx)
-; SSE4-NEXT: testl $262144, %edi ## imm = 0x40000
-; SSE4-NEXT: je LBB31_38
-; SSE4-NEXT: LBB31_37: ## %cond.store35
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 72(%rdx)
-; SSE4-NEXT: testl $524288, %edi ## imm = 0x80000
-; SSE4-NEXT: je LBB31_40
-; SSE4-NEXT: LBB31_39: ## %cond.store37
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 76(%rdx)
-; SSE4-NEXT: testl $1048576, %edi ## imm = 0x100000
-; SSE4-NEXT: je LBB31_42
-; SSE4-NEXT: LBB31_41: ## %cond.store39
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 80(%rdx)
-; SSE4-NEXT: testl $2097152, %edi ## imm = 0x200000
-; SSE4-NEXT: je LBB31_44
-; SSE4-NEXT: LBB31_43: ## %cond.store41
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 84(%rdx)
-; SSE4-NEXT: testl $4194304, %edi ## imm = 0x400000
-; SSE4-NEXT: jne LBB31_45
-; SSE4-NEXT: jmp LBB31_46
+define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigger.ptr, ptr %val.ptr, ptr %dst) nounwind {
+; SSE-LABEL: store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts:
+; SSE: ## %bb.0:
+; SSE-NEXT: pushq %rbp
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %r13
+; SSE-NEXT: pushq %r12
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movdqa (%rdi), %xmm1
+; SSE-NEXT: movdqa 32(%rdi), %xmm2
+; SSE-NEXT: movdqa 64(%rdi), %xmm0
+; SSE-NEXT: movl 92(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 88(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 84(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 80(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 76(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 72(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 68(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 64(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 60(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 56(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 52(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: packssdw 48(%rdi), %xmm2
+; SSE-NEXT: packssdw 16(%rdi), %xmm1
+; SSE-NEXT: packsswb %xmm2, %xmm1
+; SSE-NEXT: packssdw 80(%rdi), %xmm0
+; SSE-NEXT: packsswb %xmm0, %xmm0
+; SSE-NEXT: pmovmskb %xmm1, %eax
+; SSE-NEXT: andl $21845, %eax ## imm = 0x5555
+; SSE-NEXT: pmovmskb %xmm0, %edi
+; SSE-NEXT: andl $85, %edi
+; SSE-NEXT: shll $16, %edi
+; SSE-NEXT: orl %eax, %edi
+; SSE-NEXT: movl 48(%rsi), %r13d
+; SSE-NEXT: testb $1, %dil
+; SSE-NEXT: movl 44(%rsi), %eax
+; SSE-NEXT: movl 40(%rsi), %ecx
+; SSE-NEXT: movl 36(%rsi), %r8d
+; SSE-NEXT: movl 32(%rsi), %r9d
+; SSE-NEXT: movl 28(%rsi), %r10d
+; SSE-NEXT: movl 24(%rsi), %r11d
+; SSE-NEXT: movl 20(%rsi), %ebx
+; SSE-NEXT: movl 16(%rsi), %ebp
+; SSE-NEXT: movl 12(%rsi), %r14d
+; SSE-NEXT: movl 8(%rsi), %r15d
+; SSE-NEXT: movl 4(%rsi), %r12d
+; SSE-NEXT: jne LBB31_1
+; SSE-NEXT: ## %bb.2: ## %else
+; SSE-NEXT: testb $2, %dil
+; SSE-NEXT: jne LBB31_3
+; SSE-NEXT: LBB31_4: ## %else2
+; SSE-NEXT: testb $4, %dil
+; SSE-NEXT: jne LBB31_5
+; SSE-NEXT: LBB31_6: ## %else4
+; SSE-NEXT: testb $8, %dil
+; SSE-NEXT: jne LBB31_7
+; SSE-NEXT: LBB31_8: ## %else6
+; SSE-NEXT: testb $16, %dil
+; SSE-NEXT: jne LBB31_9
+; SSE-NEXT: LBB31_10: ## %else8
+; SSE-NEXT: testb $32, %dil
+; SSE-NEXT: jne LBB31_11
+; SSE-NEXT: LBB31_12: ## %else10
+; SSE-NEXT: testb $64, %dil
+; SSE-NEXT: jne LBB31_13
+; SSE-NEXT: LBB31_14: ## %else12
+; SSE-NEXT: testb %dil, %dil
+; SSE-NEXT: js LBB31_15
+; SSE-NEXT: LBB31_16: ## %else14
+; SSE-NEXT: testl $256, %edi ## imm = 0x100
+; SSE-NEXT: jne LBB31_17
+; SSE-NEXT: LBB31_18: ## %else16
+; SSE-NEXT: testl $512, %edi ## imm = 0x200
+; SSE-NEXT: jne LBB31_19
+; SSE-NEXT: LBB31_20: ## %else18
+; SSE-NEXT: testl $1024, %edi ## imm = 0x400
+; SSE-NEXT: jne LBB31_21
+; SSE-NEXT: LBB31_22: ## %else20
+; SSE-NEXT: testl $2048, %edi ## imm = 0x800
+; SSE-NEXT: jne LBB31_23
+; SSE-NEXT: LBB31_24: ## %else22
+; SSE-NEXT: testl $4096, %edi ## imm = 0x1000
+; SSE-NEXT: jne LBB31_25
+; SSE-NEXT: LBB31_26: ## %else24
+; SSE-NEXT: testl $8192, %edi ## imm = 0x2000
+; SSE-NEXT: jne LBB31_27
+; SSE-NEXT: LBB31_28: ## %else26
+; SSE-NEXT: testl $16384, %edi ## imm = 0x4000
+; SSE-NEXT: jne LBB31_29
+; SSE-NEXT: LBB31_30: ## %else28
+; SSE-NEXT: testw %di, %di
+; SSE-NEXT: js LBB31_31
+; SSE-NEXT: LBB31_32: ## %else30
+; SSE-NEXT: testl $65536, %edi ## imm = 0x10000
+; SSE-NEXT: jne LBB31_33
+; SSE-NEXT: LBB31_34: ## %else32
+; SSE-NEXT: testl $131072, %edi ## imm = 0x20000
+; SSE-NEXT: jne LBB31_35
+; SSE-NEXT: LBB31_36: ## %else34
+; SSE-NEXT: testl $262144, %edi ## imm = 0x40000
+; SSE-NEXT: jne LBB31_37
+; SSE-NEXT: LBB31_38: ## %else36
+; SSE-NEXT: testl $524288, %edi ## imm = 0x80000
+; SSE-NEXT: jne LBB31_39
+; SSE-NEXT: LBB31_40: ## %else38
+; SSE-NEXT: testl $1048576, %edi ## imm = 0x100000
+; SSE-NEXT: jne LBB31_41
+; SSE-NEXT: LBB31_42: ## %else40
+; SSE-NEXT: testl $2097152, %edi ## imm = 0x200000
+; SSE-NEXT: jne LBB31_43
+; SSE-NEXT: LBB31_44: ## %else42
+; SSE-NEXT: testl $4194304, %edi ## imm = 0x400000
+; SSE-NEXT: je LBB31_46
+; SSE-NEXT: LBB31_45: ## %cond.store43
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 88(%rdx)
+; SSE-NEXT: LBB31_46: ## %else44
+; SSE-NEXT: movb $1, %al
+; SSE-NEXT: testb %al, %al
+; SSE-NEXT: jne LBB31_48
+; SSE-NEXT: ## %bb.47: ## %cond.store45
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 92(%rdx)
+; SSE-NEXT: LBB31_48: ## %else46
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r12
+; SSE-NEXT: popq %r13
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: popq %rbp
+; SSE-NEXT: retq
+; SSE-NEXT: LBB31_1: ## %cond.store
+; SSE-NEXT: movl (%rsi), %esi
+; SSE-NEXT: movl %esi, (%rdx)
+; SSE-NEXT: testb $2, %dil
+; SSE-NEXT: je LBB31_4
+; SSE-NEXT: LBB31_3: ## %cond.store1
+; SSE-NEXT: movl %r12d, 4(%rdx)
+; SSE-NEXT: testb $4, %dil
+; SSE-NEXT: je LBB31_6
+; SSE-NEXT: LBB31_5: ## %cond.store3
+; SSE-NEXT: movl %r15d, 8(%rdx)
+; SSE-NEXT: testb $8, %dil
+; SSE-NEXT: je LBB31_8
+; SSE-NEXT: LBB31_7: ## %cond.store5
+; SSE-NEXT: movl %r14d, 12(%rdx)
+; SSE-NEXT: testb $16, %dil
+; SSE-NEXT: je LBB31_10
+; SSE-NEXT: LBB31_9: ## %cond.store7
+; SSE-NEXT: movl %ebp, 16(%rdx)
+; SSE-NEXT: testb $32, %dil
+; SSE-NEXT: je LBB31_12
+; SSE-NEXT: LBB31_11: ## %cond.store9
+; SSE-NEXT: movl %ebx, 20(%rdx)
+; SSE-NEXT: testb $64, %dil
+; SSE-NEXT: je LBB31_14
+; SSE-NEXT: LBB31_13: ## %cond.store11
+; SSE-NEXT: movl %r11d, 24(%rdx)
+; SSE-NEXT: testb %dil, %dil
+; SSE-NEXT: jns LBB31_16
+; SSE-NEXT: LBB31_15: ## %cond.store13
+; SSE-NEXT: movl %r10d, 28(%rdx)
+; SSE-NEXT: testl $256, %edi ## imm = 0x100
+; SSE-NEXT: je LBB31_18
+; SSE-NEXT: LBB31_17: ## %cond.store15
+; SSE-NEXT: movl %r9d, 32(%rdx)
+; SSE-NEXT: testl $512, %edi ## imm = 0x200
+; SSE-NEXT: je LBB31_20
+; SSE-NEXT: LBB31_19: ## %cond.store17
+; SSE-NEXT: movl %r8d, 36(%rdx)
+; SSE-NEXT: testl $1024, %edi ## imm = 0x400
+; SSE-NEXT: je LBB31_22
+; SSE-NEXT: LBB31_21: ## %cond.store19
+; SSE-NEXT: movl %ecx, 40(%rdx)
+; SSE-NEXT: testl $2048, %edi ## imm = 0x800
+; SSE-NEXT: je LBB31_24
+; SSE-NEXT: LBB31_23: ## %cond.store21
+; SSE-NEXT: movl %eax, 44(%rdx)
+; SSE-NEXT: testl $4096, %edi ## imm = 0x1000
+; SSE-NEXT: je LBB31_26
+; SSE-NEXT: LBB31_25: ## %cond.store23
+; SSE-NEXT: movl %r13d, 48(%rdx)
+; SSE-NEXT: testl $8192, %edi ## imm = 0x2000
+; SSE-NEXT: je LBB31_28
+; SSE-NEXT: LBB31_27: ## %cond.store25
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 52(%rdx)
+; SSE-NEXT: testl $16384, %edi ## imm = 0x4000
+; SSE-NEXT: je LBB31_30
+; SSE-NEXT: LBB31_29: ## %cond.store27
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 56(%rdx)
+; SSE-NEXT: testw %di, %di
+; SSE-NEXT: jns LBB31_32
+; SSE-NEXT: LBB31_31: ## %cond.store29
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 60(%rdx)
+; SSE-NEXT: testl $65536, %edi ## imm = 0x10000
+; SSE-NEXT: je LBB31_34
+; SSE-NEXT: LBB31_33: ## %cond.store31
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 64(%rdx)
+; SSE-NEXT: testl $131072, %edi ## imm = 0x20000
+; SSE-NEXT: je LBB31_36
+; SSE-NEXT: LBB31_35: ## %cond.store33
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 68(%rdx)
+; SSE-NEXT: testl $262144, %edi ## imm = 0x40000
+; SSE-NEXT: je LBB31_38
+; SSE-NEXT: LBB31_37: ## %cond.store35
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 72(%rdx)
+; SSE-NEXT: testl $524288, %edi ## imm = 0x80000
+; SSE-NEXT: je LBB31_40
+; SSE-NEXT: LBB31_39: ## %cond.store37
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 76(%rdx)
+; SSE-NEXT: testl $1048576, %edi ## imm = 0x100000
+; SSE-NEXT: je LBB31_42
+; SSE-NEXT: LBB31_41: ## %cond.store39
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 80(%rdx)
+; SSE-NEXT: testl $2097152, %edi ## imm = 0x200000
+; SSE-NEXT: je LBB31_44
+; SSE-NEXT: LBB31_43: ## %cond.store41
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 84(%rdx)
+; SSE-NEXT: testl $4194304, %edi ## imm = 0x400000
+; SSE-NEXT: jne LBB31_45
+; SSE-NEXT: jmp LBB31_46
;
; AVX1-LABEL: store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts:
; AVX1: ## %bb.0:
@@ -6266,7 +6021,7 @@ define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigge
}
; From https://reviews.llvm.org/rGf8d9097168b7#1165311
-define void @undefshuffle(<8 x i1> %i0, ptr %src, ptr %dst) #0 {
+define void @undefshuffle(<8 x i1> %i0, ptr %src, ptr %dst) nounwind {
; SSE2-LABEL: undefshuffle:
; SSE2: ## %bb.0: ## %else
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
diff --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll
index 5da18ee..01056a8 100644
--- a/llvm/test/CodeGen/X86/oddshuffles.ll
+++ b/llvm/test/CodeGen/X86/oddshuffles.ll
@@ -2369,6 +2369,31 @@ define void @PR41097() {
ret void
}
+; FIXME - should use INSERTPS
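+; (e.g. a single "insertps $0x40, %xmm1, %xmm0", selecting element 1 of %1 into
+; element 0 of %0, would cover both lanes)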
+define <2 x float> @PR86068(<2 x float> %0, <2 x float> %1) {
+; SSE2-LABEL: PR86068:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,1]
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: PR86068:
+; SSE42: # %bb.0: # %entry
+; SSE42-NEXT: movshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE42-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; SSE42-NEXT: retq
+;
+; AVX-LABEL: PR86068:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; AVX-NEXT: retq
+entry:
+ %3 = shufflevector <2 x float> %1, <2 x float> poison, <2 x i32> <i32 1, i32 poison>
+ %4 = shufflevector <2 x float> %3, <2 x float> %0, <2 x i32> <i32 0, i32 3>
+ ret <2 x float> %4
+}
+
define void @D107009(ptr %input, ptr %output) {
; SSE-LABEL: D107009:
; SSE: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/optimize-max-0.ll b/llvm/test/CodeGen/X86/optimize-max-0.ll
index 1bd427c..81dafdf 100644
--- a/llvm/test/CodeGen/X86/optimize-max-0.ll
+++ b/llvm/test/CodeGen/X86/optimize-max-0.ll
@@ -489,7 +489,6 @@ define void @bar(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
; CHECK-NEXT: jb LBB1_4
; CHECK-NEXT: ## %bb.5: ## %bb9
; CHECK-NEXT: ## in Loop: Header=BB1_4 Depth=1
-; CHECK-NEXT: movl %edi, %ebx
; CHECK-NEXT: incl %ecx
; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: addl %edi, %edx
diff --git a/llvm/test/CodeGen/X86/pr45378.ll b/llvm/test/CodeGen/X86/pr45378.ll
index 426f4ee..6a5770a 100644
--- a/llvm/test/CodeGen/X86/pr45378.ll
+++ b/llvm/test/CodeGen/X86/pr45378.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefixes=CHECK,AVX
declare i64 @llvm.vector.reduce.or.v2i64(<2 x i64>)
@@ -71,28 +71,12 @@ define i1 @parseHeaders2_scalar_or(ptr %ptr) nounwind {
}
define i1 @parseHeaders2_scalar_and(ptr %ptr) nounwind {
-; SSE2-LABEL: parseHeaders2_scalar_and:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqu (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: testq %rax, (%rdi)
-; SSE2-NEXT: sete %al
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: parseHeaders2_scalar_and:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movq (%rdi), %rax
-; SSE41-NEXT: testq %rax, 8(%rdi)
-; SSE41-NEXT: sete %al
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: parseHeaders2_scalar_and:
-; AVX: # %bb.0:
-; AVX-NEXT: movq (%rdi), %rax
-; AVX-NEXT: testq %rax, 8(%rdi)
-; AVX-NEXT: sete %al
-; AVX-NEXT: retq
+; CHECK-LABEL: parseHeaders2_scalar_and:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: testq %rax, 8(%rdi)
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: retq
%vload = load <2 x i64>, ptr %ptr, align 8
%v1 = extractelement <2 x i64> %vload, i32 0
%v2 = extractelement <2 x i64> %vload, i32 1
diff --git a/llvm/test/CodeGen/X86/pr85681.ll b/llvm/test/CodeGen/X86/pr85681.ll
new file mode 100644
index 0000000..3b27a02
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr85681.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=emeraldrapids | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v2 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v3 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s
+
+; PR85681 - fold (shl/lshr/ashr i1/vXi1 X, Y) -> X, since Y == 0 is the only
+; shift amount for which the result is defined
+
+define i32 @shl(i32 %a0) {
+; CHECK-LABEL: shl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl $-1, %eax
+; CHECK-NEXT: retq
+ %v0 = bitcast i32 %a0 to <32 x i1>
+ %s = shl <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, %v0
+ %r = bitcast <32 x i1> %s to i32
+ ret i32 %r
+}
+
+define i32 @lshr(i32 %a0) {
+; CHECK-LABEL: lshr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl $-1, %eax
+; CHECK-NEXT: retq
+ %v0 = bitcast i32 %a0 to <32 x i1>
+ %s = lshr <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, %v0
+ %r = bitcast <32 x i1> %s to i32
+ ret i32 %r
+}
+
+define i32 @ashr(i32 %a0) {
+; CHECK-LABEL: ashr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl $-1, %eax
+; CHECK-NEXT: retq
+ %v0 = bitcast i32 %a0 to <32 x i1>
+ %s = ashr <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, %v0
+ %r = bitcast <32 x i1> %s to i32
+ ret i32 %r
+}
diff --git a/llvm/test/CodeGen/X86/pr86305.ll b/llvm/test/CodeGen/X86/pr86305.ll
new file mode 100644
index 0000000..79b42bb
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr86305.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16 | FileCheck %s
+
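+; bfloat operands are widened to f32 (the 16-bit value shifted left by 16) and
+; the f32 sum is narrowed back through the __truncsfbf2 libcall.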
+define void @add(ptr %pa, ptr %pb, ptr %pc) nounwind {
+; CHECK-LABEL: add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movq %rdx, %rbx
+; CHECK-NEXT: movzwl (%rsi), %eax
+; CHECK-NEXT: shll $16, %eax
+; CHECK-NEXT: vmovd %eax, %xmm0
+; CHECK-NEXT: movzwl (%rdi), %eax
+; CHECK-NEXT: shll $16, %eax
+; CHECK-NEXT: vmovd %eax, %xmm1
+; CHECK-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vpextrw $0, %xmm0, (%rbx)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+ %a = load bfloat, ptr %pa
+ %b = load bfloat, ptr %pb
+ %add = fadd bfloat %a, %b
+ store bfloat %add, ptr %pc
+ ret void
+}
+
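+; The v4f32 -> v4bf16 truncation is expanded into a sequence of scalar
+; __truncsfbf2 libcalls whose results are reassembled with vpinsrw.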
+define <4 x bfloat> @fptrunc_v4f32(<4 x float> %a) nounwind {
+; CHECK-LABEL: fptrunc_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: subq $72, %rsp
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vpshufd $255, (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vpextrw $0, %xmm0, %ebx
+; CHECK-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vpextrw $0, %xmm0, %ebp
+; CHECK-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vpextrw $0, %xmm0, %r14d
+; CHECK-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vpextrw $0, %xmm0, %r15d
+; CHECK-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vpextrw $0, %xmm0, %eax
+; CHECK-NEXT: vmovd %r15d, %xmm0
+; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $2, %r14d, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $3, %ebp, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $4, %ebx, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $5, %ebx, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $6, %ebx, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $7, %ebx, %xmm0, %xmm0
+; CHECK-NEXT: addq $72, %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: retq
+ %b = fptrunc <4 x float> %a to <4 x bfloat>
+ ret <4 x bfloat> %b
+}
diff --git a/llvm/test/CodeGen/X86/pr86880.mir b/llvm/test/CodeGen/X86/pr86880.mir
new file mode 100644
index 0000000..92ebf9a
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr86880.mir
@@ -0,0 +1,21 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=x86_64-- -run-pass=machine-cp -o - %s | FileCheck %s
+
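+# PR86880: the COPY of the inline asm's GR32_NOREX2 def should survive
+# machine copy propagation.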
+---
+name: foo
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $eax
+
+ ; CHECK-LABEL: name: foo
+ ; CHECK: liveins: $eax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, 10 /* regdef */, implicit-def dead $eax, 2686986 /* regdef:GR32_NOREX2 */, def renamable $r15d, 10 /* regdef */, implicit-def dead $ecx, 10 /* regdef */, implicit-def dead $edx, 2147483657 /* reguse tiedto:$0 */, $eax(tied-def 3)
+ ; CHECK-NEXT: renamable $ecx = COPY killed renamable $r15d
+ ; CHECK-NEXT: NOOP implicit $ecx
+ INLINEASM &"", 0 /* attdialect */, 10 /* regdef */, implicit-def dead $eax, 2686986 /* regdef:GR32_NOREX2 */, def renamable $r15d, 10 /* regdef */, implicit-def dead $ecx, 10 /* regdef */, implicit-def dead $edx, 2147483657 /* reguse tiedto:$0 */, $eax(tied-def 3)
+ renamable $ecx = COPY killed renamable $r15d
+ NOOP implicit $ecx
+
+...
diff --git a/llvm/test/CodeGen/X86/regalloc-copy-hints.mir b/llvm/test/CodeGen/X86/regalloc-copy-hints.mir
index 13b5a54..d09bcd6 100644
--- a/llvm/test/CodeGen/X86/regalloc-copy-hints.mir
+++ b/llvm/test/CodeGen/X86/regalloc-copy-hints.mir
@@ -103,6 +103,7 @@ registers:
- { id: 82, class: gr32 }
frameInfo:
maxAlignment: 4
+ adjustsStack: true
hasCalls: true
fixedStack:
- { id: 0, size: 4, alignment: 4, stack-id: default, isImmutable: true }
diff --git a/llvm/test/CodeGen/X86/sar_fold.ll b/llvm/test/CodeGen/X86/sar_fold.ll
index 21655e1..0f13969 100644
--- a/llvm/test/CodeGen/X86/sar_fold.ll
+++ b/llvm/test/CodeGen/X86/sar_fold.ll
@@ -44,3 +44,44 @@ define i32 @shl24sar25(i32 %a) #0 {
%2 = ashr exact i32 %1, 25
ret i32 %2
}
+
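+; shl by 144 followed by ashr by 46 is a net shift left of 98 bits (3 x 32 + 2),
+; so the sign-extended low 16 bits land in the top two i32 words.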
+define void @shl144sar48(ptr %p) #0 {
+; CHECK-LABEL: shl144sar48:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movswl (%eax), %ecx
+; CHECK-NEXT: movl %ecx, %edx
+; CHECK-NEXT: sarl $31, %edx
+; CHECK-NEXT: shldl $2, %ecx, %edx
+; CHECK-NEXT: shll $2, %ecx
+; CHECK-NEXT: movl %ecx, 12(%eax)
+; CHECK-NEXT: movl %edx, 16(%eax)
+; CHECK-NEXT: movl $0, 8(%eax)
+; CHECK-NEXT: movl $0, 4(%eax)
+; CHECK-NEXT: movl $0, (%eax)
+; CHECK-NEXT: retl
+ %a = load i160, ptr %p
+ %1 = shl i160 %a, 144
+ %2 = ashr exact i160 %1, 46
+ store i160 %2, ptr %p
+ ret void
+}
+
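+; Here the net shift left is 142 bits (4 x 32 + 14), so only the topmost i32
+; word is non-zero.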
+define void @shl144sar2(ptr %p) #0 {
+; CHECK-LABEL: shl144sar2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movswl (%eax), %ecx
+; CHECK-NEXT: shll $14, %ecx
+; CHECK-NEXT: movl %ecx, 16(%eax)
+; CHECK-NEXT: movl $0, 8(%eax)
+; CHECK-NEXT: movl $0, 12(%eax)
+; CHECK-NEXT: movl $0, 4(%eax)
+; CHECK-NEXT: movl $0, (%eax)
+; CHECK-NEXT: retl
+ %a = load i160, ptr %p
+ %1 = shl i160 %a, 144
+ %2 = ashr exact i160 %1, 2
+ store i160 %2, ptr %p
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/setcc-non-simple-type.ll b/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
index 2187c65..97c3c204 100644
--- a/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
+++ b/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
@@ -60,36 +60,30 @@ define void @failing(ptr %0, ptr %1) nounwind {
; CHECK-NEXT: .LBB0_2: # %vector.body
; CHECK-NEXT: # Parent Loop BB0_1 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
-; CHECK-NEXT: movdqu 1024(%rdx,%rdi), %xmm5
-; CHECK-NEXT: movdqu 1040(%rdx,%rdi), %xmm6
-; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; CHECK-NEXT: movq %xmm5, %r8
-; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,2,3]
-; CHECK-NEXT: movq %xmm5, %r9
-; CHECK-NEXT: cmpq 1040(%rdx,%rdi), %rsi
-; CHECK-NEXT: movq %rcx, %r10
-; CHECK-NEXT: sbbq %r9, %r10
-; CHECK-NEXT: setge %r9b
-; CHECK-NEXT: movzbl %r9b, %r9d
-; CHECK-NEXT: andl $1, %r9d
-; CHECK-NEXT: negq %r9
-; CHECK-NEXT: movq %r9, %xmm5
; CHECK-NEXT: cmpq 1024(%rdx,%rdi), %rsi
-; CHECK-NEXT: movq %rcx, %r9
-; CHECK-NEXT: sbbq %r8, %r9
+; CHECK-NEXT: movq %rcx, %r8
+; CHECK-NEXT: sbbq 1032(%rdx,%rdi), %r8
+; CHECK-NEXT: setge %r8b
+; CHECK-NEXT: movzbl %r8b, %r8d
+; CHECK-NEXT: andl $1, %r8d
+; CHECK-NEXT: negq %r8
+; CHECK-NEXT: movq %r8, %xmm5
+; CHECK-NEXT: cmpq 1040(%rdx,%rdi), %rsi
+; CHECK-NEXT: movq %rcx, %r8
+; CHECK-NEXT: sbbq 1048(%rdx,%rdi), %r8
; CHECK-NEXT: setge %r8b
; CHECK-NEXT: movzbl %r8b, %r8d
; CHECK-NEXT: andl $1, %r8d
; CHECK-NEXT: negq %r8
; CHECK-NEXT: movq %r8, %xmm6
-; CHECK-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm5[0]
-; CHECK-NEXT: movdqa %xmm1, %xmm5
-; CHECK-NEXT: psllq %xmm4, %xmm5
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
+; CHECK-NEXT: movdqa %xmm1, %xmm6
+; CHECK-NEXT: psllq %xmm4, %xmm6
; CHECK-NEXT: pshufd {{.*#+}} xmm7 = xmm4[2,3,2,3]
; CHECK-NEXT: movdqa %xmm1, %xmm8
; CHECK-NEXT: psllq %xmm7, %xmm8
-; CHECK-NEXT: movsd {{.*#+}} xmm8 = xmm5[0],xmm8[1]
-; CHECK-NEXT: andpd %xmm6, %xmm8
+; CHECK-NEXT: movsd {{.*#+}} xmm8 = xmm6[0],xmm8[1]
+; CHECK-NEXT: andpd %xmm5, %xmm8
; CHECK-NEXT: orpd %xmm8, %xmm3
; CHECK-NEXT: paddq %xmm2, %xmm4
; CHECK-NEXT: addq $32, %rdi
diff --git a/llvm/test/CodeGen/X86/shrink_vmul.ll b/llvm/test/CodeGen/X86/shrink_vmul.ll
index 2610f43..62051d1 100644
--- a/llvm/test/CodeGen/X86/shrink_vmul.ll
+++ b/llvm/test/CodeGen/X86/shrink_vmul.ll
@@ -1983,91 +1983,75 @@ define void @PR34947(ptr %p0, ptr %p1) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movzwl 16(%eax), %edx
; X86-SSE-NEXT: movl %edx, (%esp) # 4-byte Spill
-; X86-SSE-NEXT: movdqa (%eax), %xmm3
-; X86-SSE-NEXT: movdqa (%ecx), %xmm0
-; X86-SSE-NEXT: movdqa 16(%ecx), %xmm1
-; X86-SSE-NEXT: pxor %xmm5, %xmm5
-; X86-SSE-NEXT: movdqa %xmm3, %xmm2
-; X86-SSE-NEXT: pextrw $7, %xmm3, %eax
-; X86-SSE-NEXT: pextrw $4, %xmm3, %edi
-; X86-SSE-NEXT: pextrw $0, %xmm3, %ebp
-; X86-SSE-NEXT: pextrw $1, %xmm3, %esi
-; X86-SSE-NEXT: pextrw $3, %xmm3, %ebx
-; X86-SSE-NEXT: movdqa %xmm3, %xmm4
-; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
-; X86-SSE-NEXT: movd %xmm3, %ecx
+; X86-SSE-NEXT: movdqa (%eax), %xmm2
+; X86-SSE-NEXT: pxor %xmm1, %xmm1
+; X86-SSE-NEXT: movdqa %xmm2, %xmm0
+; X86-SSE-NEXT: pextrw $7, %xmm2, %eax
+; X86-SSE-NEXT: pextrw $4, %xmm2, %esi
+; X86-SSE-NEXT: pextrw $1, %xmm2, %edi
+; X86-SSE-NEXT: pextrw $0, %xmm2, %ebx
+; X86-SSE-NEXT: pextrw $3, %xmm2, %ebp
+; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X86-SSE-NEXT: xorl %edx, %edx
+; X86-SSE-NEXT: divl 28(%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm1
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; X86-SSE-NEXT: movd %xmm3, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
+; X86-SSE-NEXT: divl 24(%ecx)
; X86-SSE-NEXT: movd %edx, %xmm3
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
-; X86-SSE-NEXT: movd %xmm5, %eax
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
-; X86-SSE-NEXT: movd %xmm5, %ecx
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; X86-SSE-NEXT: movl %esi, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
-; X86-SSE-NEXT: movd %edx, %xmm5
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; X86-SSE-NEXT: divl 16(%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm1
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE-NEXT: movd %xmm0, %eax
+; X86-SSE-NEXT: xorl %edx, %edx
+; X86-SSE-NEXT: divl 20(%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm0
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; X86-SSE-NEXT: movl %edi, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-SSE-NEXT: divl 16(%edi)
+; X86-SSE-NEXT: divl 4(%ecx)
; X86-SSE-NEXT: movd %edx, %xmm3
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
-; X86-SSE-NEXT: movd %xmm2, %eax
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
-; X86-SSE-NEXT: movd %xmm1, %ecx
+; X86-SSE-NEXT: movl %ebx, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
-; X86-SSE-NEXT: movd %edx, %xmm1
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
+; X86-SSE-NEXT: divl (%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm0
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; X86-SSE-NEXT: movl %ebp, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl (%edi)
-; X86-SSE-NEXT: movd %edx, %xmm1
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X86-SSE-NEXT: movd %xmm2, %ecx
-; X86-SSE-NEXT: movl %esi, %eax
-; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
-; X86-SSE-NEXT: movd %edx, %xmm2
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; X86-SSE-NEXT: movd %xmm2, %ecx
-; X86-SSE-NEXT: movl %ebx, %eax
+; X86-SSE-NEXT: divl 12(%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm3
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; X86-SSE-NEXT: movd %xmm2, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
+; X86-SSE-NEXT: divl 8(%ecx)
; X86-SSE-NEXT: movd %edx, %xmm2
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; X86-SSE-NEXT: movd %xmm4, %eax
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; X86-SSE-NEXT: movd %xmm0, %ecx
-; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
-; X86-SSE-NEXT: movd %edx, %xmm0
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; X86-SSE-NEXT: movl (%esp), %eax # 4-byte Reload
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl 32(%edi)
+; X86-SSE-NEXT: divl 32(%ecx)
; X86-SSE-NEXT: movdqa {{.*#+}} xmm2 = [8199,8199,8199,8199]
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; X86-SSE-NEXT: pmuludq %xmm2, %xmm1
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
-; X86-SSE-NEXT: pmuludq %xmm2, %xmm4
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,2,2,3]
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; X86-SSE-NEXT: pmuludq %xmm2, %xmm0
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE-NEXT: pmuludq %xmm2, %xmm3
; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X86-SSE-NEXT: pmuludq %xmm2, %xmm1
; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; X86-SSE-NEXT: pmuludq %xmm2, %xmm3
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X86-SSE-NEXT: imull $8199, %edx, %eax # imm = 0x2007
; X86-SSE-NEXT: movl %eax, (%eax)
-; X86-SSE-NEXT: movdqa %xmm3, (%eax)
+; X86-SSE-NEXT: movdqa %xmm1, (%eax)
; X86-SSE-NEXT: movdqa %xmm0, (%eax)
; X86-SSE-NEXT: addl $4, %esp
; X86-SSE-NEXT: popl %esi
@@ -2204,91 +2188,76 @@ define void @PR34947(ptr %p0, ptr %p1) nounwind {
; X64-SSE-LABEL: PR34947:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movzwl 16(%rdi), %ecx
-; X64-SSE-NEXT: movdqa (%rdi), %xmm3
-; X64-SSE-NEXT: movdqa (%rsi), %xmm0
-; X64-SSE-NEXT: movdqa 16(%rsi), %xmm1
-; X64-SSE-NEXT: pxor %xmm5, %xmm5
-; X64-SSE-NEXT: movdqa %xmm3, %xmm2
-; X64-SSE-NEXT: pextrw $7, %xmm3, %eax
-; X64-SSE-NEXT: pextrw $4, %xmm3, %r8d
-; X64-SSE-NEXT: pextrw $0, %xmm3, %r10d
-; X64-SSE-NEXT: pextrw $1, %xmm3, %edi
-; X64-SSE-NEXT: pextrw $3, %xmm3, %r9d
-; X64-SSE-NEXT: movdqa %xmm3, %xmm4
-; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
-; X64-SSE-NEXT: movd %xmm3, %r11d
-; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %r11d
-; X64-SSE-NEXT: movd %edx, %xmm3
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
-; X64-SSE-NEXT: movd %xmm5, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
-; X64-SSE-NEXT: movd %xmm5, %r11d
+; X64-SSE-NEXT: movdqa (%rdi), %xmm2
+; X64-SSE-NEXT: pxor %xmm1, %xmm1
+; X64-SSE-NEXT: movdqa %xmm2, %xmm0
+; X64-SSE-NEXT: pextrw $7, %xmm2, %eax
+; X64-SSE-NEXT: pextrw $4, %xmm2, %edi
+; X64-SSE-NEXT: pextrw $1, %xmm2, %r8d
+; X64-SSE-NEXT: pextrw $0, %xmm2, %r9d
+; X64-SSE-NEXT: pextrw $3, %xmm2, %r10d
+; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %r11d
-; X64-SSE-NEXT: movd %edx, %xmm5
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
-; X64-SSE-NEXT: movl %r8d, %eax
+; X64-SSE-NEXT: divl 28(%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm1
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; X64-SSE-NEXT: movd %xmm3, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl 16(%rsi)
+; X64-SSE-NEXT: divl 24(%rsi)
; X64-SSE-NEXT: movd %edx, %xmm3
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
-; X64-SSE-NEXT: movd %xmm2, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
-; X64-SSE-NEXT: movd %xmm1, %r8d
-; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %r8d
-; X64-SSE-NEXT: movd %edx, %xmm1
; X64-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
-; X64-SSE-NEXT: movl %r10d, %eax
+; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl (%rsi)
+; X64-SSE-NEXT: divl 16(%rsi)
; X64-SSE-NEXT: movd %edx, %xmm1
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X64-SSE-NEXT: movd %xmm2, %r8d
-; X64-SSE-NEXT: movl %edi, %eax
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-SSE-NEXT: movd %xmm0, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %r8d
-; X64-SSE-NEXT: movd %edx, %xmm2
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; X64-SSE-NEXT: movd %xmm2, %edi
+; X64-SSE-NEXT: divl 20(%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm0
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; X64-SSE-NEXT: movl %r8d, %eax
+; X64-SSE-NEXT: xorl %edx, %edx
+; X64-SSE-NEXT: divl 4(%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm0
; X64-SSE-NEXT: movl %r9d, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %edi
-; X64-SSE-NEXT: movd %edx, %xmm2
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; X64-SSE-NEXT: movd %xmm4, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; X64-SSE-NEXT: movd %xmm0, %edi
+; X64-SSE-NEXT: divl (%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm3
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; X64-SSE-NEXT: movl %r10d, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %edi
+; X64-SSE-NEXT: divl 12(%rsi)
; X64-SSE-NEXT: movd %edx, %xmm0
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; X64-SSE-NEXT: movd %xmm2, %eax
+; X64-SSE-NEXT: xorl %edx, %edx
+; X64-SSE-NEXT: divl 8(%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm2
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; X64-SSE-NEXT: movl %ecx, %eax
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl 32(%rsi)
; X64-SSE-NEXT: movdqa {{.*#+}} xmm0 = [8199,8199,8199,8199]
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; X64-SSE-NEXT: pmuludq %xmm0, %xmm1
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X64-SSE-NEXT: pmuludq %xmm0, %xmm2
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; X64-SSE-NEXT: pmuludq %xmm0, %xmm3
; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; X64-SSE-NEXT: pmuludq %xmm0, %xmm2
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; X64-SSE-NEXT: pmuludq %xmm0, %xmm1
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X64-SSE-NEXT: pmuludq %xmm0, %xmm2
; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-SSE-NEXT: imull $8199, %edx, %eax # imm = 0x2007
; X64-SSE-NEXT: movl %eax, (%rax)
-; X64-SSE-NEXT: movdqa %xmm3, (%rax)
; X64-SSE-NEXT: movdqa %xmm1, (%rax)
+; X64-SSE-NEXT: movdqa %xmm3, (%rax)
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: PR34947:
diff --git a/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll b/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll
index 2d7e6f6..a80f380 100644
--- a/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll
+++ b/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll
@@ -1333,8 +1333,10 @@ define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
; AVX512VL-LABEL: negative:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm0[2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512VL-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
+; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512VL-NEXT: vpternlogq $206, %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm2[0,3,2,3]
; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/stack-protector.ll b/llvm/test/CodeGen/X86/stack-protector.ll
index a277f9f..f4f3ae4 100644
--- a/llvm/test/CodeGen/X86/stack-protector.ll
+++ b/llvm/test/CodeGen/X86/stack-protector.ll
@@ -1,6 +1,7 @@
; RUN: llc -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck --check-prefix=LINUX-I386 %s
; RUN: llc -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck --check-prefix=LINUX-X64 %s
; RUN: llc -code-model=kernel -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck --check-prefix=LINUX-KERNEL-X64 %s
+; RUN: llc -code-model=kernel -mtriple=x86_64-unknown-freebsd < %s -o - | FileCheck --check-prefix=FREEBSD-KERNEL-X64 %s
; RUN: llc -mtriple=x86_64-apple-darwin < %s -o - | FileCheck --check-prefix=DARWIN-X64 %s
; RUN: llc -mtriple=amd64-pc-openbsd < %s -o - | FileCheck --check-prefix=OPENBSD-AMD64 %s
; RUN: llc -mtriple=i386-pc-windows-msvc < %s -o - | FileCheck -check-prefix=MSVC-I386 %s
@@ -75,6 +76,10 @@ entry:
; LINUX-X64: mov{{l|q}} %fs:
; LINUX-X64: callq __stack_chk_fail
+; FREEBSD-KERNEL-X64-LABEL: test1b:
+; FREEBSD-KERNEL-X64-NOT: mov{{l|q}} __stack_chk_guard@GOTPCREL
+; FREEBSD-KERNEL-X64: callq __stack_chk_fail
+
; LINUX-KERNEL-X64-LABEL: test1b:
; LINUX-KERNEL-X64: mov{{l|q}} %gs:
; LINUX-KERNEL-X64: callq __stack_chk_fail
@@ -118,6 +123,10 @@ entry:
; LINUX-X64: mov{{l|q}} %fs:
; LINUX-X64: callq __stack_chk_fail
+; FREEBSD-KERNEL-X64-LABEL: test1c:
+; FREEBSD-KERNEL-X64: mov{{l|q}} __stack_chk_guard(%rip)
+; FREEBSD-KERNEL-X64: callq __stack_chk_fail
+
; LINUX-KERNEL-X64-LABEL: test1c:
; LINUX-KERNEL-X64: mov{{l|q}} %gs:
; LINUX-KERNEL-X64: callq __stack_chk_fail
diff --git a/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir b/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir
index 02c9310..8bac140 100644
--- a/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir
+++ b/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir
@@ -6,6 +6,8 @@
---
name: test_relocate
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
body: |
bb.0.entry:
liveins: $rdi
@@ -25,6 +27,8 @@ body: |
---
name: test_relocate_multi_regmasks
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
body: |
bb.0.entry:
liveins: $rdi
diff --git a/llvm/test/CodeGen/X86/statepoint-fixup-undef.mir b/llvm/test/CodeGen/X86/statepoint-fixup-undef.mir
index 30a68e6..4a18351 100644
--- a/llvm/test/CodeGen/X86/statepoint-fixup-undef.mir
+++ b/llvm/test/CodeGen/X86/statepoint-fixup-undef.mir
@@ -61,7 +61,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 8
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir
index 11968f1..5f05270 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir
@@ -231,7 +231,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 1
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir
index aae2f38..cf91282 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir
@@ -398,7 +398,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 1
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir
index 87f5f0f..fcebc69 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir
@@ -175,7 +175,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 4
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir
index 4925396..8bb39a0 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir
@@ -226,7 +226,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 4
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra.mir
index 858ff3f..da651039 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra.mir
@@ -172,7 +172,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 4
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir b/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir
index e24d5e8..d40a9a0 100644
--- a/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir
+++ b/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir
@@ -114,7 +114,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 8
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-vreg.mir b/llvm/test/CodeGen/X86/statepoint-vreg.mir
index bfeadfc..a0c596f 100644
--- a/llvm/test/CodeGen/X86/statepoint-vreg.mir
+++ b/llvm/test/CodeGen/X86/statepoint-vreg.mir
@@ -134,6 +134,8 @@ registers:
liveins:
- { reg: '$rdi', virtual-reg: '%0' }
- { reg: '$rsi', virtual-reg: '%1' }
+frameInfo:
+ adjustsStack: true
fixedStack: []
stack: []
callSites: []
diff --git a/llvm/test/CodeGen/X86/tls-align.ll b/llvm/test/CodeGen/X86/tls-align.ll
index 3c8ee6b..e996c00 100644
--- a/llvm/test/CodeGen/X86/tls-align.ll
+++ b/llvm/test/CodeGen/X86/tls-align.ll
@@ -12,7 +12,7 @@
define internal fastcc void @foo() unnamed_addr {
entry:
- store <8 x ptr> <ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, inrange i32 0, i64 2), ptr null, ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, inrange i32 0, i64 2), ptr null, ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, inrange i32 0, i64 2), ptr null, ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, inrange i32 0, i64 2), ptr null>, ptr @array, align 32
+ store <8 x ptr> <ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, i32 0, i64 2), ptr null, ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, i32 0, i64 2), ptr null, ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, i32 0, i64 2), ptr null, ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, i32 0, i64 2), ptr null>, ptr @array, align 32
ret void
}
diff --git a/llvm/test/CodeGen/X86/tls-desc.ll b/llvm/test/CodeGen/X86/tls-desc.ll
new file mode 100644
index 0000000..c73986e6
--- /dev/null
+++ b/llvm/test/CodeGen/X86/tls-desc.ll
@@ -0,0 +1,199 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=i686 --relocation-model=pic -enable-tlsdesc | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-pc-linux-gnux32 --relocation-model=pic -enable-tlsdesc | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64 --relocation-model=pic -enable-tlsdesc | FileCheck %s --check-prefix=X64
+
+@x = thread_local global i32 0, align 4
+@y = internal thread_local global i32 1, align 4
+@z = external hidden thread_local global i32, align 4
+
+define ptr @f1() nounwind {
+; X86-LABEL: f1:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll .L0$pb
+; X86-NEXT: .L0$pb:
+; X86-NEXT: popl %ebx
+; X86-NEXT: .Ltmp0:
+; X86-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L0$pb), %ebx
+; X86-NEXT: #APP
+; X86-NEXT: #NO_APP
+; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NEXT: leal x@tlsdesc(%ebx), %eax
+; X86-NEXT: calll *x@tlscall(%eax)
+; X86-NEXT: addl %gs:0, %eax
+; X86-NEXT: movl (%esp), %ebx # 4-byte Reload
+; X86-NEXT: #APP
+; X86-NEXT: #NO_APP
+; X86-NEXT: addl $4, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X32-LABEL: f1:
+; X32: # %bb.0:
+; X32-NEXT: pushq %rax
+; X32-NEXT: #APP
+; X32-NEXT: #NO_APP
+; X32-NEXT: leal x@tlsdesc(%rip), %eax
+; X32-NEXT: callq *x@tlscall(%eax)
+; X32-NEXT: # kill: def $eax killed $eax def $rax
+; X32-NEXT: addl %fs:0, %eax
+; X32-NEXT: #APP
+; X32-NEXT: #NO_APP
+; X32-NEXT: popq %rcx
+; X32-NEXT: retq
+;
+; X64-LABEL: f1:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: #APP
+; X64-NEXT: #NO_APP
+; X64-NEXT: leaq x@tlsdesc(%rip), %rax
+; X64-NEXT: callq *x@tlscall(%rax)
+; X64-NEXT: addq %fs:0, %rax
+; X64-NEXT: #APP
+; X64-NEXT: #NO_APP
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %a = call { i32, i32, i32, i32, i32, i32 } asm sideeffect "", "=r,=r,=r,=r,=r,=r,~{dirflag},~{fpsr},~{flags}"()
+ %b = call ptr @llvm.threadlocal.address.p0(ptr @x)
+ %a.0 = extractvalue { i32, i32, i32, i32, i32, i32 } %a, 0
+ %a.1 = extractvalue { i32, i32, i32, i32, i32, i32 } %a, 1
+ %a.2 = extractvalue { i32, i32, i32, i32, i32, i32 } %a, 2
+ %a.3 = extractvalue { i32, i32, i32, i32, i32, i32 } %a, 3
+ %a.4 = extractvalue { i32, i32, i32, i32, i32, i32 } %a, 4
+ %a.5 = extractvalue { i32, i32, i32, i32, i32, i32 } %a, 5
+ call void asm sideeffect "", "r,r,r,r,r,r,~{dirflag},~{fpsr},~{flags}"(i32 %a.0, i32 %a.1, i32 %a.2, i32 %a.3, i32 %a.4, i32 %a.5)
+ ret ptr %b
+}
+
+define i32 @f2() nounwind {
+; X86-LABEL: f2:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebx
+; X86-NEXT: calll .L1$pb
+; X86-NEXT: .L1$pb:
+; X86-NEXT: popl %ebx
+; X86-NEXT: .Ltmp1:
+; X86-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L1$pb), %ebx
+; X86-NEXT: movl %gs:0, %ecx
+; X86-NEXT: leal x@tlsdesc(%ebx), %eax
+; X86-NEXT: calll *x@tlscall(%eax)
+; X86-NEXT: movl (%eax,%ecx), %eax
+; X86-NEXT: popl %ebx
+; X86-NEXT: retl
+;
+; X32-LABEL: f2:
+; X32: # %bb.0:
+; X32-NEXT: pushq %rax
+; X32-NEXT: movl %fs:0, %ecx
+; X32-NEXT: leal x@tlsdesc(%rip), %eax
+; X32-NEXT: callq *x@tlscall(%eax)
+; X32-NEXT: movl (%eax,%ecx), %eax
+; X32-NEXT: popq %rcx
+; X32-NEXT: retq
+;
+; X64-LABEL: f2:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movq %fs:0, %rcx
+; X64-NEXT: leaq x@tlsdesc(%rip), %rax
+; X64-NEXT: callq *x@tlscall(%rax)
+; X64-NEXT: movl (%rax,%rcx), %eax
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %1 = tail call ptr @llvm.threadlocal.address.p0(ptr @x)
+ %2 = load i32, ptr %1
+ ret i32 %2
+}
+
+define ptr @f3() nounwind {
+; X86-LABEL: f3:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebx
+; X86-NEXT: calll .L2$pb
+; X86-NEXT: .L2$pb:
+; X86-NEXT: popl %ebx
+; X86-NEXT: .Ltmp2:
+; X86-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.L2$pb), %ebx
+; X86-NEXT: leal x@tlsdesc(%ebx), %eax
+; X86-NEXT: calll *x@tlscall(%eax)
+; X86-NEXT: addl %gs:0, %eax
+; X86-NEXT: popl %ebx
+; X86-NEXT: retl
+;
+; X32-LABEL: f3:
+; X32: # %bb.0:
+; X32-NEXT: pushq %rax
+; X32-NEXT: leal x@tlsdesc(%rip), %eax
+; X32-NEXT: callq *x@tlscall(%eax)
+; X32-NEXT: # kill: def $eax killed $eax def $rax
+; X32-NEXT: addl %fs:0, %eax
+; X32-NEXT: popq %rcx
+; X32-NEXT: retq
+;
+; X64-LABEL: f3:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: leaq x@tlsdesc(%rip), %rax
+; X64-NEXT: callq *x@tlscall(%rax)
+; X64-NEXT: addq %fs:0, %rax
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %1 = tail call ptr @llvm.threadlocal.address.p0(ptr @x)
+ ret ptr %1
+}
+
+define i32 @f4() nounwind {
+; X86-LABEL: f4:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebx
+; X86-NEXT: calll .L3$pb
+; X86-NEXT: .L3$pb:
+; X86-NEXT: popl %ebx
+; X86-NEXT: .Ltmp3:
+; X86-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp3-.L3$pb), %ebx
+; X86-NEXT: movl %gs:0, %edx
+; X86-NEXT: leal _TLS_MODULE_BASE_@tlsdesc(%ebx), %eax
+; X86-NEXT: calll *_TLS_MODULE_BASE_@tlscall(%eax)
+; X86-NEXT: movl y@DTPOFF(%eax,%edx), %ecx
+; X86-NEXT: addl z@DTPOFF(%eax,%edx), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: popl %ebx
+; X86-NEXT: retl
+;
+; X32-LABEL: f4:
+; X32: # %bb.0:
+; X32-NEXT: pushq %rax
+; X32-NEXT: movl %fs:0, %edx
+; X32-NEXT: leal _TLS_MODULE_BASE_@tlsdesc(%rip), %eax
+; X32-NEXT: callq *_TLS_MODULE_BASE_@tlscall(%eax)
+; X32-NEXT: movl y@DTPOFF(%eax,%edx), %ecx
+; X32-NEXT: addl z@DTPOFF(%eax,%edx), %ecx
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: popq %rcx
+; X32-NEXT: retq
+;
+; X64-LABEL: f4:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movq %fs:0, %rdx
+; X64-NEXT: leaq _TLS_MODULE_BASE_@tlsdesc(%rip), %rax
+; X64-NEXT: callq *_TLS_MODULE_BASE_@tlscall(%rax)
+; X64-NEXT: movl y@DTPOFF(%rax,%rdx), %ecx
+; X64-NEXT: addl z@DTPOFF(%rax,%rdx), %ecx
+; X64-NEXT: movl %ecx, %eax
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %1 = load i32, ptr @y, align 4
+ %2 = load i32, ptr @z, align 4
+ %3 = add nsw i32 %1, %2
+ ret i32 %3
+}
diff --git a/llvm/test/CodeGen/X86/tls-loads-control3.ll b/llvm/test/CodeGen/X86/tls-loads-control3.ll
index 82daac5..4e521b1 100644
--- a/llvm/test/CodeGen/X86/tls-loads-control3.ll
+++ b/llvm/test/CodeGen/X86/tls-loads-control3.ll
@@ -183,7 +183,6 @@ define i32 @_Z2f2i(i32 %c) local_unnamed_addr #0 {
; HOIST0-NEXT: # %bb.1: # %while.body.preheader
; HOIST0-NEXT: leaq _ZZ2f2iE2st.0@TLSLD(%rip), %rdi
; HOIST0-NEXT: callq __tls_get_addr@PLT
-; HOIST0-NEXT: movq %rax, %rcx
; HOIST0-NEXT: leaq _ZZ2f2iE2st.0@DTPOFF(%rax), %r15
; HOIST0-NEXT: leaq _ZZ2f2iE2st.1@DTPOFF(%rax), %r12
; HOIST0-NEXT: .p2align 4, 0x90
@@ -245,9 +244,7 @@ define i32 @_Z2f2i(i32 %c) local_unnamed_addr #0 {
; HOIST2-NEXT: movq %rax, %r14
; HOIST2-NEXT: addb %bpl, _ZZ2f2iE2st.0@DTPOFF(%rax)
; HOIST2-NEXT: callq _Z5gfuncv@PLT
-; HOIST2-NEXT: movl %eax, %ecx
-; HOIST2-NEXT: movq %r14, %rax
-; HOIST2-NEXT: addl %ecx, _ZZ2f2iE2st.1@DTPOFF(%r14)
+; HOIST2-NEXT: addl %eax, _ZZ2f2iE2st.1@DTPOFF(%r14)
; HOIST2-NEXT: decl %ebx
; HOIST2-NEXT: jne .LBB1_2
; HOIST2-NEXT: .LBB1_3: # %while.end
diff --git a/llvm/test/CodeGen/X86/var-permute-128.ll b/llvm/test/CodeGen/X86/var-permute-128.ll
index 99a3821..f2240a9 100644
--- a/llvm/test/CodeGen/X86/var-permute-128.ll
+++ b/llvm/test/CodeGen/X86/var-permute-128.ll
@@ -1101,17 +1101,13 @@ define <16 x i8> @var_shuffle_v16i8_from_v32i8_v16i8(<32 x i8> %v, <16 x i8> %in
define void @indices_convert() {
; SSE3-LABEL: indices_convert:
; SSE3: # %bb.0: # %bb
-; SSE3-NEXT: movdqa (%rax), %xmm0
-; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE3-NEXT: movd %xmm1, %eax
-; SSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movaps (%rax), %xmm0
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movl (%rax), %eax
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE3-NEXT: andl $3, %eax
-; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; SSE3-NEXT: movd %xmm1, %ecx
-; SSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE3-NEXT: andl $3, %ecx
; SSE3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE3-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE3-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -1120,17 +1116,13 @@ define void @indices_convert() {
;
; SSSE3-LABEL: indices_convert:
; SSSE3: # %bb.0: # %bb
-; SSSE3-NEXT: movdqa (%rax), %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSSE3-NEXT: movd %xmm1, %eax
-; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movaps (%rax), %xmm0
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movl (%rax), %eax
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: andl $3, %eax
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; SSSE3-NEXT: movd %xmm1, %ecx
-; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT: andl $3, %ecx
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSSE3-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
diff --git a/llvm/test/CodeGen/X86/vec_int_to_fp.ll b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
index 7bbcdee..e26de4b 100644
--- a/llvm/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
@@ -2911,23 +2911,12 @@ define <8 x float> @uitofp_16i8_to_8f32(<16 x i8> %a) {
;
define <2 x double> @sitofp_load_2i64_to_2f64(ptr%a) {
-; SSE2-LABEL: sitofp_load_2i64_to_2f64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: cvtsi2sdq (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2sd %rax, %xmm1
-; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: sitofp_load_2i64_to_2f64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: cvtsi2sdq 8(%rdi), %xmm1
-; SSE41-NEXT: cvtsi2sdq (%rdi), %xmm0
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE41-NEXT: retq
+; SSE-LABEL: sitofp_load_2i64_to_2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: cvtsi2sdq 8(%rdi), %xmm1
+; SSE-NEXT: cvtsi2sdq (%rdi), %xmm0
+; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_load_2i64_to_2f64:
; VEX: # %bb.0:
@@ -3093,35 +3082,16 @@ define <2 x double> @sitofp_load_2i8_to_2f64(ptr%a) {
}
define <4 x double> @sitofp_load_4i64_to_4f64(ptr%a) {
-; SSE2-LABEL: sitofp_load_4i64_to_4f64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa 16(%rdi), %xmm2
-; SSE2-NEXT: cvtsi2sdq (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2sd %rax, %xmm1
-; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2sdq 16(%rdi), %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2sd %rax, %xmm2
-; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: sitofp_load_4i64_to_4f64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: cvtsi2sdq 8(%rdi), %xmm1
-; SSE41-NEXT: cvtsi2sdq (%rdi), %xmm0
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE41-NEXT: cvtsi2sdq 24(%rdi), %xmm2
-; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: cvtsi2sdq 16(%rdi), %xmm1
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE41-NEXT: retq
+; SSE-LABEL: sitofp_load_4i64_to_4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: cvtsi2sdq 8(%rdi), %xmm1
+; SSE-NEXT: cvtsi2sdq (%rdi), %xmm0
+; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: cvtsi2sdq 24(%rdi), %xmm2
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: cvtsi2sdq 16(%rdi), %xmm1
+; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_load_4i64_to_4f64:
; VEX: # %bb.0:
@@ -3865,22 +3835,14 @@ define <4 x double> @uitofp_load_4i8_to_4f64(ptr%a) {
define <4 x float> @sitofp_load_4i64_to_4f32(ptr%a) {
; SSE2-LABEL: sitofp_load_4i64_to_4f32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa 16(%rdi), %xmm0
-; SSE2-NEXT: cvtsi2ssq 16(%rdi), %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-NEXT: cvtsi2ssq 24(%rdi), %xmm0
+; SSE2-NEXT: cvtsi2ssq 16(%rdi), %xmm1
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: cvtsi2ssq 8(%rdi), %xmm2
; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: cvtsi2ssq (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: sitofp_load_4i64_to_4f32:
@@ -4015,39 +3977,24 @@ define <4 x float> @sitofp_load_4i8_to_4f32(ptr%a) {
define <8 x float> @sitofp_load_8i64_to_8f32(ptr%a) {
; SSE2-LABEL: sitofp_load_8i64_to_8f32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa 16(%rdi), %xmm0
-; SSE2-NEXT: movdqa 32(%rdi), %xmm2
-; SSE2-NEXT: movdqa 48(%rdi), %xmm3
-; SSE2-NEXT: cvtsi2ssq 16(%rdi), %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
+; SSE2-NEXT: cvtsi2ssq 24(%rdi), %xmm0
+; SSE2-NEXT: cvtsi2ssq 16(%rdi), %xmm1
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: cvtsi2ssq 8(%rdi), %xmm2
; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: cvtsi2ssq (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; SSE2-NEXT: xorps %xmm4, %xmm4
-; SSE2-NEXT: cvtsi2ssq 48(%rdi), %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE2-NEXT: cvtsi2ssq 56(%rdi), %xmm1
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: cvtsi2ssq 48(%rdi), %xmm2
+; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: cvtsi2ssq 40(%rdi), %xmm3
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: cvtsi2ssq 32(%rdi), %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
-; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: sitofp_load_8i64_to_8f32:
@@ -4256,70 +4203,64 @@ define <8 x float> @sitofp_load_8i8_to_8f32(ptr%a) {
define <4 x float> @uitofp_load_4i64_to_4f32(ptr%a) {
; SSE2-LABEL: uitofp_load_4i64_to_4f32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa 16(%rdi), %xmm0
-; SSE2-NEXT: movq 16(%rdi), %rax
+; SSE2-NEXT: movq 24(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB83_1
; SSE2-NEXT: # %bb.2:
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
; SSE2-NEXT: jmp .LBB83_3
; SSE2-NEXT: .LBB83_1:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: addss %xmm1, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
+; SSE2-NEXT: addss %xmm0, %xmm0
; SSE2-NEXT: .LBB83_3:
-; SSE2-NEXT: movq (%rdi), %rax
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: testq %rcx, %rcx
+; SSE2-NEXT: movq 16(%rdi), %rax
+; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB83_4
; SSE2-NEXT: # %bb.5:
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm2
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
; SSE2-NEXT: jmp .LBB83_6
; SSE2-NEXT: .LBB83_4:
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: shrq %rcx
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: orq %rcx, %rax
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
+; SSE2-NEXT: addss %xmm1, %xmm1
+; SSE2-NEXT: .LBB83_6:
+; SSE2-NEXT: movq (%rdi), %rax
+; SSE2-NEXT: movq 8(%rdi), %rcx
+; SSE2-NEXT: testq %rcx, %rcx
+; SSE2-NEXT: js .LBB83_7
+; SSE2-NEXT: # %bb.8:
+; SSE2-NEXT: cvtsi2ss %rcx, %xmm2
+; SSE2-NEXT: jmp .LBB83_9
+; SSE2-NEXT: .LBB83_7:
; SSE2-NEXT: movq %rcx, %rdx
; SSE2-NEXT: shrq %rdx
; SSE2-NEXT: andl $1, %ecx
; SSE2-NEXT: orq %rdx, %rcx
; SSE2-NEXT: cvtsi2ss %rcx, %xmm2
; SSE2-NEXT: addss %xmm2, %xmm2
-; SSE2-NEXT: .LBB83_6:
-; SSE2-NEXT: movdqa (%rdi), %xmm3
-; SSE2-NEXT: testq %rax, %rax
-; SSE2-NEXT: js .LBB83_7
-; SSE2-NEXT: # %bb.8:
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: jmp .LBB83_9
-; SSE2-NEXT: .LBB83_7:
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: shrq %rcx
-; SSE2-NEXT: andl $1, %eax
-; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: addss %xmm0, %xmm0
; SSE2-NEXT: .LBB83_9:
-; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB83_10
; SSE2-NEXT: # %bb.11:
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
+; SSE2-NEXT: xorps %xmm0, %xmm0
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
; SSE2-NEXT: jmp .LBB83_12
; SSE2-NEXT: .LBB83_10:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
-; SSE2-NEXT: addss %xmm2, %xmm2
+; SSE2-NEXT: xorps %xmm0, %xmm0
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
+; SSE2-NEXT: addss %xmm0, %xmm0
; SSE2-NEXT: .LBB83_12:
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -4591,8 +4532,7 @@ define <4 x float> @uitofp_load_4i8_to_4f32(ptr%a) {
define <8 x float> @uitofp_load_8i64_to_8f32(ptr%a) {
; SSE2-LABEL: uitofp_load_8i64_to_8f32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa 16(%rdi), %xmm0
-; SSE2-NEXT: movq 16(%rdi), %rax
+; SSE2-NEXT: movq 24(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_1
; SSE2-NEXT: # %bb.2:
@@ -4606,127 +4546,114 @@ define <8 x float> @uitofp_load_8i64_to_8f32(ptr%a) {
; SSE2-NEXT: cvtsi2ss %rax, %xmm2
; SSE2-NEXT: addss %xmm2, %xmm2
; SSE2-NEXT: .LBB87_3:
-; SSE2-NEXT: movq (%rdi), %rax
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: testq %rcx, %rcx
+; SSE2-NEXT: movq 16(%rdi), %rax
+; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_4
; SSE2-NEXT: # %bb.5:
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
; SSE2-NEXT: jmp .LBB87_6
; SSE2-NEXT: .LBB87_4:
-; SSE2-NEXT: movq %rcx, %rdx
-; SSE2-NEXT: shrq %rdx
-; SSE2-NEXT: andl $1, %ecx
-; SSE2-NEXT: orq %rdx, %rcx
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm1
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: shrq %rcx
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: orq %rcx, %rax
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
; SSE2-NEXT: addss %xmm1, %xmm1
; SSE2-NEXT: .LBB87_6:
-; SSE2-NEXT: movdqa (%rdi), %xmm3
-; SSE2-NEXT: testq %rax, %rax
+; SSE2-NEXT: movq (%rdi), %rax
+; SSE2-NEXT: movq 8(%rdi), %rcx
+; SSE2-NEXT: testq %rcx, %rcx
; SSE2-NEXT: js .LBB87_7
; SSE2-NEXT: # %bb.8:
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: jmp .LBB87_9
-; SSE2-NEXT: .LBB87_7:
+; SSE2-NEXT: cvtsi2ss %rcx, %xmm3
+; SSE2-NEXT: testq %rax, %rax
+; SSE2-NEXT: jns .LBB87_11
+; SSE2-NEXT: .LBB87_10:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: cvtsi2ss %rax, %xmm0
; SSE2-NEXT: addss %xmm0, %xmm0
-; SSE2-NEXT: .LBB87_9:
-; SSE2-NEXT: movq 48(%rdi), %rax
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm3, %rcx
-; SSE2-NEXT: testq %rcx, %rcx
-; SSE2-NEXT: js .LBB87_10
-; SSE2-NEXT: # %bb.11:
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm4
; SSE2-NEXT: jmp .LBB87_12
-; SSE2-NEXT: .LBB87_10:
+; SSE2-NEXT: .LBB87_7:
; SSE2-NEXT: movq %rcx, %rdx
; SSE2-NEXT: shrq %rdx
; SSE2-NEXT: andl $1, %ecx
; SSE2-NEXT: orq %rdx, %rcx
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm4
-; SSE2-NEXT: addss %xmm4, %xmm4
+; SSE2-NEXT: cvtsi2ss %rcx, %xmm3
+; SSE2-NEXT: addss %xmm3, %xmm3
+; SSE2-NEXT: testq %rax, %rax
+; SSE2-NEXT: js .LBB87_10
+; SSE2-NEXT: .LBB87_11:
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
; SSE2-NEXT: .LBB87_12:
-; SSE2-NEXT: movdqa 48(%rdi), %xmm5
+; SSE2-NEXT: movq 56(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_13
; SSE2-NEXT: # %bb.14:
-; SSE2-NEXT: xorps %xmm3, %xmm3
-; SSE2-NEXT: cvtsi2ss %rax, %xmm3
+; SSE2-NEXT: cvtsi2ss %rax, %xmm5
; SSE2-NEXT: jmp .LBB87_15
; SSE2-NEXT: .LBB87_13:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm3, %xmm3
-; SSE2-NEXT: cvtsi2ss %rax, %xmm3
-; SSE2-NEXT: addss %xmm3, %xmm3
+; SSE2-NEXT: cvtsi2ss %rax, %xmm5
+; SSE2-NEXT: addss %xmm5, %xmm5
; SSE2-NEXT: .LBB87_15:
-; SSE2-NEXT: movq 32(%rdi), %rax
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; SSE2-NEXT: movq %xmm5, %rcx
-; SSE2-NEXT: testq %rcx, %rcx
+; SSE2-NEXT: movq 48(%rdi), %rax
+; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_16
; SSE2-NEXT: # %bb.17:
-; SSE2-NEXT: xorps %xmm5, %xmm5
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm5
+; SSE2-NEXT: cvtsi2ss %rax, %xmm4
; SSE2-NEXT: jmp .LBB87_18
; SSE2-NEXT: .LBB87_16:
-; SSE2-NEXT: movq %rcx, %rdx
-; SSE2-NEXT: shrq %rdx
-; SSE2-NEXT: andl $1, %ecx
-; SSE2-NEXT: orq %rdx, %rcx
-; SSE2-NEXT: xorps %xmm5, %xmm5
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm5
-; SSE2-NEXT: addss %xmm5, %xmm5
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: shrq %rcx
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: orq %rcx, %rax
+; SSE2-NEXT: cvtsi2ss %rax, %xmm4
+; SSE2-NEXT: addss %xmm4, %xmm4
; SSE2-NEXT: .LBB87_18:
-; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-NEXT: movdqa 32(%rdi), %xmm4
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: movq 40(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_19
; SSE2-NEXT: # %bb.20:
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: cvtsi2ss %rax, %xmm2
; SSE2-NEXT: jmp .LBB87_21
; SSE2-NEXT: .LBB87_19:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: addss %xmm1, %xmm1
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: cvtsi2ss %rax, %xmm2
+; SSE2-NEXT: addss %xmm2, %xmm2
; SSE2-NEXT: .LBB87_21:
-; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE2-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: movq 32(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_22
; SSE2-NEXT: # %bb.23:
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
; SSE2-NEXT: jmp .LBB87_24
; SSE2-NEXT: .LBB87_22:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
-; SSE2-NEXT: addss %xmm2, %xmm2
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
+; SSE2-NEXT: addss %xmm1, %xmm1
; SSE2-NEXT: .LBB87_24:
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm4[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: uitofp_load_8i64_to_8f32:
diff --git a/llvm/test/CodeGen/X86/vector-half-conversions.ll b/llvm/test/CodeGen/X86/vector-half-conversions.ll
index ba21af2..563cf01 100644
--- a/llvm/test/CodeGen/X86/vector-half-conversions.ll
+++ b/llvm/test/CodeGen/X86/vector-half-conversions.ll
@@ -4989,3 +4989,257 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2 x half> %a) nounwind {
%ext = shufflevector <2 x i32> %cvt, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
ret <4 x i32> %ext
}
+
+define <4 x i32> @fptosi_4f16_to_4i32(<4 x half> %a) nounwind {
+; AVX-LABEL: fptosi_4f16_to_4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: subq $72, %rsp
+; AVX-NEXT: vmovdqa %xmm0, %xmm1
+; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vpsrlq $48, %xmm1, %xmm0
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $72, %rsp
+; AVX-NEXT: retq
+;
+; F16C-LABEL: fptosi_4f16_to_4i32:
+; F16C: # %bb.0:
+; F16C-NEXT: vcvtph2ps %xmm0, %ymm0
+; F16C-NEXT: vcvttps2dq %ymm0, %ymm0
+; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; F16C-NEXT: vzeroupper
+; F16C-NEXT: retq
+;
+; AVX512-LABEL: fptosi_4f16_to_4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
+; AVX512-NEXT: vcvttps2dq %ymm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %cvt = fptosi <4 x half> %a to <4 x i32>
+ ret <4 x i32> %cvt
+}
+
+define <4 x i32> @fptoui_2f16_to_4i32(<2 x half> %a) nounwind {
+; AVX1-LABEL: fptoui_2f16_to_4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: subq $40, %rsp
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX1-NEXT: addq $40, %rsp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: fptoui_2f16_to_4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: subq $40, %rsp
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
+; AVX2-NEXT: vsubps %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX2-NEXT: addq $40, %rsp
+; AVX2-NEXT: retq
+;
+; F16C-LABEL: fptoui_2f16_to_4i32:
+; F16C: # %bb.0:
+; F16C-NEXT: vpsrld $16, %xmm0, %xmm1
+; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
+; F16C-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; F16C-NEXT: vcvttps2dq %xmm0, %xmm1
+; F16C-NEXT: vpsrad $31, %xmm1, %xmm2
+; F16C-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; F16C-NEXT: vcvttps2dq %xmm0, %xmm0
+; F16C-NEXT: vpand %xmm2, %xmm0, %xmm0
+; F16C-NEXT: vpor %xmm0, %xmm1, %xmm0
+; F16C-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; F16C-NEXT: retq
+;
+; AVX512F-LABEL: fptoui_2f16_to_4i32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512F-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
+; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512-FASTLANE-LABEL: fptoui_2f16_to_4i32:
+; AVX512-FASTLANE: # %bb.0:
+; AVX512-FASTLANE-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-FASTLANE-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512-FASTLANE-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-FASTLANE-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-FASTLANE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX512-FASTLANE-NEXT: vcvttps2udq %xmm0, %xmm0
+; AVX512-FASTLANE-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512-FASTLANE-NEXT: retq
+ %cvt = fptoui <2 x half> %a to <2 x i32>
+ %ext = shufflevector <2 x i32> %cvt, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %ext
+}
+
+define <4 x i32> @fptoui_4f16_to_4i32(<4 x half> %a) nounwind {
+; AVX1-LABEL: fptoui_4f16_to_4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: subq $72, %rsp
+; AVX1-NEXT: vmovdqa %xmm0, %xmm1
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX1-NEXT: vpsrlq $48, %xmm1, %xmm0
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX1-NEXT: addq $72, %rsp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: fptoui_4f16_to_4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: subq $72, %rsp
+; AVX2-NEXT: vmovdqa %xmm0, %xmm1
+; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX2-NEXT: vpsrlq $48, %xmm1, %xmm0
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
+; AVX2-NEXT: vsubps %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
+; AVX2-NEXT: vsubps %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX2-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX2-NEXT: addq $72, %rsp
+; AVX2-NEXT: retq
+;
+; F16C-LABEL: fptoui_4f16_to_4i32:
+; F16C: # %bb.0:
+; F16C-NEXT: vcvtph2ps %xmm0, %ymm0
+; F16C-NEXT: vcvttps2dq %ymm0, %ymm1
+; F16C-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; F16C-NEXT: vcvttps2dq %ymm0, %ymm0
+; F16C-NEXT: vorps %ymm0, %ymm1, %ymm0
+; F16C-NEXT: vblendvps %ymm1, %ymm0, %ymm1, %ymm0
+; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; F16C-NEXT: vzeroupper
+; F16C-NEXT: retq
+;
+; AVX512F-LABEL: fptoui_4f16_to_4i32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vcvtph2ps %xmm0, %ymm0
+; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512-FASTLANE-LABEL: fptoui_4f16_to_4i32:
+; AVX512-FASTLANE: # %bb.0:
+; AVX512-FASTLANE-NEXT: vcvtph2ps %xmm0, %ymm0
+; AVX512-FASTLANE-NEXT: vcvttps2udq %ymm0, %ymm0
+; AVX512-FASTLANE-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-FASTLANE-NEXT: vzeroupper
+; AVX512-FASTLANE-NEXT: retq
+ %cvt = fptoui <4 x half> %a to <4 x i32>
+ ret <4 x i32> %cvt
+}
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
index 2b539ae..f56c43e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
@@ -2208,15 +2208,13 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,ymm3[19,20],zero,ymm3[22],zero,ymm3[24],zero,ymm3[22,23],zero,ymm3[25],zero,ymm3[23]
-; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[19],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25]
-; AVX2-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
; AVX2-NEXT: vpor %ymm9, %ymm10, %ymm9
+; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm10 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0]
; AVX2-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8
; AVX2-NEXT: vpshufd {{.*#+}} ymm9 = ymm0[0,2,1,1,4,6,5,5]
@@ -2284,15 +2282,13 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
; AVX2-FP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm6
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,ymm3[19,20],zero,ymm3[22],zero,ymm3[24],zero,ymm3[22,23],zero,ymm3[25],zero,ymm3[23]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FP-NEXT: vpor %ymm7, %ymm8, %ymm7
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[19],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0]
; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm8 = ymm0[0,2,1,1,4,6,5,5]
@@ -2300,15 +2296,13 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,ymm1[29,26],zero,ymm1[28],zero,ymm1[30],zero,ymm1[28,29],zero,ymm1[31],zero,ymm1[29]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[29,26],zero,ymm3[28],zero,ymm3[26,27,28,29],zero,ymm3[31],zero,ymm3[29,30],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,zero,zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
; AVX2-FP-NEXT: vpor %ymm9, %ymm10, %ymm9
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u]
; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8
; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm9 = ymm0[2,2,3,3,6,6,7,7]
@@ -2375,15 +2369,13 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm6
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,ymm3[19,20],zero,ymm3[22],zero,ymm3[24],zero,ymm3[22,23],zero,ymm3[25],zero,ymm3[23]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[19],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0]
; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [4,6,5,5,5,5,4,6]
@@ -2391,15 +2383,13 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,ymm1[29,26],zero,ymm1[28],zero,ymm1[30],zero,ymm1[28,29],zero,ymm1[31],zero,ymm1[29]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[29,26],zero,ymm3[28],zero,ymm3[26,27,28,29],zero,ymm3[31],zero,ymm3[29,30],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,zero,zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
; AVX2-FCP-NEXT: vpor %ymm9, %ymm10, %ymm9
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u]
; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [6,6,6,6,7,7,7,7]
@@ -2430,10 +2420,10 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512-LABEL: store_i8_stride5_vf32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512-NEXT: vmovdqa (%rsi), %ymm2
-; AVX512-NEXT: vmovdqa (%rdx), %ymm3
-; AVX512-NEXT: vmovdqa (%rcx), %ymm4
+; AVX512-NEXT: vmovdqa (%rdi), %ymm3
+; AVX512-NEXT: vmovdqa (%rsi), %ymm4
+; AVX512-NEXT: vmovdqa (%rdx), %ymm1
+; AVX512-NEXT: vmovdqa (%rcx), %ymm2
; AVX512-NEXT: vmovdqa (%r8), %ymm0
; AVX512-NEXT: vmovdqa (%rdi), %xmm5
; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[8],zero,xmm5[u,7],zero,xmm5[9],zero,xmm5[u],zero,xmm5[u,10],zero,xmm5[12],zero,xmm5[u,11]
@@ -2463,45 +2453,40 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
; AVX512-NEXT: vpermd %zmm6, %zmm8, %zmm6
; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
-; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[u,u,u],zero,ymm4[13,u,u,u],zero,ymm4[14,u,u,u],zero,ymm4[15,u,u,u],zero,ymm4[16,u,u,u],zero,ymm4[17,u,u,u],zero,ymm4[18,u,u]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,13],zero,ymm3[u,u,u,14],zero,ymm3[u,u,u,15],zero,ymm3[u,u,u,16],zero,ymm3[u,u,u,17],zero,ymm3[u,u,u,18],zero,ymm3[u,u]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u],zero,ymm2[13,u,u,u],zero,ymm2[14,u,u,u],zero,ymm2[15,u,u,u],zero,ymm2[16,u,u,u],zero,ymm2[17,u,u,u],zero,ymm2[18,u,u]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,13],zero,ymm1[u,u,u,14],zero,ymm1[u,u,u,15],zero,ymm1[u,u,u,16],zero,ymm1[u,u,u,17],zero,ymm1[u,u,u,18],zero,ymm1[u,u]
; AVX512-NEXT: vpor %ymm5, %ymm8, %ymm5
-; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u],zero,ymm2[13,u,u,u],zero,ymm2[14,u,u,u],zero,ymm2[15,u,u,u],zero,ymm2[16,u,u,u],zero,ymm2[17,u,u,u],zero,ymm2[18,u,u,u],zero
-; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,13],zero,ymm1[u,u,u,14],zero,ymm1[u,u,u,15],zero,ymm1[u,u,u,16],zero,ymm1[u,u,u,17],zero,ymm1[u,u,u,18],zero,ymm1[u,u,u,19]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u],zero,ymm4[13,u,u,u],zero,ymm4[14,u,u,u],zero,ymm4[15,u,u,u],zero,ymm4[16,u,u,u],zero,ymm4[17,u,u,u],zero,ymm4[18,u,u,u],zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,13],zero,ymm3[u,u,u,14],zero,ymm3[u,u,u,15],zero,ymm3[u,u,u,16],zero,ymm3[u,u,u,17],zero,ymm3[u,u,u,18],zero,ymm3[u,u,u,19]
; AVX512-NEXT: vpor %ymm8, %ymm9, %ymm8
; AVX512-NEXT: vpternlogq $226, %ymm5, %ymm11, %ymm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21,u],zero,ymm4[20],zero,ymm4[22],zero,ymm4[24,u],zero,ymm4[23],zero,ymm4[25,u]
+; AVX512-NEXT: vpor %ymm5, %ymm9, %ymm5
; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25]
+; AVX512-NEXT: vpor %ymm9, %ymm10, %ymm9
; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
-; AVX512-NEXT: vpternlogq $248, %ymm10, %ymm5, %ymm9
-; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero
-; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm3[21],zero,ymm3[21,20],zero,ymm3[22],zero,ymm3[24],zero,ymm3[22,23],zero,ymm3[25]
-; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512-NEXT: vpor %ymm5, %ymm11, %ymm5
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm5
-; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm5
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[12],zero,zero,zero,zero,ymm0[13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,ymm0[17],zero,zero,zero,zero,ymm0[18],zero
; AVX512-NEXT: vpshufd {{.*#+}} ymm9 = ymm0[0,2,1,1,4,6,5,5]
; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,3,2]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; AVX512-NEXT: vpandn %ymm9, %ymm11, %ymm9
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
+; AVX512-NEXT: vpandn %ymm9, %ymm10, %ymm9
; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,27,u,u,26,u,28,u,30,u,u,29,u,31,u]
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[27],zero,zero,ymm3[26],zero,ymm3[28],zero,ymm3[30],zero,zero,ymm3[29],zero,ymm3[31],zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,ymm3[27],zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30],zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u],zero,ymm4[26],zero,ymm4[28,u],zero,ymm4[u],zero,ymm4[29],zero,ymm4[31,u],zero,ymm4[30]
+; AVX512-NEXT: vpor %ymm3, %ymm4, %ymm3
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512-NEXT: vpternlogq $248, %ymm10, %ymm4, %ymm3
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,ymm1[26],zero,ymm1[28],zero,ymm1[30],zero,zero,ymm1[29],zero,ymm1[31],zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27,u],zero,ymm2[26],zero,ymm2[28],zero,ymm2[30,u],zero,ymm2[29],zero,ymm2[31,u]
+; AVX512-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
-; AVX512-NEXT: vpternlogq $184, %ymm3, %ymm7, %ymm1
+; AVX512-NEXT: vpternlogq $226, %ymm3, %ymm7, %ymm1
; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,2,3,3,6,6,7,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
@@ -2513,10 +2498,10 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512-FCP-LABEL: store_i8_stride5_vf32:
; AVX512-FCP: # %bb.0:
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm3
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm0
+; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm1
; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm4
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[8],zero,xmm4[u,7],zero,xmm4[9],zero,xmm4[u],zero,xmm4[u,10],zero,xmm4[12],zero,xmm4[u,11]
; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm6
@@ -2545,26 +2530,23 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
; AVX512-FCP-NEXT: vpermd %zmm4, %zmm7, %zmm7
; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm7
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u],zero,ymm3[13,u,u,u],zero,ymm3[14,u,u,u],zero,ymm3[15,u,u,u],zero,ymm3[16,u,u,u],zero,ymm3[17,u,u,u],zero,ymm3[18,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,13],zero,ymm2[u,u,u,14],zero,ymm2[u,u,u,15],zero,ymm2[u,u,u,16],zero,ymm2[u,u,u,17],zero,ymm2[u,u,u,18],zero,ymm2[u,u]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u],zero,ymm1[13,u,u,u],zero,ymm1[14,u,u,u],zero,ymm1[15,u,u,u],zero,ymm1[16,u,u,u],zero,ymm1[17,u,u,u],zero,ymm1[18,u,u]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,13],zero,ymm0[u,u,u,14],zero,ymm0[u,u,u,15],zero,ymm0[u,u,u,16],zero,ymm0[u,u,u,17],zero,ymm0[u,u,u,18],zero,ymm0[u,u]
; AVX512-FCP-NEXT: vpor %ymm5, %ymm8, %ymm5
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u],zero,ymm1[13,u,u,u],zero,ymm1[14,u,u,u],zero,ymm1[15,u,u,u],zero,ymm1[16,u,u,u],zero,ymm1[17,u,u,u],zero,ymm1[18,u,u,u],zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm0[u,13],zero,ymm0[u,u,u,14],zero,ymm0[u,u,u,15],zero,ymm0[u,u,u,16],zero,ymm0[u,u,u,17],zero,ymm0[u,u,u,18],zero,ymm0[u,u,u,19]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u],zero,ymm3[13,u,u,u],zero,ymm3[14,u,u,u],zero,ymm3[15,u,u,u],zero,ymm3[16,u,u,u],zero,ymm3[17,u,u,u],zero,ymm3[18,u,u,u],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,13],zero,ymm2[u,u,u,14],zero,ymm2[u,u,u,15],zero,ymm2[u,u,u,16],zero,ymm2[u,u,u,17],zero,ymm2[u,u,u,18],zero,ymm2[u,u,u,19]
; AVX512-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
; AVX512-FCP-NEXT: vpternlogq $226, %ymm5, %ymm10, %ymm8
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero,ymm2[25],zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm3[21,u],zero,ymm3[20],zero,ymm3[22],zero,ymm3[24,u],zero,ymm3[23],zero,ymm3[25,u]
+; AVX512-FCP-NEXT: vpor %ymm5, %ymm9, %ymm5
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[21],zero,zero,ymm0[20],zero,ymm0[22],zero,ymm0[24],zero,zero,ymm0[23],zero,ymm0[25],zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[19],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
+; AVX512-FCP-NEXT: vpor %ymm9, %ymm10, %ymm9
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm10, %ymm5, %ymm9
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[19],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm2[21],zero,ymm2[21,20],zero,ymm2[22],zero,ymm2[24],zero,ymm2[22,23],zero,ymm2[25]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512-FCP-NEXT: vpor %ymm5, %ymm11, %ymm5
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm5
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm5
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [4,0,5,5,5,5,0,6]
; AVX512-FCP-NEXT: vpermd %ymm4, %ymm8, %ymm8
@@ -2573,17 +2555,15 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[12],zero,zero,zero,zero,ymm4[13],zero,zero,zero,zero,ymm4[14],zero,zero,zero,zero,ymm4[15],zero,zero,zero,zero,ymm4[16],zero,zero,zero,zero,ymm4[17],zero,zero,zero,zero,ymm4[18],zero
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm9, %zmm8
; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,27,u,u,26,u,28,u,30,u,u,29,u,31,u]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[26],zero,ymm2[28],zero,zero,ymm2[27],zero,ymm2[29],zero,ymm2[31],zero,zero,ymm2[30],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u],zero,ymm3[26],zero,ymm3[28,u],zero,ymm3[u],zero,ymm3[29],zero,ymm3[31,u],zero,ymm3[30]
+; AVX512-FCP-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm10, %ymm3, %ymm2
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm0[26],zero,ymm0[28],zero,zero,ymm0[27],zero,ymm0[29],zero,ymm0[31],zero,zero,ymm0[30],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,zero,ymm0[26],zero,ymm0[28],zero,ymm0[30],zero,zero,ymm0[29],zero,ymm0[31],zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm1[27,u],zero,ymm1[26],zero,ymm1[28],zero,ymm1[30,u],zero,ymm1[29],zero,ymm1[31,u]
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm2, %ymm6, %ymm0
+; AVX512-FCP-NEXT: vpternlogq $226, %ymm2, %ymm6, %ymm0
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [6,6,6,0,7,7,7,7]
; AVX512-FCP-NEXT: vpermd %ymm4, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
@@ -2595,10 +2575,10 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512DQ-LABEL: store_i8_stride5_vf32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm2
-; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm3
-; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm4
+; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm3
+; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm4
+; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm1
+; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm2
; AVX512DQ-NEXT: vmovdqa (%r8), %ymm0
; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm5
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[8],zero,xmm5[u,7],zero,xmm5[9],zero,xmm5[u],zero,xmm5[u,10],zero,xmm5[12],zero,xmm5[u,11]
@@ -2628,45 +2608,40 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
; AVX512DQ-NEXT: vpermd %zmm6, %zmm8, %zmm6
; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[u,u,u],zero,ymm4[13,u,u,u],zero,ymm4[14,u,u,u],zero,ymm4[15,u,u,u],zero,ymm4[16,u,u,u],zero,ymm4[17,u,u,u],zero,ymm4[18,u,u]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,13],zero,ymm3[u,u,u,14],zero,ymm3[u,u,u,15],zero,ymm3[u,u,u,16],zero,ymm3[u,u,u,17],zero,ymm3[u,u,u,18],zero,ymm3[u,u]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u],zero,ymm2[13,u,u,u],zero,ymm2[14,u,u,u],zero,ymm2[15,u,u,u],zero,ymm2[16,u,u,u],zero,ymm2[17,u,u,u],zero,ymm2[18,u,u]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,13],zero,ymm1[u,u,u,14],zero,ymm1[u,u,u,15],zero,ymm1[u,u,u,16],zero,ymm1[u,u,u,17],zero,ymm1[u,u,u,18],zero,ymm1[u,u]
; AVX512DQ-NEXT: vpor %ymm5, %ymm8, %ymm5
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u],zero,ymm2[13,u,u,u],zero,ymm2[14,u,u,u],zero,ymm2[15,u,u,u],zero,ymm2[16,u,u,u],zero,ymm2[17,u,u,u],zero,ymm2[18,u,u,u],zero
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,13],zero,ymm1[u,u,u,14],zero,ymm1[u,u,u,15],zero,ymm1[u,u,u,16],zero,ymm1[u,u,u,17],zero,ymm1[u,u,u,18],zero,ymm1[u,u,u,19]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u],zero,ymm4[13,u,u,u],zero,ymm4[14,u,u,u],zero,ymm4[15,u,u,u],zero,ymm4[16,u,u,u],zero,ymm4[17,u,u,u],zero,ymm4[18,u,u,u],zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,13],zero,ymm3[u,u,u,14],zero,ymm3[u,u,u,15],zero,ymm3[u,u,u,16],zero,ymm3[u,u,u,17],zero,ymm3[u,u,u,18],zero,ymm3[u,u,u,19]
; AVX512DQ-NEXT: vpor %ymm8, %ymm9, %ymm8
; AVX512DQ-NEXT: vpternlogq $226, %ymm5, %ymm11, %ymm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21,u],zero,ymm4[20],zero,ymm4[22],zero,ymm4[24,u],zero,ymm4[23],zero,ymm4[25,u]
+; AVX512DQ-NEXT: vpor %ymm5, %ymm9, %ymm5
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25]
+; AVX512DQ-NEXT: vpor %ymm9, %ymm10, %ymm9
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm10, %ymm5, %ymm9
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm3[21],zero,ymm3[21,20],zero,ymm3[22],zero,ymm3[24],zero,ymm3[22,23],zero,ymm3[25]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512DQ-NEXT: vpor %ymm5, %ymm11, %ymm5
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm5
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm5
; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[12],zero,zero,zero,zero,ymm0[13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,ymm0[17],zero,zero,zero,zero,ymm0[18],zero
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm9 = ymm0[0,2,1,1,4,6,5,5]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,3,2]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; AVX512DQ-NEXT: vpandn %ymm9, %ymm11, %ymm9
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
+; AVX512DQ-NEXT: vpandn %ymm9, %ymm10, %ymm9
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,27,u,u,26,u,28,u,30,u,u,29,u,31,u]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[27],zero,zero,ymm3[26],zero,ymm3[28],zero,ymm3[30],zero,zero,ymm3[29],zero,ymm3[31],zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,ymm3[27],zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30],zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u],zero,ymm4[26],zero,ymm4[28,u],zero,ymm4[u],zero,ymm4[29],zero,ymm4[31,u],zero,ymm4[30]
+; AVX512DQ-NEXT: vpor %ymm3, %ymm4, %ymm3
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm10, %ymm4, %ymm3
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,ymm1[26],zero,ymm1[28],zero,ymm1[30],zero,zero,ymm1[29],zero,ymm1[31],zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27,u],zero,ymm2[26],zero,ymm2[28],zero,ymm2[30,u],zero,ymm2[29],zero,ymm2[31,u]
+; AVX512DQ-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
-; AVX512DQ-NEXT: vpternlogq $184, %ymm3, %ymm7, %ymm1
+; AVX512DQ-NEXT: vpternlogq $226, %ymm3, %ymm7, %ymm1
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,2,3,3,6,6,7,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
@@ -2678,10 +2653,10 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512DQ-FCP-LABEL: store_i8_stride5_vf32:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm4
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[8],zero,xmm4[u,7],zero,xmm4[9],zero,xmm4[u],zero,xmm4[u,10],zero,xmm4[12],zero,xmm4[u,11]
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm6
@@ -2710,26 +2685,23 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
; AVX512DQ-FCP-NEXT: vpermd %zmm4, %zmm7, %zmm7
; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm7
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u],zero,ymm3[13,u,u,u],zero,ymm3[14,u,u,u],zero,ymm3[15,u,u,u],zero,ymm3[16,u,u,u],zero,ymm3[17,u,u,u],zero,ymm3[18,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,13],zero,ymm2[u,u,u,14],zero,ymm2[u,u,u,15],zero,ymm2[u,u,u,16],zero,ymm2[u,u,u,17],zero,ymm2[u,u,u,18],zero,ymm2[u,u]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u],zero,ymm1[13,u,u,u],zero,ymm1[14,u,u,u],zero,ymm1[15,u,u,u],zero,ymm1[16,u,u,u],zero,ymm1[17,u,u,u],zero,ymm1[18,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,13],zero,ymm0[u,u,u,14],zero,ymm0[u,u,u,15],zero,ymm0[u,u,u,16],zero,ymm0[u,u,u,17],zero,ymm0[u,u,u,18],zero,ymm0[u,u]
; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm8, %ymm5
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u],zero,ymm1[13,u,u,u],zero,ymm1[14,u,u,u],zero,ymm1[15,u,u,u],zero,ymm1[16,u,u,u],zero,ymm1[17,u,u,u],zero,ymm1[18,u,u,u],zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm0[u,13],zero,ymm0[u,u,u,14],zero,ymm0[u,u,u,15],zero,ymm0[u,u,u,16],zero,ymm0[u,u,u,17],zero,ymm0[u,u,u,18],zero,ymm0[u,u,u,19]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u],zero,ymm3[13,u,u,u],zero,ymm3[14,u,u,u],zero,ymm3[15,u,u,u],zero,ymm3[16,u,u,u],zero,ymm3[17,u,u,u],zero,ymm3[18,u,u,u],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,13],zero,ymm2[u,u,u,14],zero,ymm2[u,u,u,15],zero,ymm2[u,u,u,16],zero,ymm2[u,u,u,17],zero,ymm2[u,u,u,18],zero,ymm2[u,u,u,19]
; AVX512DQ-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm5, %ymm10, %ymm8
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero,ymm2[25],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm3[21,u],zero,ymm3[20],zero,ymm3[22],zero,ymm3[24,u],zero,ymm3[23],zero,ymm3[25,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm9, %ymm5
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[21],zero,zero,ymm0[20],zero,ymm0[22],zero,ymm0[24],zero,zero,ymm0[23],zero,ymm0[25],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[19],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
+; AVX512DQ-FCP-NEXT: vpor %ymm9, %ymm10, %ymm9
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm10, %ymm5, %ymm9
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[19],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm2[21],zero,ymm2[21,20],zero,ymm2[22],zero,ymm2[24],zero,ymm2[22,23],zero,ymm2[25]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm11, %ymm5
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm5
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm5
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [4,0,5,5,5,5,0,6]
; AVX512DQ-FCP-NEXT: vpermd %ymm4, %ymm8, %ymm8
@@ -2738,17 +2710,15 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[12],zero,zero,zero,zero,ymm4[13],zero,zero,zero,zero,ymm4[14],zero,zero,zero,zero,ymm4[15],zero,zero,zero,zero,ymm4[16],zero,zero,zero,zero,ymm4[17],zero,zero,zero,zero,ymm4[18],zero
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm9, %zmm8
; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,27,u,u,26,u,28,u,30,u,u,29,u,31,u]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[26],zero,ymm2[28],zero,zero,ymm2[27],zero,ymm2[29],zero,ymm2[31],zero,zero,ymm2[30],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u],zero,ymm3[26],zero,ymm3[28,u],zero,ymm3[u],zero,ymm3[29],zero,ymm3[31,u],zero,ymm3[30]
+; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm10, %ymm3, %ymm2
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm0[26],zero,ymm0[28],zero,zero,ymm0[27],zero,ymm0[29],zero,ymm0[31],zero,zero,ymm0[30],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,zero,ymm0[26],zero,ymm0[28],zero,ymm0[30],zero,zero,ymm0[29],zero,ymm0[31],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm1[27,u],zero,ymm1[26],zero,ymm1[28],zero,ymm1[30,u],zero,ymm1[29],zero,ymm1[31,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm2, %ymm6, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm2, %ymm6, %ymm0
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [6,6,6,0,7,7,7,7]
; AVX512DQ-FCP-NEXT: vpermd %ymm4, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
@@ -2792,26 +2762,24 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
; AVX512BW-NEXT: kmovq %rax, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm6, %zmm3 {%k1}
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm6 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,ymm1[12,13],zero,zero,zero,zero,ymm1[14],zero,zero,zero,ymm1[14,15],zero,zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16,17],zero,zero,zero,zero,ymm1[18],zero,zero,zero
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
; AVX512BW-NEXT: vpor %ymm6, %ymm7, %ymm6
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm1[12,13],zero,zero,zero,zero,ymm1[14],zero,zero,zero,ymm1[14,15],zero,zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16,17],zero,zero,zero,zero,ymm1[18],zero,zero,zero
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
; AVX512BW-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm5[21],zero,zero,ymm5[20],zero,ymm5[22],zero,ymm5[24],zero,zero,ymm5[23],zero,ymm5[25],zero
; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512BW-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,3,3,0,4,4,4,4]
-; AVX512BW-NEXT: vpermd %ymm4, %ymm8, %ymm8
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [3,3,3,0,4,4,4,4]
+; AVX512BW-NEXT: vpermd %ymm4, %ymm7, %ymm7
; AVX512BW-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512BW-NEXT: kmovd %eax, %k1
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 {%k1} = ymm5[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 {%k1} = ymm5[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm5[21],zero,zero,ymm5[20],zero,ymm5[22],zero,ymm5[24],zero,zero,ymm5[23],zero,ymm5[25],zero
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
+; AVX512BW-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
; AVX512BW-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512BW-NEXT: kmovq %rax, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
@@ -2854,11 +2822,11 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-LABEL: store_i8_stride5_vf32:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm3
; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm0
; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm2
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm3
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm3[8],zero,xmm3[u,7],zero,xmm3[9],zero,xmm3[u],zero,xmm3[u,10],zero,xmm3[12],zero,xmm3[u,11]
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm4
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[8],zero,xmm4[u,7],zero,xmm4[9],zero,xmm4[u],zero,xmm4[u,10],zero,xmm4[12],zero,xmm4[u,11]
; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %xmm6
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,xmm6[8,u],zero,xmm6[7],zero,xmm6[9,u,11,u],zero,xmm6[10],zero,xmm6[12,u],zero
; AVX512BW-FCP-NEXT: vpor %xmm5, %xmm7, %xmm5
@@ -2871,39 +2839,37 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm8[6],zero,xmm8[8,u],zero,xmm8[7],zero,xmm8[9],zero,xmm8[11,u],zero,xmm8[10],zero,xmm8[12]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[6],zero,xmm7[8],zero,xmm7[u,7],zero,xmm7[9],zero,xmm7[11],zero,xmm7[u,10],zero,xmm7[12],zero
; AVX512BW-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm3, %zmm3
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm4[0,0,1,1,4,4,5,5]
; AVX512BW-FCP-NEXT: movabsq $3570337559743967628, %rax # imm = 0x318C631818C6318C
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm4 {%k1}
; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
; AVX512BW-FCP-NEXT: vpermd %zmm5, %zmm6, %zmm6
; AVX512BW-FCP-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm3 {%k1}
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm4 {%k1}
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18],zero,zero,zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
; AVX512BW-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18],zero,zero,zero
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
; AVX512BW-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,3,3,0,4,4,4,4]
-; AVX512BW-FCP-NEXT: vpermd %ymm1, %ymm8, %ymm8
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [3,3,3,0,4,4,4,4]
+; AVX512BW-FCP-NEXT: vpermd %ymm1, %ymm7, %ymm7
; AVX512BW-FCP-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512BW-FCP-NEXT: kmovd %eax, %k1
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 {%k1} = ymm4[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 {%k1} = ymm3[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
+; AVX512BW-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
; AVX512BW-FCP-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
@@ -2912,16 +2878,14 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,zero,zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,zero,zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
+; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm1, %ymm1
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,zero,ymm0[26],zero,ymm0[28],zero,ymm0[30],zero,zero,ymm0[29],zero,ymm0[31],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512BW-FCP-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512BW-FCP-NEXT: movl $415641996, %eax # imm = 0x18C6318C
; AVX512BW-FCP-NEXT: kmovd %eax, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
@@ -2932,7 +2896,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
; AVX512BW-FCP-NEXT: vmovdqa %ymm0, 128(%r9)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, 64(%r9)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, (%r9)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, (%r9)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -2970,26 +2934,24 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm6, %zmm3 {%k1}
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm6 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,ymm1[12,13],zero,zero,zero,zero,ymm1[14],zero,zero,zero,ymm1[14,15],zero,zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16,17],zero,zero,zero,zero,ymm1[18],zero,zero,zero
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
; AVX512DQ-BW-NEXT: vpor %ymm6, %ymm7, %ymm6
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm1[12,13],zero,zero,zero,zero,ymm1[14],zero,zero,zero,ymm1[14,15],zero,zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16,17],zero,zero,zero,zero,ymm1[18],zero,zero,zero
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
; AVX512DQ-BW-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm5[21],zero,zero,ymm5[20],zero,ymm5[22],zero,ymm5[24],zero,zero,ymm5[23],zero,ymm5[25],zero
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512DQ-BW-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,3,3,0,4,4,4,4]
-; AVX512DQ-BW-NEXT: vpermd %ymm4, %ymm8, %ymm8
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [3,3,3,0,4,4,4,4]
+; AVX512DQ-BW-NEXT: vpermd %ymm4, %ymm7, %ymm7
; AVX512DQ-BW-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512DQ-BW-NEXT: kmovd %eax, %k1
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 {%k1} = ymm5[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 {%k1} = ymm5[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm5[21],zero,zero,ymm5[20],zero,ymm5[22],zero,ymm5[24],zero,zero,ymm5[23],zero,ymm5[25],zero
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
+; AVX512DQ-BW-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
; AVX512DQ-BW-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
@@ -3032,11 +2994,11 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-LABEL: store_i8_stride5_vf32:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm3[8],zero,xmm3[u,7],zero,xmm3[9],zero,xmm3[u],zero,xmm3[u,10],zero,xmm3[12],zero,xmm3[u,11]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm4
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[8],zero,xmm4[u,7],zero,xmm4[9],zero,xmm4[u],zero,xmm4[u,10],zero,xmm4[12],zero,xmm4[u,11]
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %xmm6
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,xmm6[8,u],zero,xmm6[7],zero,xmm6[9,u,11,u],zero,xmm6[10],zero,xmm6[12,u],zero
; AVX512DQ-BW-FCP-NEXT: vpor %xmm5, %xmm7, %xmm5
@@ -3049,39 +3011,37 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm8[6],zero,xmm8[8,u],zero,xmm8[7],zero,xmm8[9],zero,xmm8[11,u],zero,xmm8[10],zero,xmm8[12]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[6],zero,xmm7[8],zero,xmm7[u,7],zero,xmm7[9],zero,xmm7[11],zero,xmm7[u,10],zero,xmm7[12],zero
; AVX512DQ-BW-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm3, %zmm3
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm4[0,0,1,1,4,4,5,5]
; AVX512DQ-BW-FCP-NEXT: movabsq $3570337559743967628, %rax # imm = 0x318C631818C6318C
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm4 {%k1}
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
; AVX512DQ-BW-FCP-NEXT: vpermd %zmm5, %zmm6, %zmm6
; AVX512DQ-BW-FCP-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm3 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm4 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
; AVX512DQ-BW-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18],zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
; AVX512DQ-BW-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,3,3,0,4,4,4,4]
-; AVX512DQ-BW-FCP-NEXT: vpermd %ymm1, %ymm8, %ymm8
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [3,3,3,0,4,4,4,4]
+; AVX512DQ-BW-FCP-NEXT: vpermd %ymm1, %ymm7, %ymm7
; AVX512DQ-BW-FCP-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k1
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 {%k1} = ymm4[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 {%k1} = ymm3[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
; AVX512DQ-BW-FCP-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
@@ -3090,16 +3050,14 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,zero,zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,zero,zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm1, %ymm1
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,zero,ymm0[26],zero,ymm0[28],zero,ymm0[30],zero,zero,ymm0[29],zero,ymm0[31],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512DQ-BW-FCP-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512DQ-BW-FCP-NEXT: movl $415641996, %eax # imm = 0x18C6318C
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
@@ -3110,7 +3068,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm0, 128(%r9)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, 64(%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, (%r9)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <32 x i8>, ptr %in.vecptr0, align 64
@@ -4148,209 +4106,200 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX2-LABEL: store_i8_stride5_vf64:
; AVX2: # %bb.0:
-; AVX2-NEXT: subq $312, %rsp # imm = 0x138
-; AVX2-NEXT: vmovdqa 32(%rdi), %ymm10
+; AVX2-NEXT: subq $248, %rsp
+; AVX2-NEXT: vmovdqa 32(%rdi), %ymm4
; AVX2-NEXT: vmovdqa (%rcx), %xmm1
; AVX2-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT: vmovdqa 32(%rcx), %xmm11
-; AVX2-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovdqa 32(%rcx), %xmm7
+; AVX2-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
; AVX2-NEXT: vpshufb %xmm0, %xmm1, %xmm1
; AVX2-NEXT: vmovdqa (%rdx), %xmm3
; AVX2-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT: vmovdqa 32(%rdx), %xmm12
-; AVX2-NEXT: vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovdqa 32(%rdx), %xmm10
+; AVX2-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
-; AVX2-NEXT: vmovdqa (%rdi), %xmm4
-; AVX2-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovdqa (%rdi), %xmm5
+; AVX2-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX2-NEXT: vpshufb %xmm3, %xmm4, %xmm4
-; AVX2-NEXT: vmovdqa (%rsi), %xmm14
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX2-NEXT: vpshufb %xmm5, %xmm14, %xmm8
-; AVX2-NEXT: vpor %xmm4, %xmm8, %xmm4
-; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255]
-; AVX2-NEXT: vpblendvb %ymm8, %ymm1, %ymm4, %ymm1
-; AVX2-NEXT: vmovdqa (%r8), %xmm4
-; AVX2-NEXT: vmovdqa %xmm4, (%rsp) # 16-byte Spill
-; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,2]
-; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,1,1]
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm9, %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpshufb %xmm3, %xmm5, %xmm5
+; AVX2-NEXT: vmovdqa (%rsi), %xmm6
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm8 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
+; AVX2-NEXT: vpshufb %xmm8, %xmm6, %xmm9
+; AVX2-NEXT: vpor %xmm5, %xmm9, %xmm5
+; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,1,1]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255]
+; AVX2-NEXT: vpblendvb %ymm9, %ymm1, %ymm5, %ymm1
+; AVX2-NEXT: vmovdqa (%r8), %xmm5
+; AVX2-NEXT: vmovdqa %xmm5, (%rsp) # 16-byte Spill
+; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
+; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,1,1]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
+; AVX2-NEXT: vpblendvb %ymm12, %ymm1, %ymm5, %ymm1
; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vmovdqa 32(%rdi), %xmm4
-; AVX2-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT: vpshufb %xmm0, %xmm11, %xmm0
-; AVX2-NEXT: vpshufb %xmm2, %xmm12, %xmm1
+; AVX2-NEXT: vmovdqa 32(%rdi), %xmm5
+; AVX2-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vpshufb %xmm0, %xmm7, %xmm0
+; AVX2-NEXT: vpshufb %xmm2, %xmm10, %xmm1
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vmovdqa 32(%rsi), %xmm2
; AVX2-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT: vpshufb %xmm3, %xmm4, %xmm1
-; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm5, %xmm1
+; AVX2-NEXT: vpshufb %xmm8, %xmm2, %xmm2
; AVX2-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vmovdqa 32(%rsi), %ymm2
+; AVX2-NEXT: vmovdqa 32(%rsi), %ymm11
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
-; AVX2-NEXT: vpblendvb %ymm8, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpblendvb %ymm9, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovdqa 32(%r8), %xmm1
; AVX2-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,2]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,1]
-; AVX2-NEXT: vpblendvb %ymm9, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpblendvb %ymm12, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovdqa {{.*#+}} ymm15 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,20,128,22,128,24,128,22,23,128,25,128,23]
-; AVX2-NEXT: vpshufb %ymm15, %ymm10, %ymm1
-; AVX2-NEXT: vmovdqa %ymm10, %ymm11
-; AVX2-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128]
-; AVX2-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX2-NEXT: vpshufb %ymm5, %ymm2, %ymm3
-; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
+; AVX2-NEXT: vpshufb %ymm15, %ymm4, %ymm1
+; AVX2-NEXT: vmovdqa %ymm4, %ymm13
+; AVX2-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128]
+; AVX2-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX2-NEXT: vpshufb %ymm4, %ymm11, %ymm3
; AVX2-NEXT: vpor %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vmovdqa 32(%rcx), %ymm7
+; AVX2-NEXT: vmovdqa 32(%rdx), %ymm12
+; AVX2-NEXT: vmovdqa 32(%rcx), %ymm14
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
; AVX2-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX2-NEXT: vpshufb %ymm3, %ymm7, %ymm4
-; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm4[2,2,3,3]
-; AVX2-NEXT: vmovdqa 32(%rdx), %ymm13
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25]
-; AVX2-NEXT: vpshufb %ymm4, %ymm13, %ymm12
-; AVX2-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX2-NEXT: vpor %ymm9, %ymm12, %ymm9
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm12 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0]
-; AVX2-NEXT: vpblendvb %ymm12, %ymm1, %ymm9, %ymm9
-; AVX2-NEXT: vmovdqa (%rdi), %ymm6
-; AVX2-NEXT: vpshufb %ymm15, %ymm6, %ymm1
-; AVX2-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT: vpshufb %ymm3, %ymm14, %ymm8
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25]
+; AVX2-NEXT: vpshufb %ymm5, %ymm12, %ymm10
+; AVX2-NEXT: vpor %ymm8, %ymm10, %ymm8
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm10 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0]
+; AVX2-NEXT: vpblendvb %ymm10, %ymm1, %ymm8, %ymm2
+; AVX2-NEXT: vmovdqa (%rdi), %ymm9
+; AVX2-NEXT: vpshufb %ymm15, %ymm9, %ymm1
; AVX2-NEXT: vmovdqa (%rsi), %ymm15
-; AVX2-NEXT: vpshufb %ymm5, %ymm15, %ymm5
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX2-NEXT: vpor %ymm1, %ymm5, %ymm5
-; AVX2-NEXT: vmovdqa (%rcx), %ymm10
-; AVX2-NEXT: vpshufb %ymm3, %ymm10, %ymm3
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm3[2,2,3,3]
-; AVX2-NEXT: vmovdqa (%rdx), %ymm8
-; AVX2-NEXT: vpshufb %ymm4, %ymm8, %ymm4
+; AVX2-NEXT: vpshufb %ymm4, %ymm15, %ymm4
+; AVX2-NEXT: vpor %ymm1, %ymm4, %ymm4
+; AVX2-NEXT: vmovdqa (%rcx), %ymm7
+; AVX2-NEXT: vpshufb %ymm3, %ymm7, %ymm0
+; AVX2-NEXT: vmovdqa (%rdx), %ymm3
+; AVX2-NEXT: vpshufb %ymm5, %ymm3, %ymm5
+; AVX2-NEXT: vpor %ymm0, %ymm5, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX2-NEXT: vpor %ymm0, %ymm4, %ymm0
-; AVX2-NEXT: vpblendvb %ymm12, %ymm5, %ymm0, %ymm0
-; AVX2-NEXT: vmovdqa 32(%r8), %ymm12
-; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm12[0,2,1,1,4,6,5,5]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; AVX2-NEXT: vpblendvb %ymm10, %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa 32(%r8), %ymm10
+; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm10[0,2,1,1,4,6,5,5]
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,3,2]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm5, %ymm9, %ymm4, %ymm1
+; AVX2-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, %ymm1
; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vmovdqa (%r8), %ymm9
-; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm9[0,2,1,1,4,6,5,5]
+; AVX2-NEXT: vmovdqa (%r8), %ymm8
+; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm8[0,2,1,1,4,6,5,5]
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,3,2]
; AVX2-NEXT: vpblendvb %ymm5, %ymm0, %ymm4, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm3 = [3,3,3,0,4,4,4,4]
-; AVX2-NEXT: vpermd %ymm11, %ymm3, %ymm4
+; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm2 = [3,3,3,0,4,4,4,4]
+; AVX2-NEXT: vpermd %ymm13, %ymm2, %ymm4
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm5 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
-; AVX2-NEXT: vpshufb %ymm5, %ymm2, %ymm0
+; AVX2-NEXT: vpshufb %ymm5, %ymm11, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255]
; AVX2-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpermd %ymm6, %ymm3, %ymm2
+; AVX2-NEXT: vpermd %ymm9, %ymm2, %ymm2
; AVX2-NEXT: vpshufb %ymm5, %ymm15, %ymm4
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm4, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX2-NEXT: vmovdqa %ymm7, %ymm3
-; AVX2-NEXT: vpshufb %ymm2, %ymm7, %ymm4
+; AVX2-NEXT: vpshufb %ymm2, %ymm14, %ymm4
; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128]
-; AVX2-NEXT: vpshufb %ymm5, %ymm13, %ymm11
-; AVX2-NEXT: vpor %ymm4, %ymm11, %ymm4
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm11 = [u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255]
-; AVX2-NEXT: vpblendvb %ymm11, %ymm0, %ymm4, %ymm0
-; AVX2-NEXT: vpshufb %ymm2, %ymm10, %ymm2
-; AVX2-NEXT: vpshufb %ymm5, %ymm8, %ymm4
+; AVX2-NEXT: vpshufb %ymm5, %ymm12, %ymm13
+; AVX2-NEXT: vpor %ymm4, %ymm13, %ymm4
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm13 = [u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255]
+; AVX2-NEXT: vpblendvb %ymm13, %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpshufb %ymm2, %ymm7, %ymm2
+; AVX2-NEXT: vpshufb %ymm5, %ymm3, %ymm4
; AVX2-NEXT: vpor %ymm2, %ymm4, %ymm2
-; AVX2-NEXT: vpblendvb %ymm11, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpblendvb %ymm13, %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm2 = [3,3,3,3,0,4,4,4]
-; AVX2-NEXT: vpermd %ymm12, %ymm2, %ymm4
+; AVX2-NEXT: vpermd %ymm10, %ymm2, %ymm4
; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
; AVX2-NEXT: vpblendvb %ymm5, %ymm0, %ymm4, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpermd %ymm9, %ymm2, %ymm0
+; AVX2-NEXT: vpermd %ymm8, %ymm2, %ymm0
; AVX2-NEXT: vpblendvb %ymm5, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
+; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX2-NEXT: vpshufb %xmm14, %xmm0, %xmm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX2-NEXT: vpshufb %xmm7, %xmm1, %xmm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255]
-; AVX2-NEXT: vpblendvb %ymm11, %ymm0, %ymm1, %ymm6
-; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; AVX2-NEXT: vpshufb %xmm14, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm13 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX2-NEXT: vpshufb %xmm13, %xmm0, %xmm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,0,1,1]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX2-NEXT: vpshufb %xmm6, %xmm1, %xmm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,0,1,1]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255]
+; AVX2-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm4
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX2-NEXT: vpshufb %xmm7, %xmm1, %xmm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
+; AVX2-NEXT: vpshufb %xmm13, %xmm1, %xmm1
+; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX2-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX2-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3],xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
+; AVX2-NEXT: vpshufb %xmm6, %xmm2, %xmm2
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
-; AVX2-NEXT: vpblendvb %ymm11, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,1,1]
+; AVX2-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
; AVX2-NEXT: vpshufd $80, (%rsp), %xmm1 # 16-byte Folded Reload
; AVX2-NEXT: # xmm1 = mem[0,0,1,1]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm6, %ymm1, %ymm6
+; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm4
; AVX2-NEXT: vpshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; AVX2-NEXT: # xmm1 = mem[0,0,1,1]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm7
+; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm6
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX2-NEXT: vpshufb %ymm0, %ymm3, %ymm1
-; AVX2-NEXT: vpshufhw {{.*#+}} ymm2 = ymm13[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
+; AVX2-NEXT: vpshufb %ymm0, %ymm14, %ymm1
+; AVX2-NEXT: vpshufhw {{.*#+}} ymm2 = ymm12[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,2,3,3,6,6,7,7]
-; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [255,0,255,0,0,255,0,255,0,255,0,0,255,0,255,0,255,0,255,0,0,255,0,255,0,255,0,0,255,0,255,0]
-; AVX2-NEXT: # ymm4 = mem[0,1,0,1]
-; AVX2-NEXT: vpblendvb %ymm4, %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpshufb %ymm0, %ymm10, %ymm0
-; AVX2-NEXT: vpshufhw {{.*#+}} ymm2 = ymm8[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [255,0,255,0,0,255,0,255,0,255,0,0,255,0,255,0,255,0,255,0,0,255,0,255,0,255,0,0,255,0,255,0]
+; AVX2-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX2-NEXT: vpblendvb %ymm5, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpshufb %ymm0, %ymm7, %ymm0
+; AVX2-NEXT: vpshufhw {{.*#+}} ymm2 = ymm3[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,2,3,3,6,6,7,7]
-; AVX2-NEXT: vpblendvb %ymm4, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpblendvb %ymm5, %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14]
-; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm3
-; AVX2-NEXT: vpshufhw $230, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX2-NEXT: # ymm4 = mem[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
-; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,3,3,6,6,7,7]
-; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [255,0,0,255,0,255,0,0,0,0,255,0,255,0,0,255,255,0,0,255,0,255,0,0,0,0,255,0,255,0,0,255]
-; AVX2-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX2-NEXT: vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
+; AVX2-NEXT: vpshufb %ymm2, %ymm11, %ymm3
+; AVX2-NEXT: vpshufhw $230, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX2-NEXT: # ymm5 = mem[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[2,2,3,3,6,6,7,7]
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [255,0,0,255,0,255,0,0,0,0,255,0,255,0,0,255,255,0,0,255,0,255,0,0,0,0,255,0,255,0,0,255]
+; AVX2-NEXT: # ymm7 = mem[0,1,0,1]
+; AVX2-NEXT: vpblendvb %ymm7, %ymm3, %ymm5, %ymm3
; AVX2-NEXT: vpshufb %ymm2, %ymm15, %ymm2
-; AVX2-NEXT: vpshufhw $230, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX2-NEXT: # ymm4 = mem[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
-; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,3,3,6,6,7,7]
-; AVX2-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, %ymm2
+; AVX2-NEXT: vpshufhw {{.*#+}} ymm5 = ymm9[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[2,2,3,3,6,6,7,7]
+; AVX2-NEXT: vpblendvb %ymm7, %ymm2, %ymm5, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u]
-; AVX2-NEXT: vpblendvb %ymm4, %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u]
+; AVX2-NEXT: vpblendvb %ymm5, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX2-NEXT: vpblendvb %ymm4, %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm12[2,2,3,3,6,6,7,7]
+; AVX2-NEXT: vpblendvb %ymm5, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm10[2,2,3,3,6,6,7,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
; AVX2-NEXT: vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm9[2,2,3,3,6,6,7,7]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[2,2,3,3,6,6,7,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
; AVX2-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
@@ -4362,21 +4311,21 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm2, 256(%r9)
; AVX2-NEXT: vmovdqa %ymm0, 128(%r9)
-; AVX2-NEXT: vmovdqa %ymm7, 160(%r9)
+; AVX2-NEXT: vmovdqa %ymm6, 160(%r9)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 192(%r9)
; AVX2-NEXT: vmovdqa %ymm1, 288(%r9)
-; AVX2-NEXT: vmovdqa %ymm6, (%r9)
+; AVX2-NEXT: vmovdqa %ymm4, (%r9)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 32(%r9)
-; AVX2-NEXT: addq $312, %rsp # imm = 0x138
+; AVX2-NEXT: addq $248, %rsp
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-FP-LABEL: store_i8_stride5_vf64:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: subq $200, %rsp
-; AVX2-FP-NEXT: vmovdqa 32(%rdx), %ymm11
+; AVX2-FP-NEXT: vmovdqa 32(%rdx), %ymm12
; AVX2-FP-NEXT: vmovdqa (%rcx), %xmm1
; AVX2-FP-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
; AVX2-FP-NEXT: vmovdqa 32(%rcx), %xmm8
@@ -4420,7 +4369,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vpshufb %xmm3, %xmm4, %xmm1
; AVX2-FP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
; AVX2-FP-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX2-FP-NEXT: vmovdqa 32(%rcx), %ymm3
+; AVX2-FP-NEXT: vmovdqa 32(%rcx), %ymm2
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
; AVX2-FP-NEXT: vpblendvb %ymm6, %ymm0, %ymm1, %ymm0
@@ -4431,106 +4380,98 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,29,26,128,28,128,30,128,28,29,128,31,128,29]
-; AVX2-FP-NEXT: vpshufb %ymm0, %ymm11, %ymm1
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
+; AVX2-FP-NEXT: vpshufb %ymm0, %ymm12, %ymm1
; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
; AVX2-FP-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX2-FP-NEXT: vpshufb %ymm8, %ymm3, %ymm2
-; AVX2-FP-NEXT: vmovdqa %ymm3, %ymm12
-; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX2-FP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-FP-NEXT: vpshufb %ymm8, %ymm2, %ymm3
+; AVX2-FP-NEXT: vmovdqa %ymm2, %ymm14
+; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm4
+; AVX2-FP-NEXT: vmovdqa 32(%rsi), %ymm11
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,29,26,128,28,128,26,27,28,29,128,31,128,29,30,128]
-; AVX2-FP-NEXT: vpshufb %ymm5, %ymm4, %ymm2
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm6 = ymm2[2,2,3,3]
-; AVX2-FP-NEXT: vmovdqa 32(%rsi), %ymm2
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm4, %ymm6
; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
; AVX2-FP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX2-FP-NEXT: vpshufb %ymm9, %ymm2, %ymm7
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX2-FP-NEXT: vpshufb %ymm9, %ymm11, %ymm7
; AVX2-FP-NEXT: vpor %ymm6, %ymm7, %ymm6
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u]
; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm1, %ymm6, %ymm3
; AVX2-FP-NEXT: vmovdqa (%rdx), %ymm13
; AVX2-FP-NEXT: vpshufb %ymm0, %ymm13, %ymm0
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX2-FP-NEXT: vmovdqa (%rcx), %ymm7
; AVX2-FP-NEXT: vpshufb %ymm8, %ymm7, %ymm1
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
; AVX2-FP-NEXT: vpor %ymm0, %ymm1, %ymm8
-; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX2-FP-NEXT: vpshufb %ymm5, %ymm1, %ymm0
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm5 = ymm0[2,2,3,3]
-; AVX2-FP-NEXT: vmovdqa (%rsi), %ymm0
-; AVX2-FP-NEXT: vpshufb %ymm9, %ymm0, %ymm9
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
+; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm2, %ymm5
+; AVX2-FP-NEXT: vmovdqa (%rsi), %ymm1
+; AVX2-FP-NEXT: vpshufb %ymm9, %ymm1, %ymm9
; AVX2-FP-NEXT: vpor %ymm5, %ymm9, %ymm5
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm8, %ymm5, %ymm8
; AVX2-FP-NEXT: vmovdqa 32(%r8), %ymm5
; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm9 = ymm5[2,2,3,3,6,6,7,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
-; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm3, %ymm9, %ymm3
-; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm3, %ymm9, %ymm0
+; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqa (%r8), %ymm3
; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm9 = ymm3[2,2,3,3,6,6,7,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm6
-; AVX2-FP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm0
+; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,20,128,22,128,24,128,22,23,128,25,128,23]
; AVX2-FP-NEXT: vpshufb %ymm8, %ymm4, %ymm9
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128]
; AVX2-FP-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX2-FP-NEXT: vpshufb %ymm10, %ymm2, %ymm15
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
+; AVX2-FP-NEXT: vpshufb %ymm10, %ymm11, %ymm15
; AVX2-FP-NEXT: vpor %ymm9, %ymm15, %ymm9
; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
; AVX2-FP-NEXT: # ymm15 = mem[0,1,0,1]
-; AVX2-FP-NEXT: vpshufb %ymm15, %ymm12, %ymm14
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
+; AVX2-FP-NEXT: vpshufb %ymm15, %ymm14, %ymm0
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25]
-; AVX2-FP-NEXT: vpshufb %ymm6, %ymm11, %ymm12
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX2-FP-NEXT: vpor %ymm14, %ymm12, %ymm12
+; AVX2-FP-NEXT: vpshufb %ymm6, %ymm12, %ymm14
+; AVX2-FP-NEXT: vpor %ymm0, %ymm14, %ymm0
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm14 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0]
-; AVX2-FP-NEXT: vpblendvb %ymm14, %ymm9, %ymm12, %ymm9
-; AVX2-FP-NEXT: vpshufb %ymm8, %ymm1, %ymm8
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX2-FP-NEXT: vpshufb %ymm10, %ymm0, %ymm10
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
-; AVX2-FP-NEXT: vpor %ymm8, %ymm10, %ymm8
-; AVX2-FP-NEXT: vpshufb %ymm15, %ymm7, %ymm10
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
+; AVX2-FP-NEXT: vpblendvb %ymm14, %ymm9, %ymm0, %ymm0
+; AVX2-FP-NEXT: vpshufb %ymm8, %ymm2, %ymm8
+; AVX2-FP-NEXT: vpshufb %ymm10, %ymm1, %ymm9
+; AVX2-FP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-FP-NEXT: vpshufb %ymm15, %ymm7, %ymm9
; AVX2-FP-NEXT: vpshufb %ymm6, %ymm13, %ymm6
+; AVX2-FP-NEXT: vpor %ymm6, %ymm9, %ymm6
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX2-FP-NEXT: vpor %ymm6, %ymm10, %ymm6
; AVX2-FP-NEXT: vpblendvb %ymm14, %ymm8, %ymm6, %ymm6
; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm8 = ymm5[0,2,1,1,4,6,5,5]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,3,2]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm9, %ymm8, %ymm10
-; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm8 = ymm3[0,2,1,1,4,6,5,5]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,3,2]
-; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm6, %ymm8, %ymm9
-; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [3,3,3,0,4,4,4,4]
-; AVX2-FP-NEXT: vpermd %ymm4, %ymm6, %ymm4
-; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
-; AVX2-FP-NEXT: vpshufb %ymm8, %ymm2, %ymm2
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255]
-; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm4, %ymm2, %ymm2
-; AVX2-FP-NEXT: vpermd %ymm1, %ymm6, %ymm1
-; AVX2-FP-NEXT: vpshufb %ymm8, %ymm0, %ymm0
-; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm1, %ymm0, %ymm0
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
+; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm0, %ymm8, %ymm10
+; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm0 = ymm3[0,2,1,1,4,6,5,5]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,3,2]
+; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm6, %ymm0, %ymm9
+; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [3,3,3,0,4,4,4,4]
+; AVX2-FP-NEXT: vpermd %ymm4, %ymm0, %ymm4
+; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
+; AVX2-FP-NEXT: vpshufb %ymm6, %ymm11, %ymm8
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm11 = [u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255]
+; AVX2-FP-NEXT: vpblendvb %ymm11, %ymm4, %ymm8, %ymm4
+; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm0
+; AVX2-FP-NEXT: vpshufb %ymm6, %ymm1, %ymm1
+; AVX2-FP-NEXT: vpblendvb %ymm11, %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm1, %ymm4, %ymm4
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm1, %ymm2, %ymm2
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128]
-; AVX2-FP-NEXT: vpshufb %ymm6, %ymm11, %ymm8
-; AVX2-FP-NEXT: vpor %ymm4, %ymm8, %ymm4
+; AVX2-FP-NEXT: vpshufb %ymm6, %ymm12, %ymm8
+; AVX2-FP-NEXT: vpor %ymm2, %ymm8, %ymm2
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm8 = [u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255]
-; AVX2-FP-NEXT: vpblendvb %ymm8, %ymm2, %ymm4, %ymm2
+; AVX2-FP-NEXT: vpblendvb %ymm8, %ymm4, %ymm2, %ymm2
; AVX2-FP-NEXT: vpshufb %ymm1, %ymm7, %ymm1
; AVX2-FP-NEXT: vpshufb %ymm6, %ymm13, %ymm4
; AVX2-FP-NEXT: vpor %ymm1, %ymm4, %ymm1
@@ -4596,8 +4537,8 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-LABEL: store_i8_stride5_vf64:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: subq $168, %rsp
-; AVX2-FCP-NEXT: vmovdqa 32(%rdx), %ymm13
-; AVX2-FCP-NEXT: vmovdqa 32(%rcx), %ymm10
+; AVX2-FCP-NEXT: vmovdqa 32(%rdx), %ymm14
+; AVX2-FCP-NEXT: vmovdqa 32(%rcx), %ymm9
; AVX2-FCP-NEXT: vmovdqa (%r8), %ymm11
; AVX2-FCP-NEXT: vmovdqa (%rcx), %xmm1
; AVX2-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -4643,96 +4584,88 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
; AVX2-FCP-NEXT: vpblendvb %ymm4, %ymm1, %ymm2, %ymm1
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 32(%r8), %ymm12
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm3, %ymm1
+; AVX2-FCP-NEXT: vmovdqa 32(%r8), %ymm13
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm3, %ymm1
; AVX2-FCP-NEXT: vpblendvb %ymm4, %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,29,26,128,28,128,30,128,28,29,128,31,128,29]
-; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm13, %ymm1
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
-; AVX2-FCP-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm10, %ymm2
-; AVX2-FCP-NEXT: vmovdqa %ymm10, %ymm14
-; AVX2-FCP-NEXT: vmovdqu %ymm10, (%rsp) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm14, %ymm1
+; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
+; AVX2-FCP-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm9, %ymm2
+; AVX2-FCP-NEXT: vmovdqa %ymm9, %ymm15
+; AVX2-FCP-NEXT: vmovdqu %ymm9, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
+; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX2-FCP-NEXT: vmovdqa 32(%rsi), %ymm3
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,29,26,128,28,128,26,27,28,29,128,31,128,29,30,128]
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm2
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm2[2,2,3,3]
-; AVX2-FCP-NEXT: vmovdqa 32(%rsi), %ymm2
-; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
-; AVX2-FCP-NEXT: # ymm15 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm2, %ymm5
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
+; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm4
+; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
+; AVX2-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm5
; AVX2-FCP-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u]
; AVX2-FCP-NEXT: vpblendvb %ymm7, %ymm1, %ymm4, %ymm4
-; AVX2-FCP-NEXT: vmovdqa (%rdx), %ymm10
-; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm10, %ymm0
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; AVX2-FCP-NEXT: vmovdqa (%rdx), %ymm12
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm12, %ymm0
; AVX2-FCP-NEXT: vmovdqa (%rcx), %ymm5
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm1
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX2-FCP-NEXT: vpor %ymm0, %ymm1, %ymm8
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm1
+; AVX2-FCP-NEXT: vpor %ymm0, %ymm1, %ymm10
; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm0
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm0[2,2,3,3]
+; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm9
; AVX2-FCP-NEXT: vmovdqa (%rsi), %ymm0
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm0, %ymm15
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
-; AVX2-FCP-NEXT: vpor %ymm9, %ymm15, %ymm9
-; AVX2-FCP-NEXT: vpblendvb %ymm7, %ymm8, %ymm9, %ymm7
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm8
+; AVX2-FCP-NEXT: vpor %ymm9, %ymm8, %ymm8
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm10[2,2,3,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX2-FCP-NEXT: vpblendvb %ymm7, %ymm9, %ymm8, %ymm7
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [6,6,6,6,7,7,7,7]
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm8, %ymm9
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
-; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm4, %ymm9, %ymm4
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm8, %ymm9
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm4, %ymm9, %ymm4
; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd %ymm11, %ymm8, %ymm4
-; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm7, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm7, %ymm4, %ymm4
; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,20,128,22,128,24,128,22,23,128,25,128,23]
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm7
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm7
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128]
; AVX2-FCP-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm9
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm9
; AVX2-FCP-NEXT: vpor %ymm7, %ymm9, %ymm7
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
; AVX2-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm14, %ymm15
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
+; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm15, %ymm10
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25]
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm13, %ymm14
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
-; AVX2-FCP-NEXT: vpor %ymm15, %ymm14, %ymm14
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm14, %ymm15
+; AVX2-FCP-NEXT: vpor %ymm10, %ymm15, %ymm10
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0]
-; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm7, %ymm14, %ymm7
+; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm7, %ymm10, %ymm7
; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm6
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm8
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FCP-NEXT: vpor %ymm6, %ymm8, %ymm6
; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm8
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm4
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm12, %ymm4
; AVX2-FCP-NEXT: vpor %ymm4, %ymm8, %ymm4
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm6, %ymm4, %ymm4
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [4,6,5,5,5,5,4,6]
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm6, %ymm8
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; AVX2-FCP-NEXT: vpblendvb %ymm14, %ymm7, %ymm8, %ymm9
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm6, %ymm8
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm7, %ymm8, %ymm9
; AVX2-FCP-NEXT: vpermd %ymm11, %ymm6, %ymm6
-; AVX2-FCP-NEXT: vpblendvb %ymm14, %ymm4, %ymm6, %ymm7
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm4, %ymm6, %ymm7
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [3,3,3,0,4,4,4,4]
-; AVX2-FCP-NEXT: vpermd %ymm3, %ymm4, %ymm3
+; AVX2-FCP-NEXT: vpermd %ymm2, %ymm4, %ymm2
; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm3
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255]
-; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm3, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm2, %ymm3, %ymm2
; AVX2-FCP-NEXT: vpermd %ymm1, %ymm4, %ymm1
; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm1, %ymm0, %ymm0
@@ -4740,16 +4673,16 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm3 # 32-byte Reload
; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm3, %ymm3
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128]
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm13, %ymm6
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm14, %ymm6
; AVX2-FCP-NEXT: vpor %ymm3, %ymm6, %ymm3
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255]
; AVX2-FCP-NEXT: vpblendvb %ymm6, %ymm2, %ymm3, %ymm2
; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm5, %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm3
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm12, %ymm3
; AVX2-FCP-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX2-FCP-NEXT: vpblendvb %ymm6, %ymm0, %ymm1, %ymm1
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [3,3,3,3,0,4,4,4]
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm3, %ymm0
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm3, %ymm0
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
; AVX2-FCP-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpermd %ymm11, %ymm3, %ymm2
@@ -4783,7 +4716,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vpermd %ymm11, %ymm4, %ymm5
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255]
; AVX2-FCP-NEXT: vpblendvb %ymm6, %ymm2, %ymm5, %ymm2
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm4, %ymm4
; AVX2-FCP-NEXT: vpblendvb %ymm6, %ymm3, %ymm4, %ymm3
; AVX2-FCP-NEXT: vmovdqa %ymm1, 64(%r9)
; AVX2-FCP-NEXT: vmovdqa %ymm0, 224(%r9)
@@ -4805,766 +4738,740 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512-LABEL: store_i8_stride5_vf64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovdqa 32(%rsi), %ymm3
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm15 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
-; AVX512-NEXT: vpshufb %ymm15, %ymm3, %ymm0
-; AVX512-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
-; AVX512-NEXT: vpshufb %ymm9, %ymm2, %ymm1
+; AVX512-NEXT: vmovdqa 32(%rsi), %ymm11
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
+; AVX512-NEXT: vpshufb %ymm1, %ymm11, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm1, %ymm18
+; AVX512-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
+; AVX512-NEXT: vpshufb %ymm2, %ymm5, %ymm1
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm19
+; AVX512-NEXT: vporq %ymm0, %ymm1, %ymm20
+; AVX512-NEXT: vmovdqa 32(%rdi), %xmm12
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
+; AVX512-NEXT: vpshufb %xmm1, %xmm12, %xmm0
+; AVX512-NEXT: vmovdqa64 %xmm1, %xmm28
+; AVX512-NEXT: vmovdqa 32(%rsi), %xmm10
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
+; AVX512-NEXT: vpshufb %xmm2, %xmm10, %xmm1
+; AVX512-NEXT: vmovdqa64 %xmm2, %xmm29
+; AVX512-NEXT: vporq %xmm0, %xmm1, %xmm21
+; AVX512-NEXT: vmovdqa 32(%rcx), %ymm15
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512-NEXT: vpshufb %ymm8, %ymm15, %ymm0
+; AVX512-NEXT: vmovdqa 32(%rdx), %ymm13
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
+; AVX512-NEXT: vpshufb %ymm3, %ymm13, %ymm1
+; AVX512-NEXT: vporq %ymm0, %ymm1, %ymm22
+; AVX512-NEXT: vmovdqa 32(%rcx), %xmm6
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
+; AVX512-NEXT: vpshufb %xmm1, %xmm6, %xmm0
+; AVX512-NEXT: vmovdqa64 %xmm1, %xmm30
+; AVX512-NEXT: vmovdqa 32(%rdx), %xmm7
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
+; AVX512-NEXT: vpshufb %xmm2, %xmm7, %xmm1
+; AVX512-NEXT: vmovdqa64 %xmm2, %xmm31
+; AVX512-NEXT: vporq %xmm0, %xmm1, %xmm23
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
+; AVX512-NEXT: # ymm9 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm9, %ymm5, %ymm0
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
+; AVX512-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm4, %ymm5, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm24
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30,27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30]
+; AVX512-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm5, %ymm11, %ymm1
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0,19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0]
+; AVX512-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm2, %ymm11, %ymm11
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm11, %zmm26
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm11, %ymm13, %ymm1
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
+; AVX512-NEXT: # ymm0 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm0, %ymm15, %ymm14
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm14, %zmm25
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0,25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0]
+; AVX512-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm1, %ymm15, %ymm14
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
+; AVX512-NEXT: # ymm15 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm15, %ymm13, %ymm13
+; AVX512-NEXT: vinserti64x4 $1, %ymm14, %zmm13, %zmm27
+; AVX512-NEXT: vmovdqa (%rcx), %ymm13
+; AVX512-NEXT: vpshufb %ymm8, %ymm13, %ymm8
+; AVX512-NEXT: vmovdqa (%rdx), %ymm14
+; AVX512-NEXT: vpshufb %ymm3, %ymm14, %ymm3
+; AVX512-NEXT: vporq %ymm8, %ymm3, %ymm16
+; AVX512-NEXT: vpshufb %ymm0, %ymm13, %ymm0
+; AVX512-NEXT: vpshufb %ymm15, %ymm14, %ymm3
+; AVX512-NEXT: vporq %ymm0, %ymm3, %ymm17
+; AVX512-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512-NEXT: vmovdqa64 %ymm18, %ymm0
+; AVX512-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512-NEXT: vmovdqa (%rdi), %ymm8
+; AVX512-NEXT: vmovdqa64 %ymm19, %ymm15
+; AVX512-NEXT: vpshufb %ymm15, %ymm8, %ymm15
+; AVX512-NEXT: vporq %ymm0, %ymm15, %ymm18
+; AVX512-NEXT: vpshufb %ymm4, %ymm8, %ymm0
+; AVX512-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512-NEXT: vporq %ymm0, %ymm2, %ymm19
+; AVX512-NEXT: vpshufb %ymm11, %ymm14, %ymm0
+; AVX512-NEXT: vpshufb %ymm1, %ymm13, %ymm1
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa 32(%rdi), %xmm1
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512-NEXT: vpshufb %xmm5, %xmm1, %xmm0
-; AVX512-NEXT: vmovdqa64 %xmm1, %xmm16
-; AVX512-NEXT: vmovdqa 32(%rsi), %xmm4
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm14 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512-NEXT: vpshufb %xmm14, %xmm4, %xmm1
-; AVX512-NEXT: vmovdqa64 %xmm4, %xmm31
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa 32(%rcx), %ymm8
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512-NEXT: vpshufb %ymm0, %ymm8, %ymm4
-; AVX512-NEXT: vmovdqa 32(%rdx), %ymm11
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
-; AVX512-NEXT: vpshufb %ymm1, %ymm11, %ymm10
-; AVX512-NEXT: vpor %ymm4, %ymm10, %ymm4
-; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa 32(%rcx), %xmm13
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
-; AVX512-NEXT: vpshufb %xmm6, %xmm13, %xmm4
-; AVX512-NEXT: vmovdqa64 %xmm6, %xmm25
-; AVX512-NEXT: vmovdqa 32(%rdx), %xmm10
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512-NEXT: vpshufb %xmm6, %xmm10, %xmm12
-; AVX512-NEXT: vmovdqa64 %xmm6, %xmm26
-; AVX512-NEXT: vporq %xmm4, %xmm12, %xmm20
-; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm22
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[26],zero,ymm2[28],zero,zero,ymm2[27],zero,ymm2[29],zero,ymm2[31],zero,zero,ymm2[30],zero
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero,ymm2[25],zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm23
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[27],zero,zero,ymm11[26],zero,ymm11[28],zero,ymm11[30],zero,zero,ymm11[29],zero,ymm11[31],zero,zero
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm8[19],zero,ymm8[21],zero,zero,ymm8[20],zero,ymm8[22],zero,ymm8[24],zero,zero,ymm8[23],zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm24
-; AVX512-NEXT: vmovdqa (%rcx), %ymm12
-; AVX512-NEXT: vpshufb %ymm0, %ymm12, %ymm0
-; AVX512-NEXT: vmovdqa (%rdx), %ymm6
-; AVX512-NEXT: vpshufb %ymm1, %ymm6, %ymm1
-; AVX512-NEXT: vporq %ymm0, %ymm1, %ymm19
-; AVX512-NEXT: vmovdqa (%rsi), %ymm7
-; AVX512-NEXT: vpshufb %ymm15, %ymm7, %ymm2
-; AVX512-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512-NEXT: vpshufb %ymm9, %ymm4, %ymm3
-; AVX512-NEXT: vporq %ymm2, %ymm3, %ymm21
-; AVX512-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512-NEXT: vpshufb %xmm5, %xmm0, %xmm3
-; AVX512-NEXT: vmovdqa64 %xmm0, %xmm17
-; AVX512-NEXT: vmovdqa (%rsi), %xmm5
-; AVX512-NEXT: vpshufb %xmm14, %xmm5, %xmm9
-; AVX512-NEXT: vporq %xmm3, %xmm9, %xmm27
-; AVX512-NEXT: vmovdqa (%rcx), %xmm1
-; AVX512-NEXT: vmovdqa64 %xmm25, %xmm0
-; AVX512-NEXT: vpshufb %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: vmovdqa64 %xmm1, %xmm18
-; AVX512-NEXT: vmovdqa (%rdx), %xmm9
-; AVX512-NEXT: vmovdqa64 %xmm26, %xmm1
-; AVX512-NEXT: vpshufb %xmm1, %xmm9, %xmm15
-; AVX512-NEXT: vporq %xmm0, %xmm15, %xmm29
-; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,2,2]
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,1]
+; AVX512-NEXT: vpshufb %ymm9, %ymm8, %ymm1
+; AVX512-NEXT: vpshufb %ymm5, %ymm3, %ymm2
+; AVX512-NEXT: vmovdqa (%rdi), %xmm5
+; AVX512-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512-NEXT: vmovdqa (%rcx), %xmm8
+; AVX512-NEXT: vmovdqa64 %xmm28, %xmm2
+; AVX512-NEXT: vpshufb %xmm2, %xmm5, %xmm2
+; AVX512-NEXT: vmovdqa64 %xmm29, %xmm3
+; AVX512-NEXT: vpshufb %xmm3, %xmm9, %xmm3
+; AVX512-NEXT: vpor %xmm2, %xmm3, %xmm4
+; AVX512-NEXT: vmovdqa (%rdx), %xmm3
+; AVX512-NEXT: vmovdqa 32(%r8), %ymm11
+; AVX512-NEXT: vmovdqa64 %xmm30, %xmm2
+; AVX512-NEXT: vpshufb %xmm2, %xmm8, %xmm2
+; AVX512-NEXT: vmovdqa64 %xmm31, %xmm13
+; AVX512-NEXT: vpshufb %xmm13, %xmm3, %xmm13
+; AVX512-NEXT: vpor %xmm2, %xmm13, %xmm13
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm14 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
+; AVX512-NEXT: vpshufb %ymm14, %ymm11, %ymm2
+; AVX512-NEXT: vpshufd {{.*#+}} xmm15 = mem[1,1,2,2]
+; AVX512-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,1,1]
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm28 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; AVX512-NEXT: vpandnq %ymm0, %ymm28, %ymm0
-; AVX512-NEXT: vmovdqa 32(%r8), %ymm15
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512-NEXT: vpshufb %ymm1, %ymm15, %ymm14
-; AVX512-NEXT: vinserti64x4 $1, %ymm14, %zmm0, %zmm26
-; AVX512-NEXT: vmovdqa (%r8), %ymm0
-; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm1
-; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,1,4,6,5,5]
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,3,2]
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm30 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
-; AVX512-NEXT: vpandnq %ymm0, %ymm30, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm25
-; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512-NEXT: vpshufb %ymm0, %ymm8, %ymm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm14 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm12[19],zero,ymm12[21],zero,zero,ymm12[20],zero,ymm12[22],zero,ymm12[24],zero,zero,ymm12[23],zero
-; AVX512-NEXT: vpshufb %ymm0, %ymm12, %ymm12
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
-; AVX512-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm0, %ymm11, %ymm11
-; AVX512-NEXT: vpshufb %ymm0, %ymm6, %ymm2
-; AVX512-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,ymm6[26],zero,ymm6[28],zero,ymm6[30],zero,zero,ymm6[29],zero,ymm6[31],zero,zero
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3],xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm13 = ymm7[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,ymm4[27],zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30],zero
-; AVX512-NEXT: vmovdqa64 %xmm16, %xmm1
-; AVX512-NEXT: vmovdqa64 %xmm31, %xmm4
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm1 = [4,0,5,5,5,5,0,6,6,6,6,0,7,7,7,7]
-; AVX512-NEXT: vpermd %zmm15, %zmm1, %zmm31
-; AVX512-NEXT: vmovdqa64 (%r8), %zmm16
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm1 = [6,6,6,0,7,7,7,7,0,16,16,16,16,0,17,17]
-; AVX512-NEXT: vpermi2d %zmm15, %zmm16, %zmm1
-; AVX512-NEXT: vmovdqa64 %xmm17, %xmm15
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm15[0],xmm5[0],xmm15[1],xmm5[1],xmm15[2],xmm5[2],xmm15[3],xmm5[3],xmm15[4],xmm5[4],xmm15[5],xmm5[5],xmm15[6],xmm5[6],xmm15[7],xmm5[7]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512-NEXT: vpshufb %xmm15, %xmm4, %xmm4
-; AVX512-NEXT: vpshufb %xmm15, %xmm5, %xmm5
-; AVX512-NEXT: vinserti32x4 $2, %xmm27, %zmm5, %zmm5
-; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512-NEXT: vmovdqa64 %xmm18, %xmm15
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm15[0],xmm9[0],xmm15[1],xmm9[1],xmm15[2],xmm9[2],xmm15[3],xmm9[3],xmm15[4],xmm9[4],xmm15[5],xmm9[5],xmm15[6],xmm9[6],xmm15[7],xmm9[7]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm15 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512-NEXT: vpshufb %xmm15, %xmm10, %xmm10
+; AVX512-NEXT: vpandnq %ymm15, %ymm28, %ymm15
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm15, %zmm2
+; AVX512-NEXT: vmovdqa (%r8), %ymm15
+; AVX512-NEXT: vpshufb %ymm14, %ymm15, %ymm14
+; AVX512-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,2,1,1,4,6,5,5]
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm29 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
+; AVX512-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,3,2]
+; AVX512-NEXT: vpandnq %ymm15, %ymm29, %ymm15
+; AVX512-NEXT: vinserti64x4 $1, %ymm15, %zmm14, %zmm14
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512-NEXT: vpshufb %xmm7, %xmm6, %xmm6
+; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1]
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3],xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512-NEXT: vpshufb %xmm12, %xmm10, %xmm10
; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
-; AVX512-NEXT: vpshufb %xmm15, %xmm9, %xmm9
-; AVX512-NEXT: vinserti32x4 $2, %xmm29, %zmm9, %zmm9
-; AVX512-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm15 = mem[0,0,1,1]
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm15 # 32-byte Folded Reload
-; AVX512-NEXT: vpermq {{.*#+}} ymm17 = ymm20[0,0,1,1]
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm17 # 32-byte Folded Reload
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm18 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512-NEXT: vpternlogq $226, %zmm15, %zmm18, %zmm17
-; AVX512-NEXT: vpternlogq $248, %zmm28, %zmm17, %zmm26
-; AVX512-NEXT: vpermq {{.*#+}} zmm15 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512-NEXT: vpermq {{.*#+}} zmm17 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm20 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975]
-; AVX512-NEXT: vpternlogq $248, %zmm20, %zmm15, %zmm17
-; AVX512-NEXT: vpandq %ymm20, %ymm8, %ymm8
-; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm11, %zmm8
-; AVX512-NEXT: vpermq {{.*#+}} zmm11 = zmm24[2,2,3,3,6,6,7,7]
-; AVX512-NEXT: vporq %zmm11, %zmm8, %zmm8
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512-NEXT: vpternlogq $226, %zmm17, %zmm11, %zmm8
-; AVX512-NEXT: vpternlogd $184, %zmm8, %zmm30, %zmm31
-; AVX512-NEXT: vpor %ymm2, %ymm14, %ymm2
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm19, %zmm2
-; AVX512-NEXT: vpternlogq $248, %ymm20, %ymm13, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm21, %zmm0
-; AVX512-NEXT: vpternlogq $226, %zmm2, %zmm18, %zmm0
-; AVX512-NEXT: vpternlogq $248, %ymm20, %ymm12, %ymm6
-; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm6, %zmm2
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm3
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512-NEXT: vpternlogq $226, %zmm2, %zmm11, %zmm3
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm25
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm1
-; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm5[0,0,1,1,4,4,5,5]
-; AVX512-NEXT: vpermq {{.*#+}} zmm2 = zmm9[0,0,1,1,4,4,5,5]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
+; AVX512-NEXT: vmovdqa64 (%r8), %zmm15
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm30 = [4,0,5,5,5,5,0,6,6,6,6,0,7,7,7,7]
+; AVX512-NEXT: vpermd %zmm11, %zmm30, %zmm30
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm31 = [6,6,6,0,7,7,7,7,0,16,16,16,16,0,17,17]
+; AVX512-NEXT: vpermi2d %zmm11, %zmm15, %zmm31
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
+; AVX512-NEXT: vpshufb %xmm12, %xmm5, %xmm5
+; AVX512-NEXT: vinserti32x4 $2, %xmm4, %zmm5, %zmm4
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3],xmm8[4],xmm3[4],xmm8[5],xmm3[5],xmm8[6],xmm3[6],xmm8[7],xmm3[7]
+; AVX512-NEXT: vpshufb %xmm7, %xmm3, %xmm3
+; AVX512-NEXT: vinserti32x4 $2, %xmm13, %zmm3, %zmm3
+; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm21[0,0,1,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm20, %zmm5, %zmm5
+; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm23[0,0,1,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm22, %zmm7, %zmm7
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
+; AVX512-NEXT: vpternlogq $226, %zmm5, %zmm8, %zmm7
+; AVX512-NEXT: vpternlogq $248, %zmm28, %zmm7, %zmm2
+; AVX512-NEXT: vpermq {{.*#+}} zmm5 = zmm24[2,2,3,3,6,6,7,7]
+; AVX512-NEXT: vpermq {{.*#+}} zmm7 = zmm26[2,2,3,3,6,6,7,7]
+; AVX512-NEXT: vporq %zmm5, %zmm7, %zmm5
+; AVX512-NEXT: vpermq {{.*#+}} zmm7 = zmm25[2,2,3,3,6,6,7,7]
+; AVX512-NEXT: vpermq {{.*#+}} zmm9 = zmm27[2,2,3,3,6,6,7,7]
+; AVX512-NEXT: vporq %zmm7, %zmm9, %zmm7
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512-NEXT: vpternlogq $226, %zmm5, %zmm9, %zmm7
+; AVX512-NEXT: vpternlogd $184, %zmm7, %zmm29, %zmm30
+; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm17[2,2,3,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm16, %zmm5
+; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm19[2,2,3,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm18, %zmm7
+; AVX512-NEXT: vpternlogq $226, %zmm5, %zmm8, %zmm7
+; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm1, %zmm1
+; AVX512-NEXT: vpternlogq $226, %zmm0, %zmm9, %zmm1
+; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm14
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm31
+; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm4[0,0,1,1,4,4,5,5]
+; AVX512-NEXT: vpermq {{.*#+}} zmm1 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
-; AVX512-NEXT: vpermd %zmm16, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512-NEXT: vmovdqa64 %zmm25, 64(%r9)
+; AVX512-NEXT: vpermd %zmm15, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512-NEXT: vmovdqa64 %zmm14, 64(%r9)
; AVX512-NEXT: vmovdqa64 %zmm0, (%r9)
-; AVX512-NEXT: vmovdqa64 %zmm1, 128(%r9)
-; AVX512-NEXT: vmovdqa64 %zmm31, 256(%r9)
-; AVX512-NEXT: vmovdqa64 %zmm26, 192(%r9)
+; AVX512-NEXT: vmovdqa64 %zmm31, 128(%r9)
+; AVX512-NEXT: vmovdqa64 %zmm30, 256(%r9)
+; AVX512-NEXT: vmovdqa64 %zmm2, 192(%r9)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: store_i8_stride5_vf64:
; AVX512-FCP: # %bb.0:
-; AVX512-FCP-NEXT: subq $24, %rsp
-; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %ymm3
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm0
+; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %ymm0
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
+; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm0, %ymm2
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm2
-; AVX512-FCP-NEXT: vpor %ymm0, %ymm2, %ymm0
-; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm0
-; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %xmm4
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm4, %xmm2
-; AVX512-FCP-NEXT: vmovdqa64 %xmm4, %xmm19
-; AVX512-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %ymm7
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm2
-; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %ymm9
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm15, %ymm9, %ymm4
-; AVX512-FCP-NEXT: vpor %ymm2, %ymm4, %ymm2
-; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %xmm12
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
-; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm12, %xmm2
-; AVX512-FCP-NEXT: vmovdqa64 %xmm4, %xmm25
-; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %xmm10
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm10, %xmm4
-; AVX512-FCP-NEXT: vmovdqa64 %xmm6, %xmm26
-; AVX512-FCP-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm21
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm22
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm9[27],zero,zero,ymm9[26],zero,ymm9[28],zero,ymm9[30],zero,zero,ymm9[29],zero,ymm9[31],zero,zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm7[19],zero,ymm7[21],zero,zero,ymm7[20],zero,ymm7[22],zero,ymm7[24],zero,zero,ymm7[23],zero
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm23
-; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm4
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm14
-; AVX512-FCP-NEXT: vpshufb %ymm15, %ymm14, %ymm1
-; AVX512-FCP-NEXT: vporq %ymm0, %ymm1, %ymm24
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm15
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm15, %ymm0
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm6
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm6, %ymm1
-; AVX512-FCP-NEXT: vporq %ymm0, %ymm1, %ymm20
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm1
-; AVX512-FCP-NEXT: vmovdqa64 %xmm0, %xmm16
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm3
-; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm3, %xmm2
-; AVX512-FCP-NEXT: vporq %xmm1, %xmm2, %xmm28
-; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm1
-; AVX512-FCP-NEXT: vmovdqa64 %xmm25, %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm8
-; AVX512-FCP-NEXT: vmovdqa64 %xmm1, %xmm18
-; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm1
-; AVX512-FCP-NEXT: vmovdqa64 %xmm26, %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm11
-; AVX512-FCP-NEXT: vmovdqa64 %xmm1, %xmm17
-; AVX512-FCP-NEXT: vporq %xmm8, %xmm11, %xmm29
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [1,1,2,2,2,2,2,2]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm3
+; AVX512-FCP-NEXT: vporq %ymm2, %ymm3, %ymm17
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[8],zero,xmm3[u,7],zero,xmm3[9],zero,xmm3[u],zero,xmm3[u,10],zero,xmm3[12],zero,xmm3[u,11]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm31
+; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %xmm15
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,xmm15[8,u],zero,xmm15[7],zero,xmm15[9,u,11,u],zero,xmm15[10],zero,xmm15[12,u],zero
+; AVX512-FCP-NEXT: vporq %xmm2, %xmm4, %xmm18
+; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm4
+; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %ymm11
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm6
+; AVX512-FCP-NEXT: vporq %ymm4, %ymm6, %ymm19
+; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %xmm6
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm4
+; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm30
+; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %xmm7
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm7, %xmm8
+; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm29
+; AVX512-FCP-NEXT: vporq %xmm4, %xmm8, %xmm20
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
+; AVX512-FCP-NEXT: # ymm13 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm13, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm21
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30,27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30]
+; AVX512-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm4
+; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm28
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0,19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0]
+; AVX512-FCP-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm22
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm11, %ymm0
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
+; AVX512-FCP-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm8
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm24
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0,25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0]
+; AVX512-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm0
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
+; AVX512-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm11, %ymm2
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm26
+; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm2
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm0
+; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm11
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm10
+; AVX512-FCP-NEXT: vporq %ymm0, %ymm10, %ymm23
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm11, %ymm1
+; AVX512-FCP-NEXT: vporq %ymm0, %ymm1, %ymm25
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm10
+; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm10, %ymm0
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm12
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm12, %ymm1
+; AVX512-FCP-NEXT: vporq %ymm0, %ymm1, %ymm27
+; AVX512-FCP-NEXT: vpshufb %ymm13, %ymm12, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm10, %ymm1
+; AVX512-FCP-NEXT: vporq %ymm0, %ymm1, %ymm16
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm9
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm9[8],zero,xmm9[u,7],zero,xmm9[9],zero,xmm9[u],zero,xmm9[u,10],zero,xmm9[12],zero,xmm9[u,11]
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm14
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,xmm14[8,u],zero,xmm14[7],zero,xmm14[9,u,11,u],zero,xmm14[10],zero,xmm14[12,u],zero
+; AVX512-FCP-NEXT: vpor %xmm0, %xmm1, %xmm3
+; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm13
+; AVX512-FCP-NEXT: vmovdqa64 %xmm30, %xmm0
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm13, %xmm1
+; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm0
+; AVX512-FCP-NEXT: vmovdqa64 %xmm29, %xmm5
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm5
+; AVX512-FCP-NEXT: vpor %xmm1, %xmm5, %xmm5
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm11, %ymm1
+; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm2
+; AVX512-FCP-NEXT: vpor %ymm1, %ymm2, %ymm11
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm12[26],zero,ymm12[28],zero,zero,ymm12[27],zero,ymm12[29],zero,ymm12[31],zero,zero,ymm12[30],zero
+; AVX512-FCP-NEXT: vmovdqa64 %ymm28, %ymm2
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm10, %ymm2
; AVX512-FCP-NEXT: vmovdqa 32(%r8), %ymm8
-; AVX512-FCP-NEXT: vpermd %ymm8, %ymm11, %ymm11
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm25 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; AVX512-FCP-NEXT: vpandnq %ymm11, %ymm25, %ymm11
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm8, %ymm13
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm11, %zmm26
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm31 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm30 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
-; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm11
-; AVX512-FCP-NEXT: vpermd %ymm11, %ymm31, %ymm27
-; AVX512-FCP-NEXT: vpandnq %ymm27, %ymm30, %ymm27
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm11, %ymm0
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm27, %zmm0, %zmm27
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm7
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm5
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
-; AVX512-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm9, %ymm9
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm14, %ymm4
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,ymm14[26],zero,ymm14[28],zero,ymm14[30],zero,zero,ymm14[29],zero,ymm14[31],zero,zero
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3],xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm15[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm15[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[21],zero,zero,ymm6[20],zero,ymm6[22],zero,ymm6[24],zero,zero,ymm6[23],zero,ymm6[25],zero,zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[26],zero,ymm6[28],zero,zero,ymm6[27],zero,ymm6[29],zero,ymm6[31],zero,zero,ymm6[30],zero
-; AVX512-FCP-NEXT: vmovdqa64 %xmm19, %xmm1
-; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm16, %xmm1
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm3, %xmm1
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm28, %zmm1, %zmm28
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm7[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm9[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm13[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm5[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm18, %xmm3
-; AVX512-FCP-NEXT: vmovdqa64 %xmm17, %xmm5
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm10, %xmm10
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm5, %xmm3
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm29, %zmm3, %zmm3
+; AVX512-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm8, %ymm2
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [1,1,2,2,2,2,2,2]
+; AVX512-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm10
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm12 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; AVX512-FCP-NEXT: vpandn %ymm10, %ymm12, %ymm10
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm10, %zmm2
+; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm10
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm28 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm29 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
+; AVX512-FCP-NEXT: vpermd %ymm10, %ymm28, %ymm30
+; AVX512-FCP-NEXT: vpandnq %ymm30, %ymm29, %ymm30
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm4
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm30, %zmm4, %zmm4
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm9[0],xmm14[0],xmm9[1],xmm14[1],xmm9[2],xmm14[2],xmm9[3],xmm14[3],xmm9[4],xmm14[4],xmm9[5],xmm14[5],xmm9[6],xmm14[6],xmm9[7],xmm14[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm9, %zmm3
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3],xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm5, %zmm0, %zmm0
; AVX512-FCP-NEXT: vmovdqa64 (%r8), %zmm5
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm5, %zmm11
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm21 = zmm21[2,2,3,3,6,6,7,7]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm29 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975]
-; AVX512-FCP-NEXT: vpternlogq $248, %zmm29, %zmm21, %zmm22
-; AVX512-FCP-NEXT: vpandq %ymm29, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm7, %zmm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512-FCP-NEXT: vporq %zmm7, %zmm1, %zmm1
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm22, %zmm7, %zmm1
-; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm31, %zmm8
-; AVX512-FCP-NEXT: vpternlogd $184, %zmm1, %zmm30, %zmm8
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm5, %zmm10
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm6
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm7 = [2,2,3,3,8,8,9,9]
+; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm7, %zmm11
+; AVX512-FCP-NEXT: vmovdqa64 %xmm31, %xmm6
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3],xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm6, %xmm6
+; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm7, %zmm1
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm21[2,2,3,3,6,6,7,7]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm22[2,2,3,3,6,6,7,7]
+; AVX512-FCP-NEXT: vporq %zmm6, %zmm7, %zmm6
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm24[2,2,3,3,6,6,7,7]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm26[2,2,3,3,6,6,7,7]
+; AVX512-FCP-NEXT: vporq %zmm7, %zmm9, %zmm7
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm6, %zmm9, %zmm7
+; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm28, %zmm8
+; AVX512-FCP-NEXT: vpternlogd $184, %zmm7, %zmm29, %zmm8
; AVX512-FCP-NEXT: vmovdqa64 %zmm8, 256(%r9)
-; AVX512-FCP-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm1 = mem[0,0,1,1]
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm8 = mem[0,0,1,1]
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm1, %zmm16, %zmm8
-; AVX512-FCP-NEXT: vpternlogq $248, %zmm25, %zmm8, %zmm26
-; AVX512-FCP-NEXT: vpor %ymm4, %ymm9, %ymm1
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm24, %zmm1
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm29, %ymm12, %ymm2
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm20, %zmm2
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm1, %zmm16, %zmm2
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm29, %ymm13, %ymm14
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm14, %zmm1
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm6
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm6, %zmm0
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm1, %zmm7, %zmm0
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm27
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm1 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
-; AVX512-FCP-NEXT: vpermd %zmm5, %zmm1, %zmm1
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm28[0,0,1,1,4,4,5,5]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm3[0,0,1,1,4,4,5,5]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
-; AVX512-FCP-NEXT: vpermd %zmm11, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512-FCP-NEXT: vmovdqa64 %zmm27, 64(%r9)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm0, (%r9)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 128(%r9)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm26, 192(%r9)
-; AVX512-FCP-NEXT: addq $24, %rsp
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm18[0,0,1,1]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm6, %zmm6
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm20[0,0,1,1]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm19, %zmm7, %zmm7
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm6, %zmm8, %zmm7
+; AVX512-FCP-NEXT: vpternlogq $248, %zmm12, %zmm7, %zmm2
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm25[2,2,3,3]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm23, %zmm6
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm16[2,2,3,3]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm27, %zmm7
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm6, %zmm8, %zmm7
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm11, %zmm9, %zmm1
+; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm4
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,0,1,1,4,4,5,5]
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm3 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
+; AVX512-FCP-NEXT: vpermd %zmm10, %zmm3, %zmm3
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
+; AVX512-FCP-NEXT: vpermd %zmm5, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 128(%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm4, 64(%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm3, (%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 192(%r9)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: store_i8_stride5_vf64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm3
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm15 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm15, %ymm3, %ymm0
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
-; AVX512DQ-NEXT: vpshufb %ymm9, %ymm2, %ymm1
+; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm11
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm11, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm18
+; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm5, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm19
+; AVX512DQ-NEXT: vporq %ymm0, %ymm1, %ymm20
+; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm12
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm12, %xmm0
+; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm28
+; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm10
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm2 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm10, %xmm1
+; AVX512DQ-NEXT: vmovdqa64 %xmm2, %xmm29
+; AVX512DQ-NEXT: vporq %xmm0, %xmm1, %xmm21
+; AVX512DQ-NEXT: vmovdqa 32(%rcx), %ymm15
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512DQ-NEXT: vpshufb %ymm8, %ymm15, %ymm0
+; AVX512DQ-NEXT: vmovdqa 32(%rdx), %ymm13
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm13, %ymm1
+; AVX512DQ-NEXT: vporq %ymm0, %ymm1, %ymm22
+; AVX512DQ-NEXT: vmovdqa 32(%rcx), %xmm6
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm6, %xmm0
+; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm30
+; AVX512DQ-NEXT: vmovdqa 32(%rdx), %xmm7
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm2 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm7, %xmm1
+; AVX512DQ-NEXT: vmovdqa64 %xmm2, %xmm31
+; AVX512DQ-NEXT: vporq %xmm0, %xmm1, %xmm23
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
+; AVX512DQ-NEXT: # ymm9 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm9, %ymm5, %ymm0
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
+; AVX512DQ-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm5, %ymm1
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm24
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30,27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30]
+; AVX512DQ-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm11, %ymm1
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0,19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0]
+; AVX512DQ-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm11, %ymm11
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm11, %zmm26
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512DQ-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm13, %ymm1
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
+; AVX512DQ-NEXT: # ymm0 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm15, %ymm14
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm14, %zmm25
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0,25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0]
+; AVX512DQ-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm15, %ymm14
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
+; AVX512DQ-NEXT: # ymm15 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm15, %ymm13, %ymm13
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm14, %zmm13, %zmm27
+; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm13
+; AVX512DQ-NEXT: vpshufb %ymm8, %ymm13, %ymm8
+; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm14
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm14, %ymm3
+; AVX512DQ-NEXT: vporq %ymm8, %ymm3, %ymm16
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm13, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm15, %ymm14, %ymm3
+; AVX512DQ-NEXT: vporq %ymm0, %ymm3, %ymm17
+; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm8
+; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm15
+; AVX512DQ-NEXT: vpshufb %ymm15, %ymm8, %ymm15
+; AVX512DQ-NEXT: vporq %ymm0, %ymm15, %ymm18
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm8, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512DQ-NEXT: vporq %ymm0, %ymm2, %ymm19
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm14, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm13, %ymm1
; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm1
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm5 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512DQ-NEXT: vpshufb %xmm5, %xmm1, %xmm0
-; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm16
-; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm4
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm14 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm4, %xmm1
-; AVX512DQ-NEXT: vmovdqa64 %xmm4, %xmm31
-; AVX512DQ-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa 32(%rcx), %ymm8
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm8, %ymm4
-; AVX512DQ-NEXT: vmovdqa 32(%rdx), %ymm11
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm11, %ymm10
-; AVX512DQ-NEXT: vpor %ymm4, %ymm10, %ymm4
-; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa 32(%rcx), %xmm13
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm6 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm13, %xmm4
-; AVX512DQ-NEXT: vmovdqa64 %xmm6, %xmm25
-; AVX512DQ-NEXT: vmovdqa 32(%rdx), %xmm10
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm6 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm10, %xmm12
-; AVX512DQ-NEXT: vmovdqa64 %xmm6, %xmm26
-; AVX512DQ-NEXT: vporq %xmm4, %xmm12, %xmm20
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm22
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[26],zero,ymm2[28],zero,zero,ymm2[27],zero,ymm2[29],zero,ymm2[31],zero,zero,ymm2[30],zero
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero,ymm2[25],zero,zero
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm23
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[27],zero,zero,ymm11[26],zero,ymm11[28],zero,ymm11[30],zero,zero,ymm11[29],zero,ymm11[31],zero,zero
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm8[19],zero,ymm8[21],zero,zero,ymm8[20],zero,ymm8[22],zero,ymm8[24],zero,zero,ymm8[23],zero
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm24
-; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm12
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm12, %ymm0
-; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm6
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm6, %ymm1
-; AVX512DQ-NEXT: vporq %ymm0, %ymm1, %ymm19
-; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm7
-; AVX512DQ-NEXT: vpshufb %ymm15, %ymm7, %ymm2
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512DQ-NEXT: vpshufb %ymm9, %ymm4, %ymm3
-; AVX512DQ-NEXT: vporq %ymm2, %ymm3, %ymm21
-; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512DQ-NEXT: vpshufb %xmm5, %xmm0, %xmm3
-; AVX512DQ-NEXT: vmovdqa64 %xmm0, %xmm17
-; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm5
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm5, %xmm9
-; AVX512DQ-NEXT: vporq %xmm3, %xmm9, %xmm27
-; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm1
-; AVX512DQ-NEXT: vmovdqa64 %xmm25, %xmm0
-; AVX512DQ-NEXT: vpshufb %xmm0, %xmm1, %xmm0
-; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm18
-; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm9
-; AVX512DQ-NEXT: vmovdqa64 %xmm26, %xmm1
-; AVX512DQ-NEXT: vpshufb %xmm1, %xmm9, %xmm15
-; AVX512DQ-NEXT: vporq %xmm0, %xmm15, %xmm29
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,2,2]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,1]
+; AVX512DQ-NEXT: vpshufb %ymm9, %ymm8, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm3, %ymm2
+; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm5
+; AVX512DQ-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm8
+; AVX512DQ-NEXT: vmovdqa64 %xmm28, %xmm2
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm5, %xmm2
+; AVX512DQ-NEXT: vmovdqa64 %xmm29, %xmm3
+; AVX512DQ-NEXT: vpshufb %xmm3, %xmm9, %xmm3
+; AVX512DQ-NEXT: vpor %xmm2, %xmm3, %xmm4
+; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm3
+; AVX512DQ-NEXT: vmovdqa 32(%r8), %ymm11
+; AVX512DQ-NEXT: vmovdqa64 %xmm30, %xmm2
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm8, %xmm2
+; AVX512DQ-NEXT: vmovdqa64 %xmm31, %xmm13
+; AVX512DQ-NEXT: vpshufb %xmm13, %xmm3, %xmm13
+; AVX512DQ-NEXT: vpor %xmm2, %xmm13, %xmm13
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm14 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
+; AVX512DQ-NEXT: vpshufb %ymm14, %ymm11, %ymm2
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm15 = mem[1,1,2,2]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,1,1]
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm28 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; AVX512DQ-NEXT: vpandnq %ymm0, %ymm28, %ymm0
-; AVX512DQ-NEXT: vmovdqa 32(%r8), %ymm15
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm15, %ymm14
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm14, %zmm0, %zmm26
-; AVX512DQ-NEXT: vmovdqa (%r8), %ymm0
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm0, %ymm1
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,1,4,6,5,5]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,3,2]
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm30 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
-; AVX512DQ-NEXT: vpandnq %ymm0, %ymm30, %ymm0
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm25
-; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm8, %ymm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm14 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm12[19],zero,ymm12[21],zero,zero,ymm12[20],zero,ymm12[22],zero,ymm12[24],zero,zero,ymm12[23],zero
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm12, %ymm12
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
-; AVX512DQ-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm11, %ymm11
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm6, %ymm2
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,ymm6[26],zero,ymm6[28],zero,ymm6[30],zero,zero,ymm6[29],zero,ymm6[31],zero,zero
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3],xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm13 = ymm7[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,ymm4[27],zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30],zero
-; AVX512DQ-NEXT: vmovdqa64 %xmm16, %xmm1
-; AVX512DQ-NEXT: vmovdqa64 %xmm31, %xmm4
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm1 = [4,0,5,5,5,5,0,6,6,6,6,0,7,7,7,7]
-; AVX512DQ-NEXT: vpermd %zmm15, %zmm1, %zmm31
-; AVX512DQ-NEXT: vmovdqa64 (%r8), %zmm16
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm1 = [6,6,6,0,7,7,7,7,0,16,16,16,16,0,17,17]
-; AVX512DQ-NEXT: vpermi2d %zmm15, %zmm16, %zmm1
-; AVX512DQ-NEXT: vmovdqa64 %xmm17, %xmm15
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm15[0],xmm5[0],xmm15[1],xmm5[1],xmm15[2],xmm5[2],xmm15[3],xmm5[3],xmm15[4],xmm5[4],xmm15[5],xmm5[5],xmm15[6],xmm5[6],xmm15[7],xmm5[7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm4, %xmm4
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm5, %xmm5
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm27, %zmm5, %zmm5
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512DQ-NEXT: vmovdqa64 %xmm18, %xmm15
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm15[0],xmm9[0],xmm15[1],xmm9[1],xmm15[2],xmm9[2],xmm15[3],xmm9[3],xmm15[4],xmm9[4],xmm15[5],xmm9[5],xmm15[6],xmm9[6],xmm15[7],xmm9[7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm15 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm10, %xmm10
+; AVX512DQ-NEXT: vpandnq %ymm15, %ymm28, %ymm15
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm15, %zmm2
+; AVX512DQ-NEXT: vmovdqa (%r8), %ymm15
+; AVX512DQ-NEXT: vpshufb %ymm14, %ymm15, %ymm14
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,2,1,1,4,6,5,5]
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm29 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,3,2]
+; AVX512DQ-NEXT: vpandnq %ymm15, %ymm29, %ymm15
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm15, %zmm14, %zmm14
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512DQ-NEXT: vpshufb %xmm7, %xmm6, %xmm6
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1]
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3],xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512DQ-NEXT: vpshufb %xmm12, %xmm10, %xmm10
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm9, %xmm9
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm29, %zmm9, %zmm9
-; AVX512DQ-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm15 = mem[0,0,1,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm15 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm17 = ymm20[0,0,1,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm17 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm18 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm15, %zmm18, %zmm17
-; AVX512DQ-NEXT: vpternlogq $248, %zmm28, %zmm17, %zmm26
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm15 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm17 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm20 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975]
-; AVX512DQ-NEXT: vpternlogq $248, %zmm20, %zmm15, %zmm17
-; AVX512DQ-NEXT: vpandq %ymm20, %ymm8, %ymm8
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm11, %zmm8
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm11 = zmm24[2,2,3,3,6,6,7,7]
-; AVX512DQ-NEXT: vporq %zmm11, %zmm8, %zmm8
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm17, %zmm11, %zmm8
-; AVX512DQ-NEXT: vpternlogd $184, %zmm8, %zmm30, %zmm31
-; AVX512DQ-NEXT: vpor %ymm2, %ymm14, %ymm2
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm19, %zmm2
-; AVX512DQ-NEXT: vpternlogq $248, %ymm20, %ymm13, %ymm0
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm21, %zmm0
-; AVX512DQ-NEXT: vpternlogq $226, %zmm2, %zmm18, %zmm0
-; AVX512DQ-NEXT: vpternlogq $248, %ymm20, %ymm12, %ymm6
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm6, %zmm2
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm3
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512DQ-NEXT: vpternlogq $226, %zmm2, %zmm11, %zmm3
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm25
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm1
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm5[0,0,1,1,4,4,5,5]
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm2 = zmm9[0,0,1,1,4,4,5,5]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
+; AVX512DQ-NEXT: vmovdqa64 (%r8), %zmm15
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm30 = [4,0,5,5,5,5,0,6,6,6,6,0,7,7,7,7]
+; AVX512DQ-NEXT: vpermd %zmm11, %zmm30, %zmm30
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm31 = [6,6,6,0,7,7,7,7,0,16,16,16,16,0,17,17]
+; AVX512DQ-NEXT: vpermi2d %zmm11, %zmm15, %zmm31
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
+; AVX512DQ-NEXT: vpshufb %xmm12, %xmm5, %xmm5
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm4, %zmm5, %zmm4
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3],xmm8[4],xmm3[4],xmm8[5],xmm3[5],xmm8[6],xmm3[6],xmm8[7],xmm3[7]
+; AVX512DQ-NEXT: vpshufb %xmm7, %xmm3, %xmm3
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm13, %zmm3, %zmm3
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm21[0,0,1,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm20, %zmm5, %zmm5
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm23[0,0,1,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm22, %zmm7, %zmm7
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
+; AVX512DQ-NEXT: vpternlogq $226, %zmm5, %zmm8, %zmm7
+; AVX512DQ-NEXT: vpternlogq $248, %zmm28, %zmm7, %zmm2
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm5 = zmm24[2,2,3,3,6,6,7,7]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm7 = zmm26[2,2,3,3,6,6,7,7]
+; AVX512DQ-NEXT: vporq %zmm5, %zmm7, %zmm5
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm7 = zmm25[2,2,3,3,6,6,7,7]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm9 = zmm27[2,2,3,3,6,6,7,7]
+; AVX512DQ-NEXT: vporq %zmm7, %zmm9, %zmm7
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512DQ-NEXT: vpternlogq $226, %zmm5, %zmm9, %zmm7
+; AVX512DQ-NEXT: vpternlogd $184, %zmm7, %zmm29, %zmm30
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm17[2,2,3,3]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm16, %zmm5
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm19[2,2,3,3]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm18, %zmm7
+; AVX512DQ-NEXT: vpternlogq $226, %zmm5, %zmm8, %zmm7
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpternlogq $226, %zmm0, %zmm9, %zmm1
+; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm14
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm31
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm4[0,0,1,1,4,4,5,5]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm1 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
-; AVX512DQ-NEXT: vpermd %zmm16, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512DQ-NEXT: vmovdqa64 %zmm25, 64(%r9)
+; AVX512DQ-NEXT: vpermd %zmm15, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512DQ-NEXT: vmovdqa64 %zmm14, 64(%r9)
; AVX512DQ-NEXT: vmovdqa64 %zmm0, (%r9)
-; AVX512DQ-NEXT: vmovdqa64 %zmm1, 128(%r9)
-; AVX512DQ-NEXT: vmovdqa64 %zmm31, 256(%r9)
-; AVX512DQ-NEXT: vmovdqa64 %zmm26, 192(%r9)
+; AVX512DQ-NEXT: vmovdqa64 %zmm31, 128(%r9)
+; AVX512DQ-NEXT: vmovdqa64 %zmm30, 256(%r9)
+; AVX512DQ-NEXT: vmovdqa64 %zmm2, 192(%r9)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: store_i8_stride5_vf64:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: subq $24, %rsp
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm0, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm2
-; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm2, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm4, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm4, %xmm19
-; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %ymm7
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %ymm9
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm9, %ymm4
-; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm4, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %xmm12
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm12, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm4, %xmm25
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %xmm10
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm10, %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm6, %xmm26
-; AVX512DQ-FCP-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm21
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm22
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm9[27],zero,zero,ymm9[26],zero,ymm9[28],zero,ymm9[30],zero,zero,ymm9[29],zero,ymm9[31],zero,zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm7[19],zero,ymm7[21],zero,zero,ymm7[20],zero,ymm7[22],zero,ymm7[24],zero,zero,ymm7[23],zero
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm23
-; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm4
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm14
-; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm14, %ymm1
-; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm1, %ymm24
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm15
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm15, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm6
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm6, %ymm1
-; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm1, %ymm20
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm16
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm3
-; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm3, %xmm2
-; AVX512DQ-FCP-NEXT: vporq %xmm1, %xmm2, %xmm28
-; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm25, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm8
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm1, %xmm18
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm26, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm11
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm1, %xmm17
-; AVX512DQ-FCP-NEXT: vporq %xmm8, %xmm11, %xmm29
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [1,1,2,2,2,2,2,2]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm3
+; AVX512DQ-FCP-NEXT: vporq %ymm2, %ymm3, %ymm17
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[8],zero,xmm3[u,7],zero,xmm3[9],zero,xmm3[u],zero,xmm3[u,10],zero,xmm3[12],zero,xmm3[u,11]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm31
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %xmm15
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,xmm15[8,u],zero,xmm15[7],zero,xmm15[9,u,11,u],zero,xmm15[10],zero,xmm15[12,u],zero
+; AVX512DQ-FCP-NEXT: vporq %xmm2, %xmm4, %xmm18
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %ymm11
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm6
+; AVX512DQ-FCP-NEXT: vporq %ymm4, %ymm6, %ymm19
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %xmm6
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm4
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm30
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm7, %xmm8
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm29
+; AVX512DQ-FCP-NEXT: vporq %xmm4, %xmm8, %xmm20
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
+; AVX512DQ-FCP-NEXT: # ymm13 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm21
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30,27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30]
+; AVX512DQ-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm28
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0,19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0]
+; AVX512DQ-FCP-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm22
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512DQ-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm11, %ymm0
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
+; AVX512DQ-FCP-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm8
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm24
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0,25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0]
+; AVX512DQ-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm0
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
+; AVX512DQ-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm11, %ymm2
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm26
+; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm11
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm10
+; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm10, %ymm23
+; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm11, %ymm1
+; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm1, %ymm25
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm10
+; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm10, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm12
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm12, %ymm1
+; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm1, %ymm27
+; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm12, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm10, %ymm1
+; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm1, %ymm16
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm9
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm9[8],zero,xmm9[u,7],zero,xmm9[9],zero,xmm9[u],zero,xmm9[u,10],zero,xmm9[12],zero,xmm9[u,11]
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm14
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,xmm14[8,u],zero,xmm14[7],zero,xmm14[9,u,11,u],zero,xmm14[10],zero,xmm14[12,u],zero
+; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm1, %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm13
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm30, %xmm0
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm13, %xmm1
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm29, %xmm5
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm5
+; AVX512DQ-FCP-NEXT: vpor %xmm1, %xmm5, %xmm5
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm11, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm2, %ymm11
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm12[26],zero,ymm12[28],zero,zero,ymm12[27],zero,ymm12[29],zero,ymm12[31],zero,zero,ymm12[30],zero
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm28, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm10, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %ymm8
-; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm11, %ymm11
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm25 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; AVX512DQ-FCP-NEXT: vpandnq %ymm11, %ymm25, %ymm11
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm8, %ymm13
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm11, %zmm26
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm31 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm30 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
-; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm11
-; AVX512DQ-FCP-NEXT: vpermd %ymm11, %ymm31, %ymm27
-; AVX512DQ-FCP-NEXT: vpandnq %ymm27, %ymm30, %ymm27
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm11, %ymm0
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm27, %zmm0, %zmm27
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm7
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm5
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
-; AVX512DQ-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm9, %ymm9
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm14, %ymm4
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,ymm14[26],zero,ymm14[28],zero,ymm14[30],zero,zero,ymm14[29],zero,ymm14[31],zero,zero
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3],xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm15[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm15[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[21],zero,zero,ymm6[20],zero,ymm6[22],zero,ymm6[24],zero,zero,ymm6[23],zero,ymm6[25],zero,zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[26],zero,ymm6[28],zero,zero,ymm6[27],zero,ymm6[29],zero,ymm6[31],zero,zero,ymm6[30],zero
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm16, %xmm1
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm3, %xmm1
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm28, %zmm1, %zmm28
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm7[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm9[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm13[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm5[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm18, %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm17, %xmm5
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm10, %xmm10
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm5, %xmm3
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm29, %zmm3, %zmm3
+; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm8, %ymm2
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [1,1,2,2,2,2,2,2]
+; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm10
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm12 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; AVX512DQ-FCP-NEXT: vpandn %ymm10, %ymm12, %ymm10
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm10, %zmm2
+; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm10
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm28 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm29 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
+; AVX512DQ-FCP-NEXT: vpermd %ymm10, %ymm28, %ymm30
+; AVX512DQ-FCP-NEXT: vpandnq %ymm30, %ymm29, %ymm30
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm4
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm30, %zmm4, %zmm4
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm9[0],xmm14[0],xmm9[1],xmm14[1],xmm9[2],xmm14[2],xmm9[3],xmm14[3],xmm9[4],xmm14[4],xmm9[5],xmm14[5],xmm9[6],xmm14[6],xmm9[7],xmm14[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm9, %zmm3
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3],xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm5, %zmm0, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 (%r8), %zmm5
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm5, %zmm11
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm21 = zmm21[2,2,3,3,6,6,7,7]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm29 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %zmm29, %zmm21, %zmm22
-; AVX512DQ-FCP-NEXT: vpandq %ymm29, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm7, %zmm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm7, %zmm1, %zmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm22, %zmm7, %zmm1
-; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm31, %zmm8
-; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm1, %zmm30, %zmm8
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm5, %zmm10
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm6
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm7 = [2,2,3,3,8,8,9,9]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm7, %zmm11
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm31, %xmm6
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3],xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm6, %xmm6
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm7, %zmm1
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm21[2,2,3,3,6,6,7,7]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm22[2,2,3,3,6,6,7,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm6, %zmm7, %zmm6
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm24[2,2,3,3,6,6,7,7]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm26[2,2,3,3,6,6,7,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm7, %zmm9, %zmm7
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm6, %zmm9, %zmm7
+; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm28, %zmm8
+; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm7, %zmm29, %zmm8
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, 256(%r9)
-; AVX512DQ-FCP-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm1 = mem[0,0,1,1]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm8 = mem[0,0,1,1]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm1, %zmm16, %zmm8
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %zmm25, %zmm8, %zmm26
-; AVX512DQ-FCP-NEXT: vpor %ymm4, %ymm9, %ymm1
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm24, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm29, %ymm12, %ymm2
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm20, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm1, %zmm16, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm29, %ymm13, %ymm14
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm14, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm6
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm6, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm1, %zmm7, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm27
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm1 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
-; AVX512DQ-FCP-NEXT: vpermd %zmm5, %zmm1, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm28[0,0,1,1,4,4,5,5]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm3[0,0,1,1,4,4,5,5]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm11, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm27, 64(%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, (%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 128(%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm26, 192(%r9)
-; AVX512DQ-FCP-NEXT: addq $24, %rsp
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm18[0,0,1,1]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm6, %zmm6
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm20[0,0,1,1]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm19, %zmm7, %zmm7
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm6, %zmm8, %zmm7
+; AVX512DQ-FCP-NEXT: vpternlogq $248, %zmm12, %zmm7, %zmm2
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm25[2,2,3,3]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm23, %zmm6
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm16[2,2,3,3]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm27, %zmm7
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm6, %zmm8, %zmm7
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm11, %zmm9, %zmm1
+; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm4
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,0,1,1,4,4,5,5]
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm3 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
+; AVX512DQ-FCP-NEXT: vpermd %zmm10, %zmm3, %zmm3
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
+; AVX512DQ-FCP-NEXT: vpermd %zmm5, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 128(%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, 64(%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, (%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, 192(%r9)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: store_i8_stride5_vf64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm3
+; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm2
; AVX512BW-NEXT: vmovdqa (%rcx), %ymm0
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm8 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512BW-NEXT: vpshufb %ymm8, %ymm0, %ymm2
+; AVX512BW-NEXT: vpshufb %ymm8, %ymm0, %ymm3
; AVX512BW-NEXT: vmovdqa (%rdx), %ymm1
; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm4 = ymm1[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,3,3,6,6,7,7]
; AVX512BW-NEXT: movl $693250386, %eax # imm = 0x29522952
; AVX512BW-NEXT: kmovd %eax, %k1
-; AVX512BW-NEXT: vmovdqu8 %ymm4, %ymm2 {%k1}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512BW-NEXT: vmovdqu8 %ymm4, %ymm3 {%k1}
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
; AVX512BW-NEXT: vmovdqa 32(%rdx), %xmm6
; AVX512BW-NEXT: vmovdqa 32(%rcx), %xmm12
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm12[0],xmm6[0],xmm12[1],xmm6[1],xmm12[2],xmm6[2],xmm12[3],xmm6[3],xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
; AVX512BW-NEXT: vpshufb %xmm7, %xmm4, %xmm4
; AVX512BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm10
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm10
; AVX512BW-NEXT: vmovdqa (%rsi), %ymm4
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm15 = [11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14]
-; AVX512BW-NEXT: vpshufb %ymm15, %ymm4, %ymm2
+; AVX512BW-NEXT: vpshufb %ymm15, %ymm4, %ymm3
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm5
; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm9 = ymm5[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[2,2,3,3,6,6,7,7]
; AVX512BW-NEXT: movl $1251232404, %eax # imm = 0x4A944A94
; AVX512BW-NEXT: kmovd %eax, %k5
-; AVX512BW-NEXT: vmovdqu8 %ymm9, %ymm2 {%k5}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512BW-NEXT: vmovdqu8 %ymm9, %ymm3 {%k5}
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
; AVX512BW-NEXT: vmovdqa 32(%rsi), %xmm13
; AVX512BW-NEXT: vmovdqa 32(%rdi), %xmm14
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
; AVX512BW-NEXT: vpshufb %xmm9, %xmm11, %xmm11
; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,0,1,1]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm2, %zmm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm3, %zmm3
; AVX512BW-NEXT: movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
; AVX512BW-NEXT: kmovq %rax, %k4
-; AVX512BW-NEXT: vmovdqu8 %zmm10, %zmm2 {%k4}
+; AVX512BW-NEXT: vmovdqu8 %zmm10, %zmm3 {%k4}
; AVX512BW-NEXT: vmovdqa64 32(%r8), %ymm16
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm10 = [6,6,6,6,7,7,7,7,16,16,16,16,16,16,17,17]
-; AVX512BW-NEXT: vpermi2d %zmm16, %zmm3, %zmm10
+; AVX512BW-NEXT: vpermi2d %zmm16, %zmm2, %zmm10
; AVX512BW-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
; AVX512BW-NEXT: kmovq %rax, %k2
-; AVX512BW-NEXT: vmovdqu8 %zmm10, %zmm2 {%k2}
+; AVX512BW-NEXT: vmovdqu8 %zmm10, %zmm3 {%k2}
; AVX512BW-NEXT: vmovdqa64 32(%rdx), %ymm23
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
; AVX512BW-NEXT: vpshufb %ymm10, %ymm23, %ymm17
@@ -5584,21 +5491,21 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm21 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
; AVX512BW-NEXT: vpshufb %xmm21, %xmm13, %xmm13
; AVX512BW-NEXT: vpor %xmm12, %xmm13, %xmm12
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm13 = ymm12[0,0,1,1]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm14 = ymm12[0,0,1,1]
; AVX512BW-NEXT: vmovdqa64 32(%rdi), %ymm25
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm12 = [3,3,3,0,4,4,4,4]
; AVX512BW-NEXT: vpermd %ymm25, %ymm12, %ymm17
; AVX512BW-NEXT: vmovdqa64 32(%rsi), %ymm26
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm14 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm13 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
; AVX512BW-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512BW-NEXT: kmovd %eax, %k3
-; AVX512BW-NEXT: vpshufb %ymm14, %ymm26, %ymm17 {%k3}
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm17, %zmm13, %zmm13
+; AVX512BW-NEXT: vpshufb %ymm13, %ymm26, %ymm17 {%k3}
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm17, %zmm14, %zmm14
; AVX512BW-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512BW-NEXT: kmovq %rax, %k2
-; AVX512BW-NEXT: vmovdqu8 %zmm13, %zmm6 {%k2}
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm13 = [3,3,3,3,0,4,4,4]
-; AVX512BW-NEXT: vpermd %ymm16, %ymm13, %ymm17
+; AVX512BW-NEXT: vmovdqu8 %zmm14, %zmm6 {%k2}
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm14 = [3,3,3,3,0,4,4,4]
+; AVX512BW-NEXT: vpermd %ymm16, %ymm14, %ymm17
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm18 = mem[1,1,2,2]
; AVX512BW-NEXT: vpermq {{.*#+}} ymm18 = ymm18[0,1,1,1]
; AVX512BW-NEXT: vinserti64x4 $1, %ymm17, %zmm18, %zmm17
@@ -5607,32 +5514,28 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vmovdqu8 %zmm17, %zmm6 {%k6}
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm17 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128]
; AVX512BW-NEXT: # ymm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT: vpshufb %ymm17, %ymm26, %ymm18
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm27 = ymm18[2,2,3,3]
+; AVX512BW-NEXT: vpshufb %ymm17, %ymm26, %ymm27
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm18 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
; AVX512BW-NEXT: vpshufb %ymm18, %ymm25, %ymm28
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3]
; AVX512BW-NEXT: vporq %ymm27, %ymm28, %ymm27
; AVX512BW-NEXT: vpshufb %ymm15, %ymm26, %ymm15
; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm25 = ymm25[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[2,2,3,3,6,6,7,7]
; AVX512BW-NEXT: vmovdqu8 %ymm25, %ymm15 {%k5}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
; AVX512BW-NEXT: vinserti64x4 $1, %ymm15, %zmm27, %zmm15
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm25 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
; AVX512BW-NEXT: vpshufb %ymm25, %ymm23, %ymm26
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,2,3,3]
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm27 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
; AVX512BW-NEXT: # ymm27 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpshufb %ymm27, %ymm24, %ymm28
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3]
; AVX512BW-NEXT: vporq %ymm26, %ymm28, %ymm26
; AVX512BW-NEXT: vpshufb %ymm8, %ymm24, %ymm8
; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm23 = ymm23[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm23 = ymm23[2,2,3,3,6,6,7,7]
; AVX512BW-NEXT: vmovdqu8 %ymm23, %ymm8 {%k1}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm26, %zmm8
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,2,3,3,6,6,7,7]
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,2,3,3,6,6,7,7]
; AVX512BW-NEXT: vmovdqu8 %zmm15, %zmm8 {%k4}
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm15 = [4,6,5,5,5,5,4,6,6,6,6,6,7,7,7,7]
; AVX512BW-NEXT: vpermd %zmm16, %zmm15, %zmm15
@@ -5661,33 +5564,31 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: kmovq %rax, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm7, %zmm9 {%k1}
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,1,1,2,2,2,2,2,2]
-; AVX512BW-NEXT: vpermd %zmm3, %zmm7, %zmm3
+; AVX512BW-NEXT: vpermd %zmm2, %zmm7, %zmm2
; AVX512BW-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm9 {%k1}
-; AVX512BW-NEXT: vpshufb %ymm25, %ymm1, %ymm3
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512BW-NEXT: vpshufb %ymm27, %ymm0, %ymm7
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512BW-NEXT: vpor %ymm3, %ymm7, %ymm3
-; AVX512BW-NEXT: vpshufb %ymm10, %ymm1, %ymm1
-; AVX512BW-NEXT: vpshufb %ymm11, %ymm0, %ymm0
+; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm9 {%k1}
+; AVX512BW-NEXT: vpshufb %ymm10, %ymm1, %ymm2
+; AVX512BW-NEXT: vpshufb %ymm11, %ymm0, %ymm7
+; AVX512BW-NEXT: vpor %ymm2, %ymm7, %ymm2
+; AVX512BW-NEXT: vpshufb %ymm25, %ymm1, %ymm1
+; AVX512BW-NEXT: vpshufb %ymm27, %ymm0, %ymm0
; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512BW-NEXT: vpermd %ymm5, %ymm12, %ymm1
-; AVX512BW-NEXT: vpshufb %ymm14, %ymm4, %ymm1 {%k3}
-; AVX512BW-NEXT: vpshufb %ymm17, %ymm4, %ymm3
+; AVX512BW-NEXT: vpshufb %ymm13, %ymm4, %ymm1 {%k3}
+; AVX512BW-NEXT: vpshufb %ymm17, %ymm4, %ymm2
; AVX512BW-NEXT: vpshufb %ymm18, %ymm5, %ymm4
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512BW-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512BW-NEXT: vpor %ymm2, %ymm4, %ymm2
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k2}
; AVX512BW-NEXT: vmovdqa (%r8), %ymm0
-; AVX512BW-NEXT: vpermd %ymm0, %ymm13, %ymm3
+; AVX512BW-NEXT: vpermd %ymm0, %ymm14, %ymm2
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,1,4,6,5,5]
; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,3,2]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512BW-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
; AVX512BW-NEXT: kmovq %rax, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k1}
@@ -5695,215 +5596,206 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vmovdqa64 %zmm9, (%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm8, 256(%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm6, 192(%r9)
-; AVX512BW-NEXT: vmovdqa64 %zmm2, 128(%r9)
+; AVX512BW-NEXT: vmovdqa64 %zmm3, 128(%r9)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: store_i8_stride5_vf64:
; AVX512BW-FCP: # %bb.0:
-; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %zmm5
+; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %zmm0
; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdx), %ymm21
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm21, %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm13
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm2, %ymm13, %ymm3
-; AVX512BW-FCP-NEXT: vpor %ymm0, %ymm3, %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm6
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rcx), %xmm18
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
-; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm18, %xmm3
-; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm7
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdx), %xmm20
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm20, %xmm4
-; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm21, %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm8
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm8, %ymm2
+; AVX512BW-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm11
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %xmm2
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
+; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm2, %xmm3
+; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm12
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm4
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
+; AVX512BW-FCP-NEXT: vpshufb %xmm15, %xmm4, %xmm5
+; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm5, %xmm3
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm9
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdi), %xmm17
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm17, %xmm3
-; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %xmm11
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rsi), %xmm19
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm19, %xmm4
-; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm3[0,0,1,1]
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdi), %ymm16
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [3,3,3,0,4,4,4,4]
-; AVX512BW-FCP-NEXT: vpermd %ymm16, %ymm3, %ymm22
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm13
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
+; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm3, %xmm9
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm16
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm5
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm20 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
+; AVX512BW-FCP-NEXT: vpshufb %xmm20, %xmm5, %xmm10
+; AVX512BW-FCP-NEXT: vpor %xmm9, %xmm10, %xmm9
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm18 = ymm9[0,0,1,1]
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdi), %ymm19
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [3,3,3,0,4,4,4,4]
+; AVX512BW-FCP-NEXT: vpermd %ymm19, %ymm9, %ymm22
; AVX512BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm23
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm4 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm10 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
; AVX512BW-FCP-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512BW-FCP-NEXT: kmovd %eax, %k1
-; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm23, %ymm22 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm15, %zmm15
+; AVX512BW-FCP-NEXT: vpshufb %ymm10, %ymm23, %ymm22 {%k1}
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm18, %zmm18
; AVX512BW-FCP-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm0 {%k2}
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm24
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [1,1,2,2,2,2,2,2,27,27,27,27,0,28,28,28]
-; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm24, %zmm15
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm1 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm22
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm18 = [1,1,2,2,2,2,2,2,27,27,27,27,0,28,28,28]
+; AVX512BW-FCP-NEXT: vpermi2d %zmm0, %zmm22, %zmm18
; AVX512BW-FCP-NEXT: movabsq $4760450083537948804, %rax # imm = 0x4210842108421084
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm0 {%k3}
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm23[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm15 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
-; AVX512BW-FCP-NEXT: vpshufb %zmm15, %zmm22, %zmm22
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm16[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
-; AVX512BW-FCP-NEXT: vpshufb %zmm16, %zmm23, %zmm23
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm1 {%k3}
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm23[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm18 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
+; AVX512BW-FCP-NEXT: vpshufb %zmm18, %zmm23, %zmm23
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm24 = zmm19[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm19 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
+; AVX512BW-FCP-NEXT: vpshufb %zmm19, %zmm24, %zmm24
+; AVX512BW-FCP-NEXT: vporq %zmm23, %zmm24, %zmm23
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm23 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512BW-FCP-NEXT: vporq %zmm22, %zmm23, %zmm23
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm21[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm21 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
-; AVX512BW-FCP-NEXT: vpshufb %zmm21, %zmm22, %zmm22
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm25 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm22 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %zmm22, %zmm13, %zmm13
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm13 = zmm13[2,2,3,3,6,6,7,7]
-; AVX512BW-FCP-NEXT: vporq %zmm25, %zmm13, %zmm13
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm24 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
+; AVX512BW-FCP-NEXT: vpshufb %zmm24, %zmm21, %zmm21
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm25 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %zmm25, %zmm8, %zmm8
+; AVX512BW-FCP-NEXT: vporq %zmm21, %zmm8, %zmm8
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,2,3,3,6,6,7,7]
; AVX512BW-FCP-NEXT: movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm13 {%k3}
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm23 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
-; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm24, %zmm23
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm8 {%k3}
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm21 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
+; AVX512BW-FCP-NEXT: vpermi2d %zmm0, %zmm22, %zmm21
; AVX512BW-FCP-NEXT: movabsq $-8925843906633654008, %rax # imm = 0x8421084210842108
; AVX512BW-FCP-NEXT: kmovq %rax, %k4
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm13 {%k4}
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %ymm23
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm24 = ymm23[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm23[27],zero,zero,ymm23[26],zero,ymm23[28],zero,ymm23[30],zero,zero,ymm23[29],zero,ymm23[31],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm24[2,2,3,3]
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %ymm24
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm26 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm24[27],zero,zero,ymm24[26],zero,ymm24[28],zero,ymm24[30],zero,zero,ymm24[29],zero,ymm24[31],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,2,3,3]
-; AVX512BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm18[0],xmm20[0],xmm18[1],xmm20[1],xmm18[2],xmm20[2],xmm18[3],xmm20[3],xmm18[4],xmm20[4],xmm18[5],xmm20[5],xmm18[6],xmm20[6],xmm18[7],xmm20[7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm26 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512BW-FCP-NEXT: vpshufb %xmm26, %xmm18, %xmm18
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm18 = ymm18[0,0,1,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm18, %zmm25, %zmm25
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %ymm18
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm20 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm18[26],zero,ymm18[28],zero,zero,zero,zero,ymm18[29],zero,ymm18[31],zero,zero,ymm18[30]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm27 = ymm20[2,2,3,3]
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %ymm20
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm28 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm20[26],zero,ymm20[28],zero,zero,ymm20[27],zero,ymm20[29],zero,ymm20[31],zero,zero,ymm20[30],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3]
-; AVX512BW-FCP-NEXT: vporq %ymm27, %ymm28, %ymm27
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm17 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3],xmm17[4],xmm19[4],xmm17[5],xmm19[5],xmm17[6],xmm19[6],xmm17[7],xmm19[7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm17, %xmm17
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm17 = ymm17[0,0,1,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm27, %zmm17
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm25, %zmm17 {%k3}
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm25 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
-; AVX512BW-FCP-NEXT: vpermd %zmm5, %zmm25, %zmm5
-; AVX512BW-FCP-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
-; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm17 {%k3}
-; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm5
-; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm7, %xmm8
-; AVX512BW-FCP-NEXT: vpor %xmm5, %xmm8, %xmm5
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512BW-FCP-NEXT: vpshufb %xmm26, %xmm6, %xmm6
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm5, %zmm6, %zmm5
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm5[0,0,1,1,4,4,5,5]
-; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm9, %xmm6
-; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm11, %xmm7
-; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3],xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
-; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm7, %xmm7
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm7, %zmm6
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm6[0,0,1,1,4,4,5,5]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm21, %zmm8 {%k4}
+; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm11, %xmm14
+; AVX512BW-FCP-NEXT: vpshufb %xmm15, %xmm12, %xmm15
+; AVX512BW-FCP-NEXT: vpor %xmm14, %xmm15, %xmm14
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3],xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm11
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm14, %zmm11, %zmm11
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm14 = zmm11[0,0,1,1,4,4,5,5]
+; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm13, %xmm11
+; AVX512BW-FCP-NEXT: vpshufb %xmm20, %xmm16, %xmm15
+; AVX512BW-FCP-NEXT: vpor %xmm11, %xmm15, %xmm11
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm13[0],xmm16[0],xmm13[1],xmm16[1],xmm13[2],xmm16[2],xmm13[3],xmm16[3],xmm13[4],xmm16[4],xmm13[5],xmm16[5],xmm13[6],xmm16[6],xmm13[7],xmm16[7]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512BW-FCP-NEXT: vpshufb %xmm15, %xmm13, %xmm13
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm11, %zmm13, %zmm11
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm11 = zmm11[0,0,1,1,4,4,5,5]
; AVX512BW-FCP-NEXT: movabsq $-4165393823095705204, %rax # imm = 0xC6318C6318C6318C
-; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm6 {%k3}
-; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
-; AVX512BW-FCP-NEXT: vpermd %zmm5, %zmm7, %zmm7
+; AVX512BW-FCP-NEXT: kmovq %rax, %k4
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm14, %zmm11 {%k4}
+; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm14 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
+; AVX512BW-FCP-NEXT: vpermd %zmm13, %zmm14, %zmm14
; AVX512BW-FCP-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
-; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm6 {%k3}
-; AVX512BW-FCP-NEXT: vpshufb %ymm21, %ymm24, %ymm7
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpshufb %ymm22, %ymm23, %ymm8
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm24, %ymm1
-; AVX512BW-FCP-NEXT: vpshufb %ymm2, %ymm23, %ymm2
-; AVX512BW-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm1, %zmm1
-; AVX512BW-FCP-NEXT: vpshufb %ymm15, %ymm18, %ymm2
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpshufb %ymm16, %ymm20, %ymm7
+; AVX512BW-FCP-NEXT: kmovq %rax, %k4
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm14, %zmm11 {%k4}
+; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm14
+; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm14, %ymm6
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %ymm16
+; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm16, %ymm7
+; AVX512BW-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
+; AVX512BW-FCP-NEXT: vpshufb %ymm24, %ymm14, %ymm7
+; AVX512BW-FCP-NEXT: vpshufb %ymm25, %ymm16, %ymm17
+; AVX512BW-FCP-NEXT: vporq %ymm7, %ymm17, %ymm7
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpor %ymm2, %ymm7, %ymm2
-; AVX512BW-FCP-NEXT: vpermd %ymm20, %ymm3, %ymm3
-; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm18, %ymm3 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k2}
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm1 = [3,3,3,3,0,4,4,4,12,14,13,13,13,13,12,14]
-; AVX512BW-FCP-NEXT: vpermd %zmm5, %zmm1, %zmm1
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm7
+; AVX512BW-FCP-NEXT: vpshufb %ymm18, %ymm7, %ymm17
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %ymm18
+; AVX512BW-FCP-NEXT: vpshufb %ymm19, %ymm18, %ymm19
+; AVX512BW-FCP-NEXT: vporq %ymm17, %ymm19, %ymm17
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm17 = ymm17[2,2,3,3]
+; AVX512BW-FCP-NEXT: vpermd %ymm18, %ymm9, %ymm9
+; AVX512BW-FCP-NEXT: vpshufb %ymm10, %ymm7, %ymm9 {%k1}
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm9, %zmm9
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k2}
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [3,3,3,3,0,4,4,4,12,14,13,13,13,13,12,14]
+; AVX512BW-FCP-NEXT: vpermd %zmm13, %zmm6, %zmm6
; AVX512BW-FCP-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 64(%r9)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, (%r9)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm17, 128(%r9)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, 256(%r9)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, 192(%r9)
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k1}
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm16[27],zero,zero,ymm16[26],zero,ymm16[28],zero,ymm16[30],zero,zero,ymm16[29],zero,ymm16[31],zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,ymm14[26],zero,ymm14[28],zero,ymm14[30],zero,zero,ymm14[29],zero,ymm14[31],zero,zero
+; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm6, %ymm4
+; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm6 = [2,2,3,3,8,8,9,9]
+; AVX512BW-FCP-NEXT: vpermt2q %zmm2, %zmm6, %zmm4
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm15, %xmm2, %xmm2
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm7[26],zero,ymm7[28],zero,zero,zero,zero,ymm7[29],zero,ymm7[31],zero,zero,ymm7[30]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm18[26],zero,ymm18[28],zero,zero,ymm18[27],zero,ymm18[29],zero,ymm18[31],zero,zero,ymm18[30],zero
+; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm5, %ymm3
+; AVX512BW-FCP-NEXT: vpermt2q %zmm2, %zmm6, %zmm3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm3 {%k3}
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm2 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
+; AVX512BW-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm0
+; AVX512BW-FCP-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
+; AVX512BW-FCP-NEXT: kmovq %rax, %k1
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm3 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, 128(%r9)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%r9)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm11, (%r9)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, 256(%r9)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, 192(%r9)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: store_i8_stride5_vf64:
; AVX512DQ-BW: # %bb.0:
-; AVX512DQ-BW-NEXT: vmovdqa64 (%r8), %zmm3
+; AVX512DQ-BW-NEXT: vmovdqa64 (%r8), %zmm2
; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %ymm0
; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm8 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm0, %ymm2
+; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm0, %ymm3
; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %ymm1
; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm4 = ymm1[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,3,3,6,6,7,7]
; AVX512DQ-BW-NEXT: movl $693250386, %eax # imm = 0x29522952
; AVX512DQ-BW-NEXT: kmovd %eax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %ymm4, %ymm2 {%k1}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512DQ-BW-NEXT: vmovdqu8 %ymm4, %ymm3 {%k1}
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
; AVX512DQ-BW-NEXT: vmovdqa 32(%rdx), %xmm6
; AVX512DQ-BW-NEXT: vmovdqa 32(%rcx), %xmm12
; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm12[0],xmm6[0],xmm12[1],xmm6[1],xmm12[2],xmm6[2],xmm12[3],xmm6[3],xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
; AVX512DQ-BW-NEXT: vpshufb %xmm7, %xmm4, %xmm4
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm10
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm10
; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %ymm4
; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm15 = [11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14]
-; AVX512DQ-BW-NEXT: vpshufb %ymm15, %ymm4, %ymm2
+; AVX512DQ-BW-NEXT: vpshufb %ymm15, %ymm4, %ymm3
; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm5
; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm9 = ymm5[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[2,2,3,3,6,6,7,7]
; AVX512DQ-BW-NEXT: movl $1251232404, %eax # imm = 0x4A944A94
; AVX512DQ-BW-NEXT: kmovd %eax, %k5
-; AVX512DQ-BW-NEXT: vmovdqu8 %ymm9, %ymm2 {%k5}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512DQ-BW-NEXT: vmovdqu8 %ymm9, %ymm3 {%k5}
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
; AVX512DQ-BW-NEXT: vmovdqa 32(%rsi), %xmm13
; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %xmm14
; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm11, %xmm11
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,0,1,1]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm3, %zmm3
; AVX512DQ-BW-NEXT: movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
; AVX512DQ-BW-NEXT: kmovq %rax, %k4
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm10, %zmm2 {%k4}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm10, %zmm3 {%k4}
; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %ymm16
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm10 = [6,6,6,6,7,7,7,7,16,16,16,16,16,16,17,17]
-; AVX512DQ-BW-NEXT: vpermi2d %zmm16, %zmm3, %zmm10
+; AVX512DQ-BW-NEXT: vpermi2d %zmm16, %zmm2, %zmm10
; AVX512DQ-BW-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
; AVX512DQ-BW-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm10, %zmm2 {%k2}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm10, %zmm3 {%k2}
; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdx), %ymm23
; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm23, %ymm17
@@ -5923,21 +5815,21 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm21 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
; AVX512DQ-BW-NEXT: vpshufb %xmm21, %xmm13, %xmm13
; AVX512DQ-BW-NEXT: vpor %xmm12, %xmm13, %xmm12
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm13 = ymm12[0,0,1,1]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm14 = ymm12[0,0,1,1]
; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdi), %ymm25
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm12 = [3,3,3,0,4,4,4,4]
; AVX512DQ-BW-NEXT: vpermd %ymm25, %ymm12, %ymm17
; AVX512DQ-BW-NEXT: vmovdqa64 32(%rsi), %ymm26
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm14 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm13 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
; AVX512DQ-BW-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512DQ-BW-NEXT: kmovd %eax, %k3
-; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm26, %ymm17 {%k3}
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm17, %zmm13, %zmm13
+; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm26, %ymm17 {%k3}
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm17, %zmm14, %zmm14
; AVX512DQ-BW-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512DQ-BW-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm13, %zmm6 {%k2}
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm13 = [3,3,3,3,0,4,4,4]
-; AVX512DQ-BW-NEXT: vpermd %ymm16, %ymm13, %ymm17
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm14, %zmm6 {%k2}
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm14 = [3,3,3,3,0,4,4,4]
+; AVX512DQ-BW-NEXT: vpermd %ymm16, %ymm14, %ymm17
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm18 = mem[1,1,2,2]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm18 = ymm18[0,1,1,1]
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm17, %zmm18, %zmm17
@@ -5946,32 +5838,28 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm17, %zmm6 {%k6}
; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm17 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128]
; AVX512DQ-BW-NEXT: # ymm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm26, %ymm18
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm27 = ymm18[2,2,3,3]
+; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm26, %ymm27
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm18 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
; AVX512DQ-BW-NEXT: vpshufb %ymm18, %ymm25, %ymm28
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3]
; AVX512DQ-BW-NEXT: vporq %ymm27, %ymm28, %ymm27
; AVX512DQ-BW-NEXT: vpshufb %ymm15, %ymm26, %ymm15
; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm25 = ymm25[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[2,2,3,3,6,6,7,7]
; AVX512DQ-BW-NEXT: vmovdqu8 %ymm25, %ymm15 {%k5}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm15, %zmm27, %zmm15
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm25 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm23, %ymm26
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,2,3,3]
; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm27 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
; AVX512DQ-BW-NEXT: # ymm27 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm24, %ymm28
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3]
; AVX512DQ-BW-NEXT: vporq %ymm26, %ymm28, %ymm26
; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm24, %ymm8
; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm23 = ymm23[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm23 = ymm23[2,2,3,3,6,6,7,7]
; AVX512DQ-BW-NEXT: vmovdqu8 %ymm23, %ymm8 {%k1}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm26, %zmm8
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,2,3,3,6,6,7,7]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,2,3,3,6,6,7,7]
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm15, %zmm8 {%k4}
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm15 = [4,6,5,5,5,5,4,6,6,6,6,6,7,7,7,7]
; AVX512DQ-BW-NEXT: vpermd %zmm16, %zmm15, %zmm15
@@ -6000,33 +5888,31 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm7, %zmm9 {%k1}
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,1,1,2,2,2,2,2,2]
-; AVX512DQ-BW-NEXT: vpermd %zmm3, %zmm7, %zmm3
+; AVX512DQ-BW-NEXT: vpermd %zmm2, %zmm7, %zmm2
; AVX512DQ-BW-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm3, %zmm9 {%k1}
-; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm1, %ymm3
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm0, %ymm7
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512DQ-BW-NEXT: vpor %ymm3, %ymm7, %ymm3
-; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm1, %ymm1
-; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm0, %ymm0
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm2, %zmm9 {%k1}
+; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm1, %ymm2
+; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm0, %ymm7
+; AVX512DQ-BW-NEXT: vpor %ymm2, %ymm7, %ymm2
+; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm1, %ymm1
+; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm0, %ymm0
; AVX512DQ-BW-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512DQ-BW-NEXT: vpermd %ymm5, %ymm12, %ymm1
-; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm4, %ymm1 {%k3}
-; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm4, %ymm3
+; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm4, %ymm1 {%k3}
+; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm4, %ymm2
; AVX512DQ-BW-NEXT: vpshufb %ymm18, %ymm5, %ymm4
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512DQ-BW-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512DQ-BW-NEXT: vpor %ymm2, %ymm4, %ymm2
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k2}
; AVX512DQ-BW-NEXT: vmovdqa (%r8), %ymm0
-; AVX512DQ-BW-NEXT: vpermd %ymm0, %ymm13, %ymm3
+; AVX512DQ-BW-NEXT: vpermd %ymm0, %ymm14, %ymm2
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,1,4,6,5,5]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,3,2]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512DQ-BW-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k1}
@@ -6034,166 +5920,157 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, (%r9)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, 256(%r9)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, 192(%r9)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, 128(%r9)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, 128(%r9)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: store_i8_stride5_vf64:
; AVX512DQ-BW-FCP: # %bb.0:
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %zmm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdx), %ymm21
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm21, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm13
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm2, %ymm13, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm0, %ymm3, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm6
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rcx), %xmm18
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm18, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm7
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdx), %xmm20
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm20, %xmm4
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm21, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm8
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm8, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm11
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %xmm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm2, %xmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm12
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm15, %xmm4, %xmm5
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm5, %xmm3
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm9
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdi), %xmm17
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm17, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %xmm11
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rsi), %xmm19
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm19, %xmm4
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm3[0,0,1,1]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdi), %ymm16
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [3,3,3,0,4,4,4,4]
-; AVX512DQ-BW-FCP-NEXT: vpermd %ymm16, %ymm3, %ymm22
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm13
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm3, %xmm9
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm16
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm20 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm20, %xmm5, %xmm10
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm9, %xmm10, %xmm9
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm18 = ymm9[0,0,1,1]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdi), %ymm19
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [3,3,3,0,4,4,4,4]
+; AVX512DQ-BW-FCP-NEXT: vpermd %ymm19, %ymm9, %ymm22
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm23
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm4 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm10 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
; AVX512DQ-BW-FCP-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k1
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm23, %ymm22 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm15, %zmm15
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm10, %ymm23, %ymm22 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm18, %zmm18
; AVX512DQ-BW-FCP-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm0 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm24
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [1,1,2,2,2,2,2,2,27,27,27,27,0,28,28,28]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm24, %zmm15
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm1 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm22
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm18 = [1,1,2,2,2,2,2,2,27,27,27,27,0,28,28,28]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm0, %zmm22, %zmm18
; AVX512DQ-BW-FCP-NEXT: movabsq $4760450083537948804, %rax # imm = 0x4210842108421084
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm0 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm23[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm15 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm15, %zmm22, %zmm22
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm16[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm16, %zmm23, %zmm23
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm1 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm23[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm18 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm18, %zmm23, %zmm23
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm24 = zmm19[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm19 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm19, %zmm24, %zmm24
+; AVX512DQ-BW-FCP-NEXT: vporq %zmm23, %zmm24, %zmm23
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm23 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-FCP-NEXT: vporq %zmm22, %zmm23, %zmm23
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm21[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm21 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm21, %zmm22, %zmm22
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm25 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm22 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm22, %zmm13, %zmm13
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm13 = zmm13[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-FCP-NEXT: vporq %zmm25, %zmm13, %zmm13
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm24 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm24, %zmm21, %zmm21
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm25 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm25, %zmm8, %zmm8
+; AVX512DQ-BW-FCP-NEXT: vporq %zmm21, %zmm8, %zmm8
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,2,3,3,6,6,7,7]
; AVX512DQ-BW-FCP-NEXT: movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm13 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm23 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm24, %zmm23
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm8 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm21 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm0, %zmm22, %zmm21
; AVX512DQ-BW-FCP-NEXT: movabsq $-8925843906633654008, %rax # imm = 0x8421084210842108
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k4
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm13 {%k4}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %ymm23
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm24 = ymm23[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm23[27],zero,zero,ymm23[26],zero,ymm23[28],zero,ymm23[30],zero,zero,ymm23[29],zero,ymm23[31],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm24[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %ymm24
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm26 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm24[27],zero,zero,ymm24[26],zero,ymm24[28],zero,ymm24[30],zero,zero,ymm24[29],zero,ymm24[31],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm18[0],xmm20[0],xmm18[1],xmm20[1],xmm18[2],xmm20[2],xmm18[3],xmm20[3],xmm18[4],xmm20[4],xmm18[5],xmm20[5],xmm18[6],xmm20[6],xmm18[7],xmm20[7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm26 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm26, %xmm18, %xmm18
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm18 = ymm18[0,0,1,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm18, %zmm25, %zmm25
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %ymm18
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm20 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm18[26],zero,ymm18[28],zero,zero,zero,zero,ymm18[29],zero,ymm18[31],zero,zero,ymm18[30]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm27 = ymm20[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %ymm20
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm28 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm20[26],zero,ymm20[28],zero,zero,ymm20[27],zero,ymm20[29],zero,ymm20[31],zero,zero,ymm20[30],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm27, %ymm28, %ymm27
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm17 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3],xmm17[4],xmm19[4],xmm17[5],xmm19[5],xmm17[6],xmm19[6],xmm17[7],xmm19[7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm17, %xmm17
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm17 = ymm17[0,0,1,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm27, %zmm17
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm25, %zmm17 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm25 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
-; AVX512DQ-BW-FCP-NEXT: vpermd %zmm5, %zmm25, %zmm5
-; AVX512DQ-BW-FCP-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm17 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm7, %xmm8
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm5, %xmm8, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm26, %xmm6, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm5, %zmm6, %zmm5
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm5[0,0,1,1,4,4,5,5]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm9, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm11, %xmm7
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3],xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm7, %xmm7
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm7, %zmm6
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm6[0,0,1,1,4,4,5,5]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm21, %zmm8 {%k4}
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm11, %xmm14
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm15, %xmm12, %xmm15
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm14, %xmm15, %xmm14
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3],xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm11
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm14, %zmm11, %zmm11
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm14 = zmm11[0,0,1,1,4,4,5,5]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm13, %xmm11
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm20, %xmm16, %xmm15
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm11, %xmm15, %xmm11
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm13[0],xmm16[0],xmm13[1],xmm16[1],xmm13[2],xmm16[2],xmm13[3],xmm16[3],xmm13[4],xmm16[4],xmm13[5],xmm16[5],xmm13[6],xmm16[6],xmm13[7],xmm16[7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm15, %xmm13, %xmm13
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm11, %zmm13, %zmm11
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm11 = zmm11[0,0,1,1,4,4,5,5]
; AVX512DQ-BW-FCP-NEXT: movabsq $-4165393823095705204, %rax # imm = 0xC6318C6318C6318C
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm6 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
-; AVX512DQ-BW-FCP-NEXT: vpermd %zmm5, %zmm7, %zmm7
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k4
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm14, %zmm11 {%k4}
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm14 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
+; AVX512DQ-BW-FCP-NEXT: vpermd %zmm13, %zmm14, %zmm14
; AVX512DQ-BW-FCP-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm6 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm21, %ymm24, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm22, %ymm23, %ymm8
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm24, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm2, %ymm23, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm1, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm15, %ymm18, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm16, %ymm20, %ymm7
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k4
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm14, %zmm11 {%k4}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm14
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm14, %ymm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %ymm16
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm16, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm24, %ymm14, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm25, %ymm16, %ymm17
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm7, %ymm17, %ymm7
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm2, %ymm7, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpermd %ymm20, %ymm3, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm18, %ymm3 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm1 = [3,3,3,3,0,4,4,4,12,14,13,13,13,13,12,14]
-; AVX512DQ-BW-FCP-NEXT: vpermd %zmm5, %zmm1, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm18, %ymm7, %ymm17
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %ymm18
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm19, %ymm18, %ymm19
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm17, %ymm19, %ymm17
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm17 = ymm17[2,2,3,3]
+; AVX512DQ-BW-FCP-NEXT: vpermd %ymm18, %ymm9, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm10, %ymm7, %ymm9 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm9, %zmm9
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [3,3,3,3,0,4,4,4,12,14,13,13,13,13,12,14]
+; AVX512DQ-BW-FCP-NEXT: vpermd %zmm13, %zmm6, %zmm6
; AVX512DQ-BW-FCP-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 64(%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, (%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm17, 128(%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, 256(%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, 192(%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm16[27],zero,zero,ymm16[26],zero,ymm16[28],zero,ymm16[30],zero,zero,ymm16[29],zero,ymm16[31],zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,ymm14[26],zero,ymm14[28],zero,ymm14[30],zero,zero,ymm14[29],zero,ymm14[31],zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm6, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm6 = [2,2,3,3,8,8,9,9]
+; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm2, %zmm6, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm15, %xmm2, %xmm2
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm7[26],zero,ymm7[28],zero,zero,zero,zero,ymm7[29],zero,ymm7[31],zero,zero,ymm7[30]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm18[26],zero,ymm18[28],zero,zero,ymm18[27],zero,ymm18[29],zero,ymm18[31],zero,zero,ymm18[30],zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm5, %ymm3
+; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm2, %zmm6, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm3 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm2 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
+; AVX512DQ-BW-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm0
+; AVX512DQ-BW-FCP-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm3 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, 128(%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm11, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, 256(%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, 192(%r9)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <64 x i8>, ptr %in.vecptr0, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
index 88144e7..de34e48 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
@@ -3004,428 +3004,412 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-LABEL: store_i8_stride6_vf32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512BW-NEXT: vmovdqa (%rsi), %ymm5
-; AVX512BW-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512BW-NEXT: vmovdqa (%rcx), %ymm4
-; AVX512BW-NEXT: vmovdqa (%r8), %ymm0
-; AVX512BW-NEXT: vmovdqa (%r9), %ymm1
-; AVX512BW-NEXT: vmovdqa (%rsi), %xmm9
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} xmm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-NEXT: vpshufb %xmm6, %xmm9, %xmm7
-; AVX512BW-NEXT: vmovdqa (%rdi), %xmm10
-; AVX512BW-NEXT: vpshufb %xmm6, %xmm10, %xmm6
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm6[0,0,0,1]
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm6 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-NEXT: vmovdqa (%rcx), %xmm11
-; AVX512BW-NEXT: vpshufb %xmm6, %xmm11, %xmm8
-; AVX512BW-NEXT: vmovdqa (%rdx), %xmm12
-; AVX512BW-NEXT: vpshufb %xmm6, %xmm12, %xmm13
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3],xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
-; AVX512BW-NEXT: movw $18724, %cx # imm = 0x4924
+; AVX512BW-NEXT: vmovdqa (%rdi), %ymm9
+; AVX512BW-NEXT: vmovdqa (%rsi), %ymm10
+; AVX512BW-NEXT: vmovdqa (%rdx), %ymm11
+; AVX512BW-NEXT: vmovdqa (%rcx), %ymm12
+; AVX512BW-NEXT: vmovdqa (%r8), %ymm7
+; AVX512BW-NEXT: vmovdqa (%r9), %ymm8
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm9[0],ymm10[0],ymm9[1],ymm10[1],ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[4],ymm10[4],ymm9[5],ymm10[5],ymm9[6],ymm10[6],ymm9[7],ymm10[7],ymm9[16],ymm10[16],ymm9[17],ymm10[17],ymm9[18],ymm10[18],ymm9[19],ymm10[19],ymm9[20],ymm10[20],ymm9[21],ymm10[21],ymm9[22],ymm10[22],ymm9[23],ymm10[23]
+; AVX512BW-NEXT: vmovdqa (%rsi), %xmm1
+; AVX512BW-NEXT: vmovdqa (%rdi), %xmm2
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm3 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
+; AVX512BW-NEXT: vpermw %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vmovdqa (%rcx), %xmm3
+; AVX512BW-NEXT: vmovdqa (%rdx), %xmm4
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm6 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
+; AVX512BW-NEXT: vpermw %ymm5, %ymm6, %ymm5
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm6 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[16],ymm12[16],ymm11[17],ymm12[17],ymm11[18],ymm12[18],ymm11[19],ymm12[19],ymm11[20],ymm12[20],ymm11[21],ymm12[21],ymm11[22],ymm12[22],ymm11[23],ymm12[23]
+; AVX512BW-NEXT: vprold $16, %ymm6, %ymm6
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512BW-NEXT: movl $613566756, %ecx # imm = 0x24924924
; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu16 %ymm7, %ymm8 {%k1}
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm7
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm13 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512BW-NEXT: vpermw %ymm8, %ymm13, %ymm8
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512BW-NEXT: vprold $16, %xmm13, %xmm13
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
-; AVX512BW-NEXT: movw $9362, %cx # imm = 0x2492
+; AVX512BW-NEXT: vmovdqu16 %zmm5, %zmm0 {%k1}
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[4],ymm8[4],ymm7[5],ymm8[5],ymm7[6],ymm8[6],ymm7[7],ymm8[7],ymm7[16],ymm8[16],ymm7[17],ymm8[17],ymm7[18],ymm8[18],ymm7[19],ymm8[19],ymm7[20],ymm8[20],ymm7[21],ymm8[21],ymm7[22],ymm8[22],ymm7[23],ymm8[23]
+; AVX512BW-NEXT: vmovdqa (%r9), %xmm5
+; AVX512BW-NEXT: vmovdqa (%r8), %xmm6
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm14 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm13, %zmm14, %zmm13
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm14 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
+; AVX512BW-NEXT: movl $1227133513, %ecx # imm = 0x49249249
; AVX512BW-NEXT: kmovd %ecx, %k2
-; AVX512BW-NEXT: vmovdqu16 %ymm13, %ymm8 {%k2}
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm7[4,5,6,7]
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512BW-NEXT: vmovdqa (%r9), %xmm13
-; AVX512BW-NEXT: vpshufb %xmm8, %xmm13, %xmm14
-; AVX512BW-NEXT: vmovdqa (%r8), %xmm15
-; AVX512BW-NEXT: vpshufb %xmm8, %xmm15, %xmm16
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm16 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm17 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512BW-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm16, %zmm14
-; AVX512BW-NEXT: movl $613566756, %ecx # imm = 0x24924924
-; AVX512BW-NEXT: kmovd %ecx, %k3
-; AVX512BW-NEXT: vmovdqu16 %zmm14, %zmm7 {%k3}
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm14 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm16 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512BW-NEXT: vpermw %ymm14, %ymm16, %ymm14
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm16 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512BW-NEXT: vprold $16, %ymm16, %ymm16
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm16 = ymm16[2,2,2,3]
-; AVX512BW-NEXT: vmovdqu16 %ymm16, %ymm14 {%k2}
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm0, %zmm14
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
+; AVX512BW-NEXT: vpermw %zmm13, %zmm14, %zmm0 {%k2}
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm13 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-NEXT: vpshufb %ymm13, %ymm10, %ymm14
+; AVX512BW-NEXT: vpshufb %ymm13, %ymm9, %ymm13
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[4],ymm14[4],ymm13[5],ymm14[5],ymm13[6],ymm14[6],ymm13[7],ymm14[7],ymm13[16],ymm14[16],ymm13[17],ymm14[17],ymm13[18],ymm14[18],ymm13[19],ymm14[19],ymm13[20],ymm14[20],ymm13[21],ymm14[21],ymm13[22],ymm14[22],ymm13[23],ymm14[23]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm14 = ymm11[8],ymm12[8],ymm11[9],ymm12[9],ymm11[10],ymm12[10],ymm11[11],ymm12[11],ymm11[12],ymm12[12],ymm11[13],ymm12[13],ymm11[14],ymm12[14],ymm11[15],ymm12[15],ymm11[24],ymm12[24],ymm11[25],ymm12[25],ymm11[26],ymm12[26],ymm11[27],ymm12[27],ymm11[28],ymm12[28],ymm11[29],ymm12[29],ymm11[30],ymm12[30],ymm11[31],ymm12[31]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm15 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512BW-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm13, %zmm13
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm14 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512BW-NEXT: vpshufb %ymm14, %ymm12, %ymm12
+; AVX512BW-NEXT: vpshufb %ymm14, %ymm11, %ymm11
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm11 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[16],ymm12[16],ymm11[17],ymm12[17],ymm11[18],ymm12[18],ymm11[19],ymm12[19],ymm11[20],ymm12[20],ymm11[21],ymm12[21],ymm11[22],ymm12[22],ymm11[23],ymm12[23]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm9[8],ymm10[8],ymm9[9],ymm10[9],ymm9[10],ymm10[10],ymm9[11],ymm10[11],ymm9[12],ymm10[12],ymm9[13],ymm10[13],ymm9[14],ymm10[14],ymm9[15],ymm10[15],ymm9[24],ymm10[24],ymm9[25],ymm10[25],ymm9[26],ymm10[26],ymm9[27],ymm10[27],ymm9[28],ymm10[28],ymm9[29],ymm10[29],ymm9[30],ymm10[30],ymm9[31],ymm10[31]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
; AVX512BW-NEXT: vpermw %ymm9, %ymm10, %ymm9
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512BW-NEXT: vpermw %ymm11, %ymm10, %ymm9 {%k1}
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm14[4,5,6,7]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm11 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
-; AVX512BW-NEXT: movl $1227133513, %ecx # imm = 0x49249249
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm9, %zmm11, %zmm9
+; AVX512BW-NEXT: movl $1227114788, %ecx # imm = 0x49244924
; AVX512BW-NEXT: kmovd %ecx, %k2
-; AVX512BW-NEXT: vpermw %zmm10, %zmm11, %zmm9 {%k2}
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm11 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm12 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512BW-NEXT: vpermw %ymm11, %ymm12, %ymm11
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512BW-NEXT: vpermw %ymm10, %ymm12, %ymm11 {%k1}
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm10
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512BW-NEXT: vpshufb %ymm11, %ymm3, %ymm3
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512BW-NEXT: vpshufb %ymm6, %ymm4, %ymm4
-; AVX512BW-NEXT: vpshufb %ymm6, %ymm2, %ymm2
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512BW-NEXT: vmovdqu16 %ymm3, %ymm2 {%k1}
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm10[4,5,6,7]
-; AVX512BW-NEXT: vpshufb %ymm8, %ymm1, %ymm3
-; AVX512BW-NEXT: vpshufb %ymm8, %ymm0, %ymm4
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
-; AVX512BW-NEXT: vpermw %ymm0, %ymm1, %ymm0
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vmovdqu16 %zmm13, %zmm9 {%k2}
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm10 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512BW-NEXT: vpshufb %ymm10, %ymm8, %ymm11
+; AVX512BW-NEXT: vpshufb %ymm10, %ymm7, %ymm12
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm11 = ymm12[0],ymm11[0],ymm12[1],ymm11[1],ymm12[2],ymm11[2],ymm12[3],ymm11[3],ymm12[4],ymm11[4],ymm12[5],ymm11[5],ymm12[6],ymm11[6],ymm12[7],ymm11[7],ymm12[16],ymm11[16],ymm12[17],ymm11[17],ymm12[18],ymm11[18],ymm12[19],ymm11[19],ymm12[20],ymm11[20],ymm12[21],ymm11[21],ymm12[22],ymm11[22],ymm12[23],ymm11[23]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm7[8],ymm8[8],ymm7[9],ymm8[9],ymm7[10],ymm8[10],ymm7[11],ymm8[11],ymm7[12],ymm8[12],ymm7[13],ymm8[13],ymm7[14],ymm8[14],ymm7[15],ymm8[15],ymm7[24],ymm8[24],ymm7[25],ymm8[25],ymm7[26],ymm8[26],ymm7[27],ymm8[27],ymm7[28],ymm8[28],ymm7[29],ymm8[29],ymm7[30],ymm8[30],ymm7[31],ymm8[31]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
+; AVX512BW-NEXT: vpermw %ymm7, %ymm8, %ymm7
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm11, %zmm7
; AVX512BW-NEXT: movl $-1840700270, %ecx # imm = 0x92492492
-; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1}
-; AVX512BW-NEXT: vmovdqa64 %zmm2, 128(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm9, 64(%rax)
+; AVX512BW-NEXT: kmovd %ecx, %k2
+; AVX512BW-NEXT: vmovdqu16 %zmm7, %zmm9 {%k2}
+; AVX512BW-NEXT: vpshufb %xmm14, %xmm3, %xmm7
+; AVX512BW-NEXT: vpshufb %xmm14, %xmm4, %xmm8
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm11 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512BW-NEXT: vpermw %ymm8, %ymm11, %ymm8
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} xmm8 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-NEXT: vpshufb %xmm8, %xmm1, %xmm1
+; AVX512BW-NEXT: vpshufb %xmm8, %xmm2, %xmm2
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX512BW-NEXT: vprold $16, %xmm2, %xmm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,0,0,1,4,4,4,5]
+; AVX512BW-NEXT: movl $1227105426, %ecx # imm = 0x49242492
+; AVX512BW-NEXT: kmovd %ecx, %k2
+; AVX512BW-NEXT: vmovdqu16 %zmm1, %zmm7 {%k2}
+; AVX512BW-NEXT: vpshufb %xmm10, %xmm5, %xmm1
+; AVX512BW-NEXT: vpshufb %xmm10, %xmm6, %xmm2
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm3 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
+; AVX512BW-NEXT: vpermw %ymm2, %ymm3, %ymm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512BW-NEXT: vmovdqu16 %zmm1, %zmm7 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm9, 128(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm0, 64(%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: store_i8_stride6_vf32:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm5
-; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm5
+; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm6
; AVX512BW-FCP-NEXT: vmovdqa (%r8), %ymm0
; AVX512BW-FCP-NEXT: vmovdqa (%r9), %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm9
-; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm10
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %xmm11
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm4 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm7
+; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm4
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm7[0],ymm4[1],ymm7[1],ymm4[2],ymm7[2],ymm4[3],ymm7[3],ymm4[4],ymm7[4],ymm4[5],ymm7[5],ymm4[6],ymm7[6],ymm4[7],ymm7[7],ymm4[16],ymm7[16],ymm4[17],ymm7[17],ymm4[18],ymm7[18],ymm4[19],ymm7[19],ymm4[20],ymm7[20],ymm4[21],ymm7[21],ymm4[22],ymm7[22],ymm4[23],ymm7[23]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm5[8],ymm6[8],ymm5[9],ymm6[9],ymm5[10],ymm6[10],ymm5[11],ymm6[11],ymm5[12],ymm6[12],ymm5[13],ymm6[13],ymm5[14],ymm6[14],ymm5[15],ymm6[15],ymm5[24],ymm6[24],ymm5[25],ymm6[25],ymm5[26],ymm6[26],ymm5[27],ymm6[27],ymm5[28],ymm6[28],ymm5[29],ymm6[29],ymm5[30],ymm6[30],ymm5[31],ymm6[31]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512BW-FCP-NEXT: vpermw %ymm7, %ymm8, %ymm7
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm4, %zmm7
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm4
+; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm9
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm9[0],ymm4[0],ymm9[1],ymm4[1],ymm9[2],ymm4[2],ymm9[3],ymm4[3],ymm9[4],ymm4[4],ymm9[5],ymm4[5],ymm9[6],ymm4[6],ymm9[7],ymm4[7],ymm9[16],ymm4[16],ymm9[17],ymm4[17],ymm9[18],ymm4[18],ymm9[19],ymm4[19],ymm9[20],ymm4[20],ymm9[21],ymm4[21],ymm9[22],ymm4[22],ymm9[23],ymm4[23]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512BW-FCP-NEXT: vpermw %ymm9, %ymm10, %ymm9
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT: movl $1227114788, %r10d # imm = 0x49244924
+; AVX512BW-FCP-NEXT: kmovd %r10d, %k1
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm7, %zmm4 {%k1}
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm9
+; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm10
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[4],ymm9[4],ymm10[5],ymm9[5],ymm10[6],ymm9[6],ymm10[7],ymm9[7],ymm10[16],ymm9[16],ymm10[17],ymm9[17],ymm10[18],ymm9[18],ymm10[19],ymm9[19],ymm10[20],ymm9[20],ymm10[21],ymm9[21],ymm10[22],ymm9[22],ymm10[23],ymm9[23]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
+; AVX512BW-FCP-NEXT: vpermw %ymm10, %ymm11, %ymm10
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512BW-FCP-NEXT: movl $-1840700270, %r10d # imm = 0x92492492
+; AVX512BW-FCP-NEXT: kmovd %r10d, %k1
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm9, %zmm4 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm10 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm11
; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512BW-FCP-NEXT: vpermw %ymm7, %ymm8, %ymm8
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm7 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
-; AVX512BW-FCP-NEXT: movw $9362, %cx # imm = 0x2492
-; AVX512BW-FCP-NEXT: kmovd %ecx, %k2
-; AVX512BW-FCP-NEXT: vpermw %ymm6, %ymm7, %ymm8 {%k2}
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm11, %xmm7
-; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm12, %xmm6
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,0,1]
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm13
-; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm10, %xmm14
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
-; AVX512BW-FCP-NEXT: movw $18724, %cx # imm = 0x4924
+; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm10
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm10[8],xmm11[8],xmm10[9],xmm11[9],xmm10[10],xmm11[10],xmm10[11],xmm11[11],xmm10[12],xmm11[12],xmm10[13],xmm11[13],xmm10[14],xmm11[14],xmm10[15],xmm11[15]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
+; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm11
+; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm13
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3],xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm15 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
+; AVX512BW-FCP-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm14, %zmm10
+; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm11, %xmm14
+; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm8
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3],xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm12[0],xmm9[0],xmm12[1],xmm9[1],xmm12[2],xmm9[2],xmm12[3],xmm9[3],xmm12[4],xmm9[4],xmm12[5],xmm9[5],xmm12[6],xmm9[6],xmm12[7],xmm9[7]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm15 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512BW-FCP-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm14, %zmm8
+; AVX512BW-FCP-NEXT: movl $1227105426, %ecx # imm = 0x49242492
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu16 %ymm6, %ymm13 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm0, %zmm6
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm8[0,1,2,3],zmm6[4,5,6,7]
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512BW-FCP-NEXT: vmovdqa (%r9), %xmm13
-; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm14
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm10, %zmm8 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa (%r9), %xmm10
+; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm10, %xmm14
; AVX512BW-FCP-NEXT: vmovdqa (%r8), %xmm15
-; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm15, %xmm16
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm16 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512BW-FCP-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm14, %zmm16, %zmm14
+; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm15, %xmm7
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3],xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm15[0],xmm10[0],xmm15[1],xmm10[1],xmm15[2],xmm10[2],xmm15[3],xmm10[3],xmm15[4],xmm10[4],xmm15[5],xmm10[5],xmm15[6],xmm10[6],xmm15[7],xmm10[7]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm16 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
+; AVX512BW-FCP-NEXT: vpermw %ymm14, %ymm16, %ymm14
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm14, %zmm7
; AVX512BW-FCP-NEXT: movl $613566756, %ecx # imm = 0x24924924
-; AVX512BW-FCP-NEXT: kmovd %ecx, %k3
-; AVX512BW-FCP-NEXT: vmovdqu16 %zmm14, %zmm6 {%k3}
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm14 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm16 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512BW-FCP-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [9,8,11,10,9,8,11,10,9,8,11,10,13,12,15,14]
-; AVX512BW-FCP-NEXT: vpermw %ymm14, %ymm17, %ymm16 {%k2}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm16, %zmm0, %zmm14
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
-; AVX512BW-FCP-NEXT: vpermw %ymm10, %ymm11, %ymm10
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512BW-FCP-NEXT: vpermw %ymm9, %ymm11, %ymm10 {%k1}
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,2,3],zmm14[4,5,6,7]
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm11 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
+; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm7, %zmm8 {%k1}
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[2],ymm6[2],ymm5[3],ymm6[3],ymm5[4],ymm6[4],ymm5[5],ymm6[5],ymm5[6],ymm6[6],ymm5[7],ymm6[7],ymm5[16],ymm6[16],ymm5[17],ymm6[17],ymm5[18],ymm6[18],ymm5[19],ymm6[19],ymm5[20],ymm6[20],ymm5[21],ymm6[21],ymm5[22],ymm6[22],ymm5[23],ymm6[23]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm13[8],xmm11[8],xmm13[9],xmm11[9],xmm13[10],xmm11[10],xmm13[11],xmm11[11],xmm13[12],xmm11[12],xmm13[13],xmm11[13],xmm13[14],xmm11[14],xmm13[15],xmm11[15]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm12[8],xmm9[8],xmm12[9],xmm9[9],xmm12[10],xmm9[10],xmm12[11],xmm9[11],xmm12[12],xmm9[12],xmm12[13],xmm9[13],xmm12[14],xmm9[14],xmm12[15],xmm9[15]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
+; AVX512BW-FCP-NEXT: vpermw %zmm2, %zmm3, %zmm2
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,25,24,27,26,25,24,27,26,25,24,27,26,29,28,31,30]
+; AVX512BW-FCP-NEXT: vpermw %zmm5, %zmm3, %zmm2 {%k1}
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm15[8],xmm10[8],xmm15[9],xmm10[9],xmm15[10],xmm10[10],xmm15[11],xmm10[11],xmm15[12],xmm10[12],xmm15[13],xmm10[13],xmm15[14],xmm10[14],xmm15[15],xmm10[15]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm1 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
; AVX512BW-FCP-NEXT: movl $1227133513, %ecx # imm = 0x49249249
-; AVX512BW-FCP-NEXT: kmovd %ecx, %k2
-; AVX512BW-FCP-NEXT: vpermw %zmm10, %zmm11, %zmm9 {%k2}
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm11 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512BW-FCP-NEXT: vpermw %ymm11, %ymm12, %ymm11
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512BW-FCP-NEXT: vpermw %ymm10, %ymm12, %ymm11 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm10
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FCP-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512BW-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm4
-; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512BW-FCP-NEXT: vmovdqu16 %ymm3, %ymm2 {%k1}
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm10[4,5,6,7]
-; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm1, %ymm3
-; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm4
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm1 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
-; AVX512BW-FCP-NEXT: vpermw %ymm0, %ymm1, %ymm0
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512BW-FCP-NEXT: movl $-1840700270, %ecx # imm = 0x92492492
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 128(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, (%rax)
+; AVX512BW-FCP-NEXT: vpermw %zmm0, %zmm1, %zmm2 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 64(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, (%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, 128(%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: store_i8_stride6_vf32:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %ymm5
-; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %ymm4
-; AVX512DQ-BW-NEXT: vmovdqa (%r8), %ymm0
-; AVX512DQ-BW-NEXT: vmovdqa (%r9), %ymm1
-; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %xmm9
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} xmm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm9, %xmm7
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm10
-; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm10, %xmm6
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm6[0,0,0,1]
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm6 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %xmm11
-; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm11, %xmm8
-; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm12
-; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm12, %xmm13
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3],xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
-; AVX512DQ-BW-NEXT: movw $18724, %cx # imm = 0x4924
+; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm9
+; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %ymm10
+; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %ymm11
+; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %ymm12
+; AVX512DQ-BW-NEXT: vmovdqa (%r8), %ymm7
+; AVX512DQ-BW-NEXT: vmovdqa (%r9), %ymm8
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm9[0],ymm10[0],ymm9[1],ymm10[1],ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[4],ymm10[4],ymm9[5],ymm10[5],ymm9[6],ymm10[6],ymm9[7],ymm10[7],ymm9[16],ymm10[16],ymm9[17],ymm10[17],ymm9[18],ymm10[18],ymm9[19],ymm10[19],ymm9[20],ymm10[20],ymm9[21],ymm10[21],ymm9[22],ymm10[22],ymm9[23],ymm10[23]
+; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %xmm1
+; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm2
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm3 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
+; AVX512DQ-BW-NEXT: vpermw %zmm0, %zmm3, %zmm0
+; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %xmm3
+; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm4
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm6 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
+; AVX512DQ-BW-NEXT: vpermw %ymm5, %ymm6, %ymm5
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm6 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[16],ymm12[16],ymm11[17],ymm12[17],ymm11[18],ymm12[18],ymm11[19],ymm12[19],ymm11[20],ymm12[20],ymm11[21],ymm12[21],ymm11[22],ymm12[22],ymm11[23],ymm12[23]
+; AVX512DQ-BW-NEXT: vprold $16, %ymm6, %ymm6
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512DQ-BW-NEXT: movl $613566756, %ecx # imm = 0x24924924
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu16 %ymm7, %ymm8 {%k1}
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm7
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm13 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512DQ-BW-NEXT: vpermw %ymm8, %ymm13, %ymm8
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512DQ-BW-NEXT: vprold $16, %xmm13, %xmm13
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
-; AVX512DQ-BW-NEXT: movw $9362, %cx # imm = 0x2492
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm5, %zmm0 {%k1}
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[4],ymm8[4],ymm7[5],ymm8[5],ymm7[6],ymm8[6],ymm7[7],ymm8[7],ymm7[16],ymm8[16],ymm7[17],ymm8[17],ymm7[18],ymm8[18],ymm7[19],ymm8[19],ymm7[20],ymm8[20],ymm7[21],ymm8[21],ymm7[22],ymm8[22],ymm7[23],ymm8[23]
+; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm5
+; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm6
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm14 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm13, %zmm14, %zmm13
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm14 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
+; AVX512DQ-BW-NEXT: movl $1227133513, %ecx # imm = 0x49249249
; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
-; AVX512DQ-BW-NEXT: vmovdqu16 %ymm13, %ymm8 {%k2}
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm7[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm13
-; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm13, %xmm14
-; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm15
-; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm15, %xmm16
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm16 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm17 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512DQ-BW-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm16, %zmm14
-; AVX512DQ-BW-NEXT: movl $613566756, %ecx # imm = 0x24924924
-; AVX512DQ-BW-NEXT: kmovd %ecx, %k3
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm14, %zmm7 {%k3}
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm14 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm16 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512DQ-BW-NEXT: vpermw %ymm14, %ymm16, %ymm14
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm16 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512DQ-BW-NEXT: vprold $16, %ymm16, %ymm16
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm16 = ymm16[2,2,2,3]
-; AVX512DQ-BW-NEXT: vmovdqu16 %ymm16, %ymm14 {%k2}
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm0, %zmm14
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
+; AVX512DQ-BW-NEXT: vpermw %zmm13, %zmm14, %zmm0 {%k2}
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm13 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm10, %ymm14
+; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm9, %ymm13
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[4],ymm14[4],ymm13[5],ymm14[5],ymm13[6],ymm14[6],ymm13[7],ymm14[7],ymm13[16],ymm14[16],ymm13[17],ymm14[17],ymm13[18],ymm14[18],ymm13[19],ymm14[19],ymm13[20],ymm14[20],ymm13[21],ymm14[21],ymm13[22],ymm14[22],ymm13[23],ymm14[23]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm14 = ymm11[8],ymm12[8],ymm11[9],ymm12[9],ymm11[10],ymm12[10],ymm11[11],ymm12[11],ymm11[12],ymm12[12],ymm11[13],ymm12[13],ymm11[14],ymm12[14],ymm11[15],ymm12[15],ymm11[24],ymm12[24],ymm11[25],ymm12[25],ymm11[26],ymm12[26],ymm11[27],ymm12[27],ymm11[28],ymm12[28],ymm11[29],ymm12[29],ymm11[30],ymm12[30],ymm11[31],ymm12[31]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm15 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512DQ-BW-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm13, %zmm13
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm14 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm12, %ymm12
+; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm11, %ymm11
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm11 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[16],ymm12[16],ymm11[17],ymm12[17],ymm11[18],ymm12[18],ymm11[19],ymm12[19],ymm11[20],ymm12[20],ymm11[21],ymm12[21],ymm11[22],ymm12[22],ymm11[23],ymm12[23]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm9[8],ymm10[8],ymm9[9],ymm10[9],ymm9[10],ymm10[10],ymm9[11],ymm10[11],ymm9[12],ymm10[12],ymm9[13],ymm10[13],ymm9[14],ymm10[14],ymm9[15],ymm10[15],ymm9[24],ymm10[24],ymm9[25],ymm10[25],ymm9[26],ymm10[26],ymm9[27],ymm10[27],ymm9[28],ymm10[28],ymm9[29],ymm10[29],ymm9[30],ymm10[30],ymm9[31],ymm10[31]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
; AVX512DQ-BW-NEXT: vpermw %ymm9, %ymm10, %ymm9
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512DQ-BW-NEXT: vpermw %ymm11, %ymm10, %ymm9 {%k1}
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm14[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm11 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
-; AVX512DQ-BW-NEXT: movl $1227133513, %ecx # imm = 0x49249249
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm9, %zmm11, %zmm9
+; AVX512DQ-BW-NEXT: movl $1227114788, %ecx # imm = 0x49244924
; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
-; AVX512DQ-BW-NEXT: vpermw %zmm10, %zmm11, %zmm9 {%k2}
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm11 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm12 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512DQ-BW-NEXT: vpermw %ymm11, %ymm12, %ymm11
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512DQ-BW-NEXT: vpermw %ymm10, %ymm12, %ymm11 {%k1}
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm10
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm3, %ymm3
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm4, %ymm4
-; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm2, %ymm2
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512DQ-BW-NEXT: vmovdqu16 %ymm3, %ymm2 {%k1}
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm10[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm1, %ymm3
-; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm0, %ymm4
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
-; AVX512DQ-BW-NEXT: vpermw %ymm0, %ymm1, %ymm0
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm13, %zmm9 {%k2}
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm10 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm8, %ymm11
+; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm7, %ymm12
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm11 = ymm12[0],ymm11[0],ymm12[1],ymm11[1],ymm12[2],ymm11[2],ymm12[3],ymm11[3],ymm12[4],ymm11[4],ymm12[5],ymm11[5],ymm12[6],ymm11[6],ymm12[7],ymm11[7],ymm12[16],ymm11[16],ymm12[17],ymm11[17],ymm12[18],ymm11[18],ymm12[19],ymm11[19],ymm12[20],ymm11[20],ymm12[21],ymm11[21],ymm12[22],ymm11[22],ymm12[23],ymm11[23]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm7[8],ymm8[8],ymm7[9],ymm8[9],ymm7[10],ymm8[10],ymm7[11],ymm8[11],ymm7[12],ymm8[12],ymm7[13],ymm8[13],ymm7[14],ymm8[14],ymm7[15],ymm8[15],ymm7[24],ymm8[24],ymm7[25],ymm8[25],ymm7[26],ymm8[26],ymm7[27],ymm8[27],ymm7[28],ymm8[28],ymm7[29],ymm8[29],ymm7[30],ymm8[30],ymm7[31],ymm8[31]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
+; AVX512DQ-BW-NEXT: vpermw %ymm7, %ymm8, %ymm7
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm11, %zmm7
; AVX512DQ-BW-NEXT: movl $-1840700270, %ecx # imm = 0x92492492
-; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, 128(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, 64(%rax)
+; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm7, %zmm9 {%k2}
+; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm3, %xmm7
+; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm4, %xmm8
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm11 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512DQ-BW-NEXT: vpermw %ymm8, %ymm11, %ymm8
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} xmm8 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm1, %xmm1
+; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm2, %xmm2
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX512DQ-BW-NEXT: vprold $16, %xmm2, %xmm2
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,0,0,1,4,4,4,5]
+; AVX512DQ-BW-NEXT: movl $1227105426, %ecx # imm = 0x49242492
+; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm1, %zmm7 {%k2}
+; AVX512DQ-BW-NEXT: vpshufb %xmm10, %xmm5, %xmm1
+; AVX512DQ-BW-NEXT: vpshufb %xmm10, %xmm6, %xmm2
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm3 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
+; AVX512DQ-BW-NEXT: vpermw %ymm2, %ymm3, %ymm2
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm1, %zmm7 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, (%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, 128(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, 64(%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: store_i8_stride6_vf32:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm6
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r8), %ymm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm9
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm10
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %xmm11
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm4 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm7[0],ymm4[1],ymm7[1],ymm4[2],ymm7[2],ymm4[3],ymm7[3],ymm4[4],ymm7[4],ymm4[5],ymm7[5],ymm4[6],ymm7[6],ymm4[7],ymm7[7],ymm4[16],ymm7[16],ymm4[17],ymm7[17],ymm4[18],ymm7[18],ymm4[19],ymm7[19],ymm4[20],ymm7[20],ymm4[21],ymm7[21],ymm4[22],ymm7[22],ymm4[23],ymm7[23]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm5[8],ymm6[8],ymm5[9],ymm6[9],ymm5[10],ymm6[10],ymm5[11],ymm6[11],ymm5[12],ymm6[12],ymm5[13],ymm6[13],ymm5[14],ymm6[14],ymm5[15],ymm6[15],ymm5[24],ymm6[24],ymm5[25],ymm6[25],ymm5[26],ymm6[26],ymm5[27],ymm6[27],ymm5[28],ymm6[28],ymm5[29],ymm6[29],ymm5[30],ymm6[30],ymm5[31],ymm6[31]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm7, %ymm8, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm4, %zmm7
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm9[0],ymm4[0],ymm9[1],ymm4[1],ymm9[2],ymm4[2],ymm9[3],ymm4[3],ymm9[4],ymm4[4],ymm9[5],ymm4[5],ymm9[6],ymm4[6],ymm9[7],ymm4[7],ymm9[16],ymm4[16],ymm9[17],ymm4[17],ymm9[18],ymm4[18],ymm9[19],ymm4[19],ymm9[20],ymm4[20],ymm9[21],ymm4[21],ymm9[22],ymm4[22],ymm9[23],ymm4[23]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm9, %ymm10, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT: movl $1227114788, %r10d # imm = 0x49244924
+; AVX512DQ-BW-FCP-NEXT: kmovd %r10d, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm7, %zmm4 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm10
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[4],ymm9[4],ymm10[5],ymm9[5],ymm10[6],ymm9[6],ymm10[7],ymm9[7],ymm10[16],ymm9[16],ymm10[17],ymm9[17],ymm10[18],ymm9[18],ymm10[19],ymm9[19],ymm10[20],ymm9[20],ymm10[21],ymm9[21],ymm10[22],ymm9[22],ymm10[23],ymm9[23]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm10, %ymm11, %ymm10
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512DQ-BW-FCP-NEXT: movl $-1840700270, %r10d # imm = 0x92492492
+; AVX512DQ-BW-FCP-NEXT: kmovd %r10d, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm9, %zmm4 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm10 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm11
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm7, %ymm8, %ymm8
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm7 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
-; AVX512DQ-BW-FCP-NEXT: movw $9362, %cx # imm = 0x2492
-; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k2
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm6, %ymm7, %ymm8 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm11, %xmm7
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm12, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm13
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm10, %xmm14
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
-; AVX512DQ-BW-FCP-NEXT: movw $18724, %cx # imm = 0x4924
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm10
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm10[8],xmm11[8],xmm10[9],xmm11[9],xmm10[10],xmm11[10],xmm10[11],xmm11[11],xmm10[12],xmm11[12],xmm10[13],xmm11[13],xmm10[14],xmm11[14],xmm10[15],xmm11[15]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm11
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm13
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3],xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm15 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm14, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm11, %xmm14
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm8
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3],xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm12[0],xmm9[0],xmm12[1],xmm9[1],xmm12[2],xmm9[2],xmm12[3],xmm9[3],xmm12[4],xmm9[4],xmm12[5],xmm9[5],xmm12[6],xmm9[6],xmm12[7],xmm9[7]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm15 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm14, %zmm8
+; AVX512DQ-BW-FCP-NEXT: movl $1227105426, %ecx # imm = 0x49242492
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm6, %ymm13 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm0, %zmm6
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm8[0,1,2,3],zmm6[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %xmm13
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm14
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm10, %zmm8 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %xmm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm10, %xmm14
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r8), %xmm15
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm15, %xmm16
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm16 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm14, %zmm16, %zmm14
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm15, %xmm7
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3],xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm15[0],xmm10[0],xmm15[1],xmm10[1],xmm15[2],xmm10[2],xmm15[3],xmm10[3],xmm15[4],xmm10[4],xmm15[5],xmm10[5],xmm15[6],xmm10[6],xmm15[7],xmm10[7]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm16 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm14, %ymm16, %ymm14
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm14, %zmm7
; AVX512DQ-BW-FCP-NEXT: movl $613566756, %ecx # imm = 0x24924924
-; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm14, %zmm6 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm14 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm16 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [9,8,11,10,9,8,11,10,9,8,11,10,13,12,15,14]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm14, %ymm17, %ymm16 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm16, %zmm0, %zmm14
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm10, %ymm11, %ymm10
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm9, %ymm11, %ymm10 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,2,3],zmm14[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm11 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
+; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm7, %zmm8 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[2],ymm6[2],ymm5[3],ymm6[3],ymm5[4],ymm6[4],ymm5[5],ymm6[5],ymm5[6],ymm6[6],ymm5[7],ymm6[7],ymm5[16],ymm6[16],ymm5[17],ymm6[17],ymm5[18],ymm6[18],ymm5[19],ymm6[19],ymm5[20],ymm6[20],ymm5[21],ymm6[21],ymm5[22],ymm6[22],ymm5[23],ymm6[23]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm13[8],xmm11[8],xmm13[9],xmm11[9],xmm13[10],xmm11[10],xmm13[11],xmm11[11],xmm13[12],xmm11[12],xmm13[13],xmm11[13],xmm13[14],xmm11[14],xmm13[15],xmm11[15]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm12[8],xmm9[8],xmm12[9],xmm9[9],xmm12[10],xmm9[10],xmm12[11],xmm9[11],xmm12[12],xmm9[12],xmm12[13],xmm9[13],xmm12[14],xmm9[14],xmm12[15],xmm9[15]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm2, %zmm3, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,25,24,27,26,25,24,27,26,25,24,27,26,29,28,31,30]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm5, %zmm3, %zmm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm15[8],xmm10[8],xmm15[9],xmm10[9],xmm15[10],xmm10[10],xmm15[11],xmm10[11],xmm15[12],xmm10[12],xmm15[13],xmm10[13],xmm15[14],xmm10[14],xmm15[15],xmm10[15]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm1 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
; AVX512DQ-BW-FCP-NEXT: movl $1227133513, %ecx # imm = 0x49249249
-; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k2
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm10, %zmm11, %zmm9 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm11 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm11, %ymm12, %ymm11
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm10, %ymm12, %ymm11 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm10
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm3, %ymm2 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm10[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm1, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm1 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm0, %ymm1, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512DQ-BW-FCP-NEXT: movl $-1840700270, %ecx # imm = 0x92492492
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 128(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm0, %zmm1, %zmm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 64(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, 128(%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <32 x i8>, ptr %in.vecptr0, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index 0495e24..8b6ba51 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -3689,10 +3689,9 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
; AVX2-NEXT: vpblendvb %ymm9, %ymm0, %ymm8, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm8 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm7[27],zero,ymm7[27,28,29,30],zero,ymm7[28],zero,ymm7[26,27,30,31],zero,ymm7[29]
-; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
; AVX2-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = [255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u]
; AVX2-NEXT: vpblendvb %ymm9, %ymm0, %ymm8, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
@@ -3772,18 +3771,16 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vpblendvb %ymm7, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[18],zero,zero,zero,zero,ymm5[21],zero,ymm5[19],zero,zero,zero,zero,ymm5[22],zero,ymm5[20]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpor %ymm1, %ymm7, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
; AVX2-NEXT: vpblendvb %ymm7, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[20],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX2-NEXT: vpshufb {{.*#+}} ymm7 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm12[18],zero,zero,zero,zero,ymm12[21],zero,ymm12[19],zero,zero,zero,zero,ymm12[22],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpor %ymm1, %ymm7, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vpshuflw {{.*#+}} ymm7 = ymm11[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
; AVX2-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[0,1,1,3,4,5,5,7]
@@ -3793,23 +3790,20 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0]
; AVX2-NEXT: vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[23],zero,ymm4[27,20,21,26],zero,ymm4[24],zero,ymm4[26,27,26,27],zero,ymm4[25]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero,ymm6[27],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpor %ymm1, %ymm7, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm7 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm5[25],zero,ymm5[23],zero,zero,zero,zero,ymm5[26],zero,ymm5[24],zero,zero,zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27]
-; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
; AVX2-NEXT: vpor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm8 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
; AVX2-NEXT: vpblendvb %ymm8, %ymm1, %ymm7, %ymm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm8 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm12[25],zero,ymm12[23],zero,zero,zero,zero,ymm12[26],zero,ymm12[24],zero,zero,zero
; AVX2-NEXT: vmovdqa %ymm12, %ymm13
-; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
; AVX2-NEXT: vpor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm8 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
; AVX2-NEXT: vmovdqa %ymm11, %ymm12
; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
@@ -3919,22 +3913,19 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
; AVX2-FP-NEXT: vpblendvb %ymm11, %ymm9, %ymm10, %ymm9
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[17,18,19,30],zero,ymm0[28],zero,ymm0[28,29,30,31],zero,ymm0[29],zero,ymm0[31]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm10, %ymm11, %ymm10
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29],zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm11, %ymm12, %ymm11
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm10, %ymm11, %ymm10
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm11 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,ymm6[27,28,29,30],zero,ymm6[28],zero,ymm6[26,27,30,31],zero,ymm6[29]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm12 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm8[27],zero,zero,zero,zero,ymm8[30],zero,ymm8[28],zero,zero,zero,zero,ymm8[31],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm11, %ymm12, %ymm11
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u]
; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm10, %ymm11, %ymm10
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm11 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
@@ -3942,22 +3933,19 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm10, %ymm11, %ymm10
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[23],zero,ymm1[27,20,21,26],zero,ymm1[24],zero,ymm1[26,27,26,27],zero,ymm1[25]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm12 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm11, %ymm12, %ymm11
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm12 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm13 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[25],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm12, %ymm13, %ymm12
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm13 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm11, %ymm12, %ymm11
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm12 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm8[25],zero,ymm8[23],zero,zero,zero,zero,ymm8[26],zero,ymm8[24],zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm13 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm12, %ymm13, %ymm12
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm13 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm14 = [0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u]
@@ -3965,22 +3953,19 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm13 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm11, %ymm12, %ymm11
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm12 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22],zero,ymm2[20]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm13 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm12, %ymm13, %ymm12
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm13 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20],zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm14 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[21],zero,ymm1[19],zero,zero,zero,zero,ymm1[22],zero,ymm1[20],zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm13, %ymm14, %ymm13
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm14 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
; AVX2-FP-NEXT: vpblendvb %ymm14, %ymm12, %ymm13, %ymm12
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm13 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm8[20],zero,ymm8[18],zero,zero,zero,zero,ymm8[21],zero,ymm8[19],zero,zero,zero,zero,ymm8[22]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm14 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm13, %ymm14, %ymm13
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm14 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,18,19,18,19,20,21,18,19,20,21,28,29,30,31]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,2]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm15 = [u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255]
@@ -4089,46 +4074,40 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
; AVX2-FCP-NEXT: vpblendvb %ymm11, %ymm9, %ymm10, %ymm9
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22],zero,ymm2[20]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm10, %ymm11, %ymm10
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20],zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[21],zero,ymm1[19],zero,zero,zero,zero,ymm1[22],zero,ymm1[20],zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm11, %ymm12, %ymm11
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm10, %ymm11, %ymm10
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} ymm11 = ymm7[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [4,5,4,5,5,7,4,5]
; AVX2-FCP-NEXT: vpermd %ymm11, %ymm12, %ymm11
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm8[20],zero,ymm8[18],zero,zero,zero,zero,ymm8[21],zero,ymm8[19],zero,zero,zero,zero,ymm8[22]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm12, %ymm13, %ymm12
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255]
; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm12, %ymm11, %ymm11
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0]
; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm10, %ymm11, %ymm10
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[17,18,19,30],zero,ymm0[28],zero,ymm0[28,29,30,31],zero,ymm0[29],zero,ymm0[31]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm11, %ymm12, %ymm11
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29],zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm12, %ymm13, %ymm12
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm11, %ymm12, %ymm11
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,ymm6[27,28,29,30],zero,ymm6[28],zero,ymm6[26,27,30,31],zero,ymm6[29]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm8[27],zero,zero,zero,zero,ymm8[30],zero,ymm8[28],zero,zero,zero,zero,ymm8[31],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm12, %ymm13, %ymm12
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u]
; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm11, %ymm12, %ymm11
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
@@ -4136,22 +4115,19 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm11, %ymm12, %ymm11
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[23],zero,ymm1[27,20,21,26],zero,ymm1[24],zero,ymm1[26,27,26,27],zero,ymm1[25]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm12, %ymm13, %ymm12
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[25],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm13, %ymm14, %ymm13
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
; AVX2-FCP-NEXT: vpblendvb %ymm14, %ymm12, %ymm13, %ymm12
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm8[25],zero,ymm8[23],zero,zero,zero,zero,ymm8[26],zero,ymm8[24],zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm13, %ymm14, %ymm13
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u]
@@ -4189,153 +4165,138 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512: # %bb.0:
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512-NEXT: vmovdqa (%rdx), %ymm11
-; AVX512-NEXT: vmovdqa (%rcx), %ymm3
-; AVX512-NEXT: vmovdqa (%r8), %ymm5
-; AVX512-NEXT: vmovdqa (%r9), %ymm6
-; AVX512-NEXT: vmovdqa (%r10), %ymm4
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,u,u,u,u,26,27,24,25]
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm8 = ymm4[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm9 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
-; AVX512-NEXT: vpermi2d %zmm7, %zmm8, %zmm9
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[20],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero
+; AVX512-NEXT: vmovdqa (%rdi), %ymm3
+; AVX512-NEXT: vmovdqa (%rsi), %ymm4
+; AVX512-NEXT: vmovdqa (%rdx), %ymm5
+; AVX512-NEXT: vmovdqa (%rcx), %ymm6
+; AVX512-NEXT: vmovdqa (%r8), %ymm1
+; AVX512-NEXT: vmovdqa (%r9), %ymm2
+; AVX512-NEXT: vmovdqa (%r10), %ymm0
+; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero,ymm3[25]
; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
; AVX512-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm5[18],zero,ymm5[20,21,20,21],zero,ymm5[19],zero,ymm5[19,20,21,22],zero
-; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm5[23],zero,ymm5[23,24,25,26],zero,ymm5[24],zero,ymm5[30,31]
-; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm5[18,19,20,21],zero,ymm5[19],zero,ymm5[25,26,27,22],zero,ymm5[20],zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm4[23,u,u,u],zero,ymm4[26],zero,ymm4[24,u,u,u],zero,ymm4[27],zero
+; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
; AVX512-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,3,2,3,6,7,6,7]
; AVX512-NEXT: vporq %zmm7, %zmm8, %zmm7
-; AVX512-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm7
-; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
-; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm16 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512-NEXT: # ymm16 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT: vpandq %ymm16, %ymm8, %ymm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm11[18,19,20,21],zero,ymm11[19],zero,ymm11[25,26,27,22],zero,ymm11[20],zero
-; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm9, %zmm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[21],zero,ymm4[19],zero,zero,zero,zero,ymm4[22],zero,ymm4[20],zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero
+; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,ymm3[19],zero,ymm3[21,20,21,22],zero,ymm3[20],zero,ymm3[22,23]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
; AVX512-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vporq %zmm9, %zmm8, %zmm9
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm8 = ymm2[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[0,0,1,1,4,4,5,5]
-; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm17 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512-NEXT: # ymm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT: vpandq %ymm17, %ymm8, %ymm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm11[23],zero,ymm11[21,22,23,26],zero,ymm11[24],zero,ymm11[28,29,26,27]
-; AVX512-NEXT: vmovdqa64 %ymm11, %ymm20
-; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[21],zero,ymm1[19],zero,zero,zero,zero,ymm1[22],zero,ymm1[20],zero,zero
-; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vporq %zmm10, %zmm8, %zmm8
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
-; AVX512-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm8
-; AVX512-NEXT: vmovdqa (%rsi), %xmm11
-; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm11[u],zero,xmm11[7],zero,xmm11[5,u,u,u],zero,xmm11[8],zero,xmm11[6,u,u,u],zero
-; AVX512-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm12[u,7],zero,xmm12[5],zero,xmm12[u,u,u,8],zero,xmm12[6],zero,xmm12[u,u,u,9]
-; AVX512-NEXT: vpor %xmm7, %xmm9, %xmm7
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512-NEXT: vinserti32x4 $2, %xmm7, %zmm9, %zmm7
-; AVX512-NEXT: vpermq {{.*#+}} zmm10 = zmm7[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vmovdqa (%rcx), %xmm7
-; AVX512-NEXT: vpshufb {{.*#+}} xmm13 = xmm7[u,u,u],zero,xmm7[7],zero,xmm7[5,u,u,u],zero,xmm7[8],zero,xmm7[6,u,u]
-; AVX512-NEXT: vmovdqa (%rdx), %xmm9
-; AVX512-NEXT: vpshufb {{.*#+}} xmm14 = xmm9[u,u,u,7],zero,xmm9[5],zero,xmm9[u,u,u,8],zero,xmm9[6],zero,xmm9[u,u]
-; AVX512-NEXT: vpor %xmm13, %xmm14, %xmm13
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3],xmm9[4],xmm7[4],xmm9[5],xmm7[5],xmm9[6],xmm7[6],xmm9[7],xmm7[7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm14 = xmm14[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512-NEXT: vinserti32x4 $2, %xmm13, %zmm14, %zmm13
-; AVX512-NEXT: vpermq {{.*#+}} zmm18 = zmm13[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm18
-; AVX512-NEXT: vmovdqa (%r9), %xmm13
-; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = zero,xmm13[4,u,u,u],zero,xmm13[7],zero,xmm13[5,u,u,u],zero,xmm13[8],zero,xmm13[6]
-; AVX512-NEXT: vmovdqa (%r8), %xmm14
-; AVX512-NEXT: vpshufb {{.*#+}} xmm15 = xmm14[4],zero,xmm14[u,u,u,7],zero,xmm14[5],zero,xmm14[u,u,u,8],zero,xmm14[6],zero
-; AVX512-NEXT: vpor %xmm10, %xmm15, %xmm10
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm15 = xmm15[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512-NEXT: vinserti32x4 $2, %xmm10, %zmm15, %zmm10
-; AVX512-NEXT: vpermq {{.*#+}} zmm19 = zmm10[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vmovdqa (%r10), %xmm15
-; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = xmm15[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm15[1,1,0,0,4,5,6,7]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
-; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
-; AVX512-NEXT: vpermq {{.*#+}} zmm10 = zmm0[0,0,1,0,4,4,5,4]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm19, %zmm10
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm10
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14,u,u],zero,zero,zero,zero,ymm1[15,u,u],zero,zero,zero,zero,ymm1[16,u,u],zero,zero,zero,zero,ymm1[17,u,u],zero,zero,zero,zero,ymm1[18]
+; AVX512-NEXT: vporq %zmm8, %zmm9, %zmm8
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm8
+; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,u,u,u,u,26,27,24,25]
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm9 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512-NEXT: vmovdqa64 %ymm0, %ymm18
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm10 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
+; AVX512-NEXT: vpermi2d %zmm7, %zmm9, %zmm10
+; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[20],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
+; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
; AVX512-NEXT: vmovdqa64 %ymm1, %ymm19
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[u,u,0,1,14,15],zero,ymm2[u,u,13,2,3,16],zero,ymm2[u,u,28,29,16,17],zero,ymm2[u,u,19,28,29,18],zero
-; AVX512-NEXT: vmovdqa64 %ymm2, %ymm18
+; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm9, %zmm9
+; AVX512-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vporq %zmm7, %zmm9, %zmm7
+; AVX512-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm7
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm7
+; AVX512-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm9[u],zero,xmm9[7],zero,xmm9[5,u,u,u],zero,xmm9[8],zero,xmm9[6,u,u,u],zero
+; AVX512-NEXT: vmovdqa (%rdi), %xmm10
+; AVX512-NEXT: vpshufb {{.*#+}} xmm11 = xmm10[u,7],zero,xmm10[5],zero,xmm10[u,u,u,8],zero,xmm10[6],zero,xmm10[u,u,u,9]
+; AVX512-NEXT: vpor %xmm8, %xmm11, %xmm8
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512-NEXT: vinserti32x4 $2, %xmm8, %zmm11, %zmm8
+; AVX512-NEXT: vpermq {{.*#+}} zmm8 = zmm8[0,1,0,1,4,5,4,5]
+; AVX512-NEXT: vmovdqa (%rcx), %xmm14
+; AVX512-NEXT: vpshufb {{.*#+}} xmm11 = xmm14[u,u,u],zero,xmm14[7],zero,xmm14[5,u,u,u],zero,xmm14[8],zero,xmm14[6,u,u]
+; AVX512-NEXT: vmovdqa (%rdx), %xmm15
+; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm15[u,u,u,7],zero,xmm15[5],zero,xmm15[u,u,u,8],zero,xmm15[6],zero,xmm15[u,u]
+; AVX512-NEXT: vpor %xmm11, %xmm12, %xmm11
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm12 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512-NEXT: vinserti32x4 $2, %xmm11, %zmm12, %zmm11
+; AVX512-NEXT: vpermq {{.*#+}} zmm16 = zmm11[0,1,0,1,4,5,4,5]
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm16
+; AVX512-NEXT: vmovdqa (%r9), %xmm11
+; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm11[4,u,u,u],zero,xmm11[7],zero,xmm11[5,u,u,u],zero,xmm11[8],zero,xmm11[6]
+; AVX512-NEXT: vmovdqa (%r8), %xmm12
+; AVX512-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[4],zero,xmm12[u,u,u,7],zero,xmm12[5],zero,xmm12[u,u,u,8],zero,xmm12[6],zero
+; AVX512-NEXT: vpor %xmm8, %xmm13, %xmm8
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512-NEXT: vinserti32x4 $2, %xmm8, %zmm13, %zmm8
+; AVX512-NEXT: vpermq {{.*#+}} zmm17 = zmm8[0,1,0,1,4,5,4,5]
+; AVX512-NEXT: vmovdqa (%r10), %xmm13
+; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm13[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm13[1,1,0,0,4,5,6,7]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512-NEXT: vpermq {{.*#+}} zmm8 = zmm0[0,0,1,0,4,4,5,4]
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm8
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm8
+; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm4[14,u,u],zero,zero,zero,zero,ymm4[15,u,u],zero,zero,zero,zero,ymm4[16,u,u],zero,zero,zero,zero,ymm4[17,u,u],zero,zero,zero,zero,ymm4[18]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[0,1,14],zero,ymm3[u,u,0,1,14,15],zero,ymm3[u,u,13,2,3,16],zero,ymm3[u,u,28,29,16,17],zero,ymm3[u,u,19,28,29,18],zero
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm14[8],xmm15[8],xmm14[9],xmm15[9],xmm14[10],xmm15[10],xmm14[11],xmm15[11],xmm14[12],xmm15[12],xmm14[13],xmm15[13],xmm14[14],xmm15[14],xmm14[15],xmm15[15]
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u,u,u]
-; AVX512-NEXT: vmovdqa64 %ymm20, %ymm2
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,14],zero,ymm2[u,u,u,u,u,15],zero,ymm2[u,u,u,u,u,16],zero,ymm2[u,u,u,u,u,17],zero,ymm2[u,u,u,u,u]
-; AVX512-NEXT: vpor %ymm1, %ymm7, %ymm1
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm7, %zmm1
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u],zero,ymm6[14,u,u,u,u,u],zero,ymm6[15,u,u,u,u,u],zero,ymm6[16,u,u,u,u,u],zero,ymm6[17,u,u,u,u,u]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm14 = ymm5[u,u,u,u,14],zero,ymm5[u,u,u,u,u,15],zero,ymm5[u,u,u,u,u,16],zero,ymm5[u,u,u,u,u,17],zero,ymm5[u,u,u,u,u]
+; AVX512-NEXT: vpor %ymm1, %ymm14, %ymm1
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm9, %zmm1
; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm5[u,u,u,u,u,14],zero,ymm5[u,u,u,u,u,15],zero,ymm5[u,u,u,u,u,16],zero,ymm5[u,u,u,u,u,17],zero,ymm5[u,u,u]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[13,u,u,u,u,u],zero,ymm6[14,u,u,u,u,u],zero,ymm6[15,u,u,u,u,u],zero,ymm6[16,u,u,u,u,u],zero,ymm6[17,u,u,u]
-; AVX512-NEXT: vpor %ymm0, %ymm7, %ymm0
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm7, %zmm0
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm7 = xmm15[0,1,2,3,4,5,5,6]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm7
-; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = zero,ymm4[13,u,u,u,u],zero,zero,ymm4[14,u,u,u,u],zero,zero,ymm4[15,u,u,u,u],zero,zero,ymm4[16,u,u,u,u],zero,zero,ymm4[17,u,u]
-; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm7, %zmm7
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm7
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm7
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
+; AVX512-NEXT: vmovdqa64 %ymm19, %ymm14
+; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm14[u,u,u,u,u,14],zero,ymm14[u,u,u,u,u,15],zero,ymm14[u,u,u,u,u,16],zero,ymm14[u,u,u,u,u,17],zero,ymm14[u,u,u]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[13,u,u,u,u,u],zero,ymm2[14,u,u,u,u,u],zero,ymm2[15,u,u,u,u,u],zero,ymm2[16,u,u,u,u,u],zero,ymm2[17,u,u,u]
+; AVX512-NEXT: vpor %ymm0, %ymm9, %ymm0
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm9, %zmm0
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm9 = xmm13[0,1,2,3,4,5,5,6]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[2,2,3,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm9
+; AVX512-NEXT: vmovdqa64 %ymm18, %ymm11
+; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm11[13,u,u,u,u],zero,zero,ymm11[14,u,u,u,u],zero,zero,ymm11[15,u,u,u,u],zero,zero,ymm11[16,u,u,u,u],zero,zero,ymm11[17,u,u]
+; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm9
+; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm6[30],zero,ymm6[28,u,u,u],zero,ymm6[31],zero,ymm6[29,u]
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-NEXT: vpternlogq $248, %ymm16, %ymm0, %ymm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
+; AVX512-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
-; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512-NEXT: vpternlogq $248, %ymm17, %ymm1, %ymm2
-; AVX512-NEXT: vmovdqa64 %ymm19, %ymm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
-; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-NEXT: vmovdqa64 %ymm18, %ymm3
-; AVX512-NEXT: vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,2,3,3,6,6,7,7]
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
-; AVX512-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
-; AVX512-NEXT: vmovdqa %ymm3, 192(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm10, (%rax)
-; AVX512-NEXT: vmovdqa64 %zmm8, 128(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm7, 64(%rax)
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512-NEXT: vmovdqa %ymm2, 192(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm8, (%rax)
+; AVX512-NEXT: vmovdqa64 %zmm7, 128(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm9, 64(%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -4343,12 +4304,12 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm3
; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm4
; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm5
; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm6
; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm1
-; AVX512-FCP-NEXT: vmovdqa (%r9), %ymm3
+; AVX512-FCP-NEXT: vmovdqa (%r9), %ymm2
; AVX512-FCP-NEXT: vmovdqa64 (%r10), %ymm17
; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm8
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm8[u],zero,xmm8[7],zero,xmm8[5,u,u,u],zero,xmm8[8],zero,xmm8[6,u,u,u],zero
@@ -4388,7 +4349,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm7
; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm7
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm4[14,u,u],zero,zero,zero,zero,ymm4[15,u,u],zero,zero,zero,zero,ymm4[16,u,u],zero,zero,zero,zero,ymm4[17,u,u],zero,zero,zero,zero,ymm4[18]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm2[0,1,14],zero,ymm2[u,u,0,1,14,15],zero,ymm2[u,u,13,2,3,16],zero,ymm2[u,u,28,29,16,17],zero,ymm2[u,u,19,28,29,18],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm3[0,1,14],zero,ymm3[u,u,0,1,14,15],zero,ymm3[u,u,13,2,3,16],zero,ymm3[u,u,28,29,16,17],zero,ymm3[u,u,19,28,29,18],zero
; AVX512-FCP-NEXT: vpor %ymm0, %ymm15, %ymm0
; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
@@ -4403,7 +4364,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm8, %zmm9
; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm1[u,u,u,u,u,14],zero,ymm1[u,u,u,u,u,15],zero,ymm1[u,u,u,u,u,16],zero,ymm1[u,u,u,u,u,17],zero,ymm1[u,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[13,u,u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[13,u,u,u,u,u],zero,ymm2[14,u,u,u,u,u],zero,ymm2[15,u,u,u,u,u],zero,ymm2[16,u,u,u,u,u],zero,ymm2[17,u,u,u]
; AVX512-FCP-NEXT: vpor %ymm0, %ymm8, %ymm0
; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm8 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
@@ -4414,74 +4375,67 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: # ymm10 = mem[0,1,0,1]
; AVX512-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm8
; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm8
-; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm13
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm13[13,u,u,u,u],zero,zero,ymm13[14,u,u,u,u],zero,zero,ymm13[15,u,u,u,u],zero,zero,ymm13[16,u,u,u,u],zero,zero,ymm13[17,u,u]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm12
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm12[13,u,u,u,u],zero,zero,ymm12[14,u,u,u,u],zero,zero,ymm12[15,u,u,u,u],zero,zero,ymm12[16,u,u,u,u],zero,zero,ymm12[17,u,u]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm8
; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpand %ymm0, %ymm9, %ymm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm5[18,19,20,21],zero,ymm5[19],zero,ymm5[25,26,27,22],zero,ymm5[20],zero
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm10, %zmm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero,ymm3[25]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm5[18,19,20,21],zero,ymm5[19],zero,ymm5[25,26,27,22],zero,ymm5[20],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm4[23,u,u,u],zero,ymm4[26],zero,ymm4[24,u,u,u],zero,ymm4[27],zero
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vporq %zmm0, %zmm9, %zmm0
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[21],zero,ymm4[19],zero,zero,zero,zero,ymm4[22],zero,ymm4[20],zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,ymm3[19],zero,ymm3[21,20,21,22],zero,ymm3[20],zero,ymm3[22,23]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm10, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[21],zero,ymm4[19],zero,zero,zero,zero,ymm4[22],zero,ymm4[20],zero,zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero
+; AVX512-FCP-NEXT: vporq %zmm9, %zmm10, %zmm9
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[20],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,ymm2[19],zero,ymm2[21,20,21,22],zero,ymm2[20],zero,ymm2[22,23]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm11 = zmm11[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm10, %zmm11, %zmm10
+; AVX512-FCP-NEXT: vporq %zmm0, %zmm10, %zmm0
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm10 = ymm12[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [0,5,4,0,5,0,4,0]
+; AVX512-FCP-NEXT: vpermd %ymm10, %ymm11, %ymm10
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm10
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[20],zero,ymm3[18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm11 = zmm11[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm0, %zmm11, %zmm0
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm11 = ymm13[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [0,5,4,0,5,0,4,0]
-; AVX512-FCP-NEXT: vpermd %ymm11, %ymm12, %ymm11
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm11
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm10
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm6[30],zero,ymm6[28,u,u,u],zero,ymm6[31],zero,ymm6[29,u]
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm5, %ymm0
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm5
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512-FCP-NEXT: vpor %ymm4, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm2[28],zero,ymm2[30,31,30,31],zero,ymm2[29],zero,ymm2[31,28,29]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512-FCP-NEXT: vpor %ymm0, %ymm2, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm9, %ymm2, %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
-; AVX512-FCP-NEXT: vmovdqa %ymm2, 192(%rax)
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm1
+; AVX512-FCP-NEXT: vmovdqa %ymm1, 192(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm7, (%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm11, 128(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm10, 128(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm8, 64(%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
@@ -4490,153 +4444,138 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm11
-; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm3
-; AVX512DQ-NEXT: vmovdqa (%r8), %ymm5
-; AVX512DQ-NEXT: vmovdqa (%r9), %ymm6
-; AVX512DQ-NEXT: vmovdqa (%r10), %ymm4
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,u,u,u,u,26,27,24,25]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm8 = ymm4[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm9 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
-; AVX512DQ-NEXT: vpermi2d %zmm7, %zmm8, %zmm9
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[20],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero
+; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm3
+; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm4
+; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm5
+; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm6
+; AVX512DQ-NEXT: vmovdqa (%r8), %ymm1
+; AVX512DQ-NEXT: vmovdqa (%r9), %ymm2
+; AVX512DQ-NEXT: vmovdqa (%r10), %ymm0
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero,ymm3[25]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm5[18],zero,ymm5[20,21,20,21],zero,ymm5[19],zero,ymm5[19,20,21,22],zero
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm5[23],zero,ymm5[23,24,25,26],zero,ymm5[24],zero,ymm5[30,31]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm5[18,19,20,21],zero,ymm5[19],zero,ymm5[25,26,27,22],zero,ymm5[20],zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm4[23,u,u,u],zero,ymm4[26],zero,ymm4[24,u,u,u],zero,ymm4[27],zero
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,3,2,3,6,7,6,7]
; AVX512DQ-NEXT: vporq %zmm7, %zmm8, %zmm7
-; AVX512DQ-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm7
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm16 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512DQ-NEXT: # ymm16 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-NEXT: vpandq %ymm16, %ymm8, %ymm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm11[18,19,20,21],zero,ymm11[19],zero,ymm11[25,26,27,22],zero,ymm11[20],zero
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm9, %zmm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[21],zero,ymm4[19],zero,zero,zero,zero,ymm4[22],zero,ymm4[20],zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,ymm3[19],zero,ymm3[21,20,21,22],zero,ymm3[20],zero,ymm3[22,23]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vporq %zmm9, %zmm8, %zmm9
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm8 = ymm2[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[0,0,1,1,4,4,5,5]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm17 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512DQ-NEXT: # ymm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-NEXT: vpandq %ymm17, %ymm8, %ymm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm11[23],zero,ymm11[21,22,23,26],zero,ymm11[24],zero,ymm11[28,29,26,27]
-; AVX512DQ-NEXT: vmovdqa64 %ymm11, %ymm20
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[21],zero,ymm1[19],zero,zero,zero,zero,ymm1[22],zero,ymm1[20],zero,zero
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vporq %zmm10, %zmm8, %zmm8
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
-; AVX512DQ-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm8
-; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm11
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm11[u],zero,xmm11[7],zero,xmm11[5,u,u,u],zero,xmm11[8],zero,xmm11[6,u,u,u],zero
-; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm12[u,7],zero,xmm12[5],zero,xmm12[u,u,u,8],zero,xmm12[6],zero,xmm12[u,u,u,9]
-; AVX512DQ-NEXT: vpor %xmm7, %xmm9, %xmm7
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm7, %zmm9, %zmm7
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm10 = zmm7[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm7
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm13 = xmm7[u,u,u],zero,xmm7[7],zero,xmm7[5,u,u,u],zero,xmm7[8],zero,xmm7[6,u,u]
-; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm9
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm14 = xmm9[u,u,u,7],zero,xmm9[5],zero,xmm9[u,u,u,8],zero,xmm9[6],zero,xmm9[u,u]
-; AVX512DQ-NEXT: vpor %xmm13, %xmm14, %xmm13
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3],xmm9[4],xmm7[4],xmm9[5],xmm7[5],xmm9[6],xmm7[6],xmm9[7],xmm7[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm14 = xmm14[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm13, %zmm14, %zmm13
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm18 = zmm13[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm18
-; AVX512DQ-NEXT: vmovdqa (%r9), %xmm13
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = zero,xmm13[4,u,u,u],zero,xmm13[7],zero,xmm13[5,u,u,u],zero,xmm13[8],zero,xmm13[6]
-; AVX512DQ-NEXT: vmovdqa (%r8), %xmm14
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = xmm14[4],zero,xmm14[u,u,u,7],zero,xmm14[5],zero,xmm14[u,u,u,8],zero,xmm14[6],zero
-; AVX512DQ-NEXT: vpor %xmm10, %xmm15, %xmm10
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = xmm15[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm10, %zmm15, %zmm10
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm19 = zmm10[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vmovdqa (%r10), %xmm15
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = xmm15[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm15[1,1,0,0,4,5,6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm10 = zmm0[0,0,1,0,4,4,5,4]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm19, %zmm10
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm10
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14,u,u],zero,zero,zero,zero,ymm1[15,u,u],zero,zero,zero,zero,ymm1[16,u,u],zero,zero,zero,zero,ymm1[17,u,u],zero,zero,zero,zero,ymm1[18]
+; AVX512DQ-NEXT: vporq %zmm8, %zmm9, %zmm8
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm8
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,u,u,u,u,26,27,24,25]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm9 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm18
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm10 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
+; AVX512DQ-NEXT: vpermi2d %zmm7, %zmm9, %zmm10
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[20],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm19
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[u,u,0,1,14,15],zero,ymm2[u,u,13,2,3,16],zero,ymm2[u,u,28,29,16,17],zero,ymm2[u,u,19,28,29,18],zero
-; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm18
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm9, %zmm9
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vporq %zmm7, %zmm9, %zmm7
+; AVX512DQ-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm7
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm7
+; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm9[u],zero,xmm9[7],zero,xmm9[5,u,u,u],zero,xmm9[8],zero,xmm9[6,u,u,u],zero
+; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm10
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm11 = xmm10[u,7],zero,xmm10[5],zero,xmm10[u,u,u,8],zero,xmm10[6],zero,xmm10[u,u,u,9]
+; AVX512DQ-NEXT: vpor %xmm8, %xmm11, %xmm8
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm8, %zmm11, %zmm8
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm8 = zmm8[0,1,0,1,4,5,4,5]
+; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm14
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm11 = xmm14[u,u,u],zero,xmm14[7],zero,xmm14[5,u,u,u],zero,xmm14[8],zero,xmm14[6,u,u]
+; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm15
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm15[u,u,u,7],zero,xmm15[5],zero,xmm15[u,u,u,8],zero,xmm15[6],zero,xmm15[u,u]
+; AVX512DQ-NEXT: vpor %xmm11, %xmm12, %xmm11
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm12 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm11, %zmm12, %zmm11
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm16 = zmm11[0,1,0,1,4,5,4,5]
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm16
+; AVX512DQ-NEXT: vmovdqa (%r9), %xmm11
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm11[4,u,u,u],zero,xmm11[7],zero,xmm11[5,u,u,u],zero,xmm11[8],zero,xmm11[6]
+; AVX512DQ-NEXT: vmovdqa (%r8), %xmm12
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[4],zero,xmm12[u,u,u,7],zero,xmm12[5],zero,xmm12[u,u,u,8],zero,xmm12[6],zero
+; AVX512DQ-NEXT: vpor %xmm8, %xmm13, %xmm8
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm8, %zmm13, %zmm8
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm17 = zmm8[0,1,0,1,4,5,4,5]
+; AVX512DQ-NEXT: vmovdqa (%r10), %xmm13
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm13[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm13[1,1,0,0,4,5,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm8 = zmm0[0,0,1,0,4,4,5,4]
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm8
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm8
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm4[14,u,u],zero,zero,zero,zero,ymm4[15,u,u],zero,zero,zero,zero,ymm4[16,u,u],zero,zero,zero,zero,ymm4[17,u,u],zero,zero,zero,zero,ymm4[18]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[0,1,14],zero,ymm3[u,u,0,1,14,15],zero,ymm3[u,u,13,2,3,16],zero,ymm3[u,u,28,29,16,17],zero,ymm3[u,u,19,28,29,18],zero
; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm14[8],xmm15[8],xmm14[9],xmm15[9],xmm14[10],xmm15[10],xmm14[11],xmm15[11],xmm14[12],xmm15[12],xmm14[13],xmm15[13],xmm14[14],xmm15[14],xmm14[15],xmm15[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u,u,u]
-; AVX512DQ-NEXT: vmovdqa64 %ymm20, %ymm2
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,14],zero,ymm2[u,u,u,u,u,15],zero,ymm2[u,u,u,u,u,16],zero,ymm2[u,u,u,u,u,17],zero,ymm2[u,u,u,u,u]
-; AVX512DQ-NEXT: vpor %ymm1, %ymm7, %ymm1
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm7, %zmm1
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u],zero,ymm6[14,u,u,u,u,u],zero,ymm6[15,u,u,u,u,u],zero,ymm6[16,u,u,u,u,u],zero,ymm6[17,u,u,u,u,u]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm14 = ymm5[u,u,u,u,14],zero,ymm5[u,u,u,u,u,15],zero,ymm5[u,u,u,u,u,16],zero,ymm5[u,u,u,u,u,17],zero,ymm5[u,u,u,u,u]
+; AVX512DQ-NEXT: vpor %ymm1, %ymm14, %ymm1
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm9, %zmm1
; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm5[u,u,u,u,u,14],zero,ymm5[u,u,u,u,u,15],zero,ymm5[u,u,u,u,u,16],zero,ymm5[u,u,u,u,u,17],zero,ymm5[u,u,u]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[13,u,u,u,u,u],zero,ymm6[14,u,u,u,u,u],zero,ymm6[15,u,u,u,u,u],zero,ymm6[16,u,u,u,u,u],zero,ymm6[17,u,u,u]
-; AVX512DQ-NEXT: vpor %ymm0, %ymm7, %ymm0
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm7, %zmm0
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm7 = xmm15[0,1,2,3,4,5,5,6]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm7
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = zero,ymm4[13,u,u,u,u],zero,zero,ymm4[14,u,u,u,u],zero,zero,ymm4[15,u,u,u,u],zero,zero,ymm4[16,u,u,u,u],zero,zero,ymm4[17,u,u]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm7, %zmm7
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm7
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm7
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
+; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm14
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm14[u,u,u,u,u,14],zero,ymm14[u,u,u,u,u,15],zero,ymm14[u,u,u,u,u,16],zero,ymm14[u,u,u,u,u,17],zero,ymm14[u,u,u]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[13,u,u,u,u,u],zero,ymm2[14,u,u,u,u,u],zero,ymm2[15,u,u,u,u,u],zero,ymm2[16,u,u,u,u,u],zero,ymm2[17,u,u,u]
+; AVX512DQ-NEXT: vpor %ymm0, %ymm9, %ymm0
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm9, %zmm0
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm9 = xmm13[0,1,2,3,4,5,5,6]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[2,2,3,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm9
+; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm11
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm11[13,u,u,u,u],zero,zero,ymm11[14,u,u,u,u],zero,zero,ymm11[15,u,u,u,u],zero,zero,ymm11[16,u,u,u,u],zero,zero,ymm11[17,u,u]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm9
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm6[30],zero,ymm6[28,u,u,u],zero,ymm6[31],zero,ymm6[29,u]
+; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512DQ-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm16, %ymm0, %ymm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
+; AVX512DQ-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm17, %ymm1, %ymm2
-; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm3
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,2,3,3,6,6,7,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
-; AVX512DQ-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
-; AVX512DQ-NEXT: vmovdqa %ymm3, 192(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm10, (%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm8, 128(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm7, 64(%rax)
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512DQ-NEXT: vmovdqa %ymm2, 192(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm8, (%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm7, 128(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm9, 64(%rax)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -4644,12 +4583,12 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm3
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm4
; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm5
; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm6
; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa64 (%r10), %ymm17
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm8
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm8[u],zero,xmm8[7],zero,xmm8[5,u,u,u],zero,xmm8[8],zero,xmm8[6,u,u,u],zero
@@ -4689,7 +4628,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm7
; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm7
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm4[14,u,u],zero,zero,zero,zero,ymm4[15,u,u],zero,zero,zero,zero,ymm4[16,u,u],zero,zero,zero,zero,ymm4[17,u,u],zero,zero,zero,zero,ymm4[18]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm2[0,1,14],zero,ymm2[u,u,0,1,14,15],zero,ymm2[u,u,13,2,3,16],zero,ymm2[u,u,28,29,16,17],zero,ymm2[u,u,19,28,29,18],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm3[0,1,14],zero,ymm3[u,u,0,1,14,15],zero,ymm3[u,u,13,2,3,16],zero,ymm3[u,u,28,29,16,17],zero,ymm3[u,u,19,28,29,18],zero
; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm15, %ymm0
; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
@@ -4704,7 +4643,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm8, %zmm9
; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm1[u,u,u,u,u,14],zero,ymm1[u,u,u,u,u,15],zero,ymm1[u,u,u,u,u,16],zero,ymm1[u,u,u,u,u,17],zero,ymm1[u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[13,u,u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[13,u,u,u,u,u],zero,ymm2[14,u,u,u,u,u],zero,ymm2[15,u,u,u,u,u],zero,ymm2[16,u,u,u,u,u],zero,ymm2[17,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm8, %ymm0
; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm8 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
@@ -4715,74 +4654,67 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: # ymm10 = mem[0,1,0,1]
; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm8
; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm8
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm13
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm13[13,u,u,u,u],zero,zero,ymm13[14,u,u,u,u],zero,zero,ymm13[15,u,u,u,u],zero,zero,ymm13[16,u,u,u,u],zero,zero,ymm13[17,u,u]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm12
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm12[13,u,u,u,u],zero,zero,ymm12[14,u,u,u,u],zero,zero,ymm12[15,u,u,u,u],zero,zero,ymm12[16,u,u,u,u],zero,zero,ymm12[17,u,u]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm8
; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512DQ-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpand %ymm0, %ymm9, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm5[18,19,20,21],zero,ymm5[19],zero,ymm5[25,26,27,22],zero,ymm5[20],zero
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm10, %zmm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero,ymm3[25]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm5[18,19,20,21],zero,ymm5[19],zero,ymm5[25,26,27,22],zero,ymm5[20],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm4[23,u,u,u],zero,ymm4[26],zero,ymm4[24,u,u,u],zero,ymm4[27],zero
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm0, %zmm9, %zmm0
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[21],zero,ymm4[19],zero,zero,zero,zero,ymm4[22],zero,ymm4[20],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,ymm3[19],zero,ymm3[21,20,21,22],zero,ymm3[20],zero,ymm3[22,23]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm10, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[21],zero,ymm4[19],zero,zero,zero,zero,ymm4[22],zero,ymm4[20],zero,zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vporq %zmm9, %zmm10, %zmm9
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[20],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,ymm2[19],zero,ymm2[21,20,21,22],zero,ymm2[20],zero,ymm2[22,23]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm11 = zmm11[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm10, %zmm11, %zmm10
+; AVX512DQ-FCP-NEXT: vporq %zmm0, %zmm10, %zmm0
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm10 = ymm12[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [0,5,4,0,5,0,4,0]
+; AVX512DQ-FCP-NEXT: vpermd %ymm10, %ymm11, %ymm10
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm10
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[20],zero,ymm3[18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm11 = zmm11[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm0, %zmm11, %zmm0
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm11 = ymm13[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [0,5,4,0,5,0,4,0]
-; AVX512DQ-FCP-NEXT: vpermd %ymm11, %ymm12, %ymm11
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm11
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm10
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm6[30],zero,ymm6[28,u,u,u],zero,ymm6[31],zero,ymm6[29,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm5, %ymm0
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm5
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512DQ-FCP-NEXT: vpor %ymm4, %ymm3, %ymm3
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm2[28],zero,ymm2[30,31,30,31],zero,ymm2[29],zero,ymm2[31,28,29]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm2, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm9, %ymm2, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, 192(%rax)
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, 192(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, (%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, 128(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, 128(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, 64(%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
@@ -4841,33 +4773,29 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vmovdqu8 %zmm9, %zmm0 {%k1}
; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm9
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm9 = zmm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm9[18,19,20,21],zero,zmm9[19],zero,zmm9[25,26,27,22],zero,zmm9[20],zero,zmm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm9[55],zero,zero,zero,zero,zmm9[58],zero,zmm9[56],zero,zero,zero,zero,zmm9[59],zero
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm15
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm15 = zmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm15[18],zero,zero,zero,zero,zmm15[21],zero,zmm15[19],zero,zero,zero,zero,zmm15[22],zero,zmm15[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm15[55],zero,zero,zero,zero,zmm15[58],zero,zmm15[56],zero,zero,zero,zero,zmm15[59],zero,zmm15[57]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: vporq %zmm9, %zmm15, %zmm9
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm15 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,0,1,1,4,4,5,5]
; AVX512BW-NEXT: movl $676341840, %ecx # imm = 0x28502850
; AVX512BW-NEXT: kmovd %ecx, %k1
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm15 {%k1} = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm16 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm1[23],zero,ymm1[21,22,23,26],zero,ymm1[24],zero,ymm1[28,29,26,27]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm16 = ymm16[2,3,2,3]
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm17 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm17 = ymm17[2,3,2,3]
; AVX512BW-NEXT: vporq %ymm16, %ymm17, %ymm16
; AVX512BW-NEXT: vinserti64x4 $1, %ymm16, %zmm15, %zmm15
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: movabsq $-9005497107459067808, %rcx # imm = 0x83060C180C183060
; AVX512BW-NEXT: kmovq %rcx, %k2
; AVX512BW-NEXT: vmovdqu8 %zmm15, %zmm9 {%k2}
; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm15 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
; AVX512BW-NEXT: vpermw %zmm7, %zmm15, %zmm15
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm16 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm6[18],zero,zmm6[20,21,20,21],zero,zmm6[19],zero,zmm6[19,20,21,22],zero,zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm6[55],zero,zmm6[55,56,57,58],zero,zmm6[56],zero,zmm6[62,63]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm17 = zmm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm5[20],zero,zmm5[18],zero,zero,zero,zero,zmm5[21],zero,zmm5[19],zero,zero,zero,zero,zmm5[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm5[57],zero,zmm5[55],zero,zero,zero,zero,zmm5[58],zero,zmm5[56],zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm17 = zmm17[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: vporq %zmm16, %zmm17, %zmm16
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: movabsq $1161999626690365456, %rcx # imm = 0x1020408102040810
; AVX512BW-NEXT: kmovq %rcx, %k2
; AVX512BW-NEXT: vmovdqu8 %zmm15, %zmm16 {%k2}
@@ -4924,10 +4852,9 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512BW-NEXT: vpermw %ymm7, %ymm2, %ymm2
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX512BW-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512BW-NEXT: movl $-2130574328, %ecx # imm = 0x81020408
; AVX512BW-NEXT: kmovd %ecx, %k1
; AVX512BW-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
@@ -4945,8 +4872,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm3
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm4
; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm1
; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm2
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[0,1,0,1,14],zero,ymm1[14,15,0,1,14,15],zero,ymm1[13,14,15,16,17,16],zero,ymm1[30,31,30,31,16,17],zero,ymm1[31,28,29,30,31]
@@ -4958,8 +4885,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm5, %zmm5
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[0,1,14],zero,ymm4[12,13,0,1,14,15],zero,ymm4[3,12,13,2,3,16],zero,ymm4[30,31,28,29,16,17],zero,ymm4[31,18,19,28,29,18],zero
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero,zero,zero,ymm3[18]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[0,1,14],zero,ymm3[12,13,0,1,14,15],zero,ymm3[3,12,13,2,3,16],zero,ymm3[30,31,28,29,16,17],zero,ymm3[31,18,19,28,29,18],zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm4[14],zero,zero,zero,zero,zero,zero,ymm4[15],zero,zero,zero,zero,zero,zero,ymm4[16],zero,zero,zero,zero,zero,zero,ymm4[17],zero,zero,zero,zero,zero,zero,ymm4[18]
; AVX512BW-FCP-NEXT: vpor %ymm0, %ymm6, %ymm0
; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm12
; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm14
@@ -4993,30 +4920,27 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: movabsq $4066998693416279096, %rcx # imm = 0x3870E1C3870E1C38
; AVX512BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm10, %zmm0 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm4, %zmm10
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm10
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm10 = zmm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,zmm10[19],zero,zmm10[21,20,21,22],zero,zmm10[20],zero,zmm10[22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57],zero,zmm10[55],zero,zmm10[53,54,55,58],zero,zmm10[56],zero,zmm10[60,61,58,59]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm15
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm15
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm15 = zmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm15[21],zero,zmm15[19],zero,zero,zero,zero,zmm15[22],zero,zmm15[20],zero,zero,zmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm15[57],zero,zmm15[55],zero,zero,zero,zero,zmm15[58],zero,zmm15[56],zero,zero,zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vporq %zmm10, %zmm15, %zmm15
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm10
+; AVX512BW-FCP-NEXT: vporq %zmm10, %zmm15, %zmm10
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm15 = zmm10[2,3,2,3,6,7,6,7]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm10
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm10 = zmm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm10[18,19,20,21],zero,zmm10[19],zero,zmm10[25,26,27,22],zero,zmm10[20],zero,zmm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm10[55],zero,zero,zero,zero,zmm10[58],zero,zmm10[56],zero,zero,zero,zero,zmm10[59],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm16
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm16
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm16 = zmm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm16[18],zero,zero,zero,zero,zmm16[21],zero,zmm16[19],zero,zero,zero,zero,zmm16[22],zero,zmm16[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm16[55],zero,zero,zero,zero,zmm16[58],zero,zmm16[56],zero,zero,zero,zero,zmm16[59],zero,zmm16[57]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: vporq %zmm10, %zmm16, %zmm10
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: movabsq $-9005497107459067808, %rcx # imm = 0x83060C180C183060
; AVX512BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm10 {%k1}
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm15 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
; AVX512BW-FCP-NEXT: vpermw %zmm7, %zmm15, %zmm15
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm16 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm6[18],zero,zmm6[20,21,20,21],zero,zmm6[19],zero,zmm6[19,20,21,22],zero,zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm6[55],zero,zmm6[55,56,57,58],zero,zmm6[56],zero,zmm6[62,63]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm17 = zmm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm5[20],zero,zmm5[18],zero,zero,zero,zero,zmm5[21],zero,zmm5[19],zero,zero,zero,zero,zmm5[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm5[57],zero,zmm5[55],zero,zero,zero,zero,zmm5[58],zero,zmm5[56],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm17 = zmm17[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: vporq %zmm16, %zmm17, %zmm16
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: movabsq $1161999626690365456, %rcx # imm = 0x1020408102040810
; AVX512BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm16 {%k1}
@@ -5055,16 +4979,14 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
; AVX512BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm9, %zmm8 {%k1}
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm4[28],zero,ymm4[30,31,30,31],zero,ymm4[29],zero,ymm4[31,28,29]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29],zero,zero,zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm3, %ymm3
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX512BW-FCP-NEXT: vpor %ymm2, %ymm1, %ymm1
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX512BW-FCP-NEXT: movl $101455920, %ecx # imm = 0x60C1830
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %ymm3, %ymm1 {%k1}
@@ -5072,10 +4994,9 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512BW-FCP-NEXT: vpermw %ymm7, %ymm2, %ymm2
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512BW-FCP-NEXT: movl $-2130574328, %ecx # imm = 0x81020408
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
@@ -5143,33 +5064,29 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm9, %zmm0 {%k1}
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm9
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm9 = zmm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm9[18,19,20,21],zero,zmm9[19],zero,zmm9[25,26,27,22],zero,zmm9[20],zero,zmm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm9[55],zero,zero,zero,zero,zmm9[58],zero,zmm9[56],zero,zero,zero,zero,zmm9[59],zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm15
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm15 = zmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm15[18],zero,zero,zero,zero,zmm15[21],zero,zmm15[19],zero,zero,zero,zero,zmm15[22],zero,zmm15[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm15[55],zero,zero,zero,zero,zmm15[58],zero,zmm15[56],zero,zero,zero,zero,zmm15[59],zero,zmm15[57]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: vporq %zmm9, %zmm15, %zmm9
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} ymm15 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,0,1,1,4,4,5,5]
; AVX512DQ-BW-NEXT: movl $676341840, %ecx # imm = 0x28502850
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm15 {%k1} = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm16 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm1[23],zero,ymm1[21,22,23,26],zero,ymm1[24],zero,ymm1[28,29,26,27]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm16 = ymm16[2,3,2,3]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm17 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm17 = ymm17[2,3,2,3]
; AVX512DQ-BW-NEXT: vporq %ymm16, %ymm17, %ymm16
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm16, %zmm15, %zmm15
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: movabsq $-9005497107459067808, %rcx # imm = 0x83060C180C183060
; AVX512DQ-BW-NEXT: kmovq %rcx, %k2
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm15, %zmm9 {%k2}
; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm15 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
; AVX512DQ-BW-NEXT: vpermw %zmm7, %zmm15, %zmm15
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm16 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm6[18],zero,zmm6[20,21,20,21],zero,zmm6[19],zero,zmm6[19,20,21,22],zero,zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm6[55],zero,zmm6[55,56,57,58],zero,zmm6[56],zero,zmm6[62,63]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm17 = zmm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm5[20],zero,zmm5[18],zero,zero,zero,zero,zmm5[21],zero,zmm5[19],zero,zero,zero,zero,zmm5[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm5[57],zero,zmm5[55],zero,zero,zero,zero,zmm5[58],zero,zmm5[56],zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm17 = zmm17[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: vporq %zmm16, %zmm17, %zmm16
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: movabsq $1161999626690365456, %rcx # imm = 0x1020408102040810
; AVX512DQ-BW-NEXT: kmovq %rcx, %k2
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm15, %zmm16 {%k2}
@@ -5226,10 +5143,9 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512DQ-BW-NEXT: vpermw %ymm7, %ymm2, %ymm2
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX512DQ-BW-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512DQ-BW-NEXT: movl $-2130574328, %ecx # imm = 0x81020408
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
@@ -5247,8 +5163,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm4
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm1
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm2
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[0,1,0,1,14],zero,ymm1[14,15,0,1,14,15],zero,ymm1[13,14,15,16,17,16],zero,ymm1[30,31,30,31,16,17],zero,ymm1[31,28,29,30,31]
@@ -5260,8 +5176,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm5, %zmm5
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[0,1,14],zero,ymm4[12,13,0,1,14,15],zero,ymm4[3,12,13,2,3,16],zero,ymm4[30,31,28,29,16,17],zero,ymm4[31,18,19,28,29,18],zero
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero,zero,zero,ymm3[18]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[0,1,14],zero,ymm3[12,13,0,1,14,15],zero,ymm3[3,12,13,2,3,16],zero,ymm3[30,31,28,29,16,17],zero,ymm3[31,18,19,28,29,18],zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm4[14],zero,zero,zero,zero,zero,zero,ymm4[15],zero,zero,zero,zero,zero,zero,ymm4[16],zero,zero,zero,zero,zero,zero,ymm4[17],zero,zero,zero,zero,zero,zero,ymm4[18]
; AVX512DQ-BW-FCP-NEXT: vpor %ymm0, %ymm6, %ymm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm12
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm14
@@ -5295,30 +5211,27 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: movabsq $4066998693416279096, %rcx # imm = 0x3870E1C3870E1C38
; AVX512DQ-BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm10, %zmm0 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm4, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm10
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm10 = zmm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,zmm10[19],zero,zmm10[21,20,21,22],zero,zmm10[20],zero,zmm10[22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57],zero,zmm10[55],zero,zmm10[53,54,55,58],zero,zmm10[56],zero,zmm10[60,61,58,59]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm15
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm15
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm15 = zmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm15[21],zero,zmm15[19],zero,zero,zero,zero,zmm15[22],zero,zmm15[20],zero,zero,zmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm15[57],zero,zmm15[55],zero,zero,zero,zero,zmm15[58],zero,zmm15[56],zero,zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vporq %zmm10, %zmm15, %zmm15
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vporq %zmm10, %zmm15, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm15 = zmm10[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm10
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm10 = zmm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm10[18,19,20,21],zero,zmm10[19],zero,zmm10[25,26,27,22],zero,zmm10[20],zero,zmm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm10[55],zero,zero,zero,zero,zmm10[58],zero,zmm10[56],zero,zero,zero,zero,zmm10[59],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm16
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm16
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm16 = zmm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm16[18],zero,zero,zero,zero,zmm16[21],zero,zmm16[19],zero,zero,zero,zero,zmm16[22],zero,zmm16[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm16[55],zero,zero,zero,zero,zmm16[58],zero,zmm16[56],zero,zero,zero,zero,zmm16[59],zero,zmm16[57]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vporq %zmm10, %zmm16, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: movabsq $-9005497107459067808, %rcx # imm = 0x83060C180C183060
; AVX512DQ-BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm10 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm15 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
; AVX512DQ-BW-FCP-NEXT: vpermw %zmm7, %zmm15, %zmm15
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm16 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm6[18],zero,zmm6[20,21,20,21],zero,zmm6[19],zero,zmm6[19,20,21,22],zero,zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm6[55],zero,zmm6[55,56,57,58],zero,zmm6[56],zero,zmm6[62,63]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm17 = zmm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm5[20],zero,zmm5[18],zero,zero,zero,zero,zmm5[21],zero,zmm5[19],zero,zero,zero,zero,zmm5[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm5[57],zero,zmm5[55],zero,zero,zero,zero,zmm5[58],zero,zmm5[56],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm17 = zmm17[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vporq %zmm16, %zmm17, %zmm16
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: movabsq $1161999626690365456, %rcx # imm = 0x1020408102040810
; AVX512DQ-BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm16 {%k1}
@@ -5357,16 +5270,14 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
; AVX512DQ-BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm9, %zmm8 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm4[28],zero,ymm4[30,31,30,31],zero,ymm4[29],zero,ymm4[31,28,29]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm3, %ymm3
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX512DQ-BW-FCP-NEXT: vpor %ymm2, %ymm1, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX512DQ-BW-FCP-NEXT: movl $101455920, %ecx # imm = 0x60C1830
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm3, %ymm1 {%k1}
@@ -5374,10 +5285,9 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512DQ-BW-FCP-NEXT: vpermw %ymm7, %ymm2, %ymm2
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512DQ-BW-FCP-NEXT: movl $-2130574328, %ecx # imm = 0x81020408
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
@@ -7376,8 +7286,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT: vmovdqa 32(%rsi), %ymm2
+; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovdqa 32(%rdx), %ymm6
; AVX2-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovdqa 32(%rcx), %ymm7
@@ -7389,15 +7299,13 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovdqa 32(%rax), %ymm3
; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[23],zero,ymm0[27,20,21,26],zero,ymm0[24],zero,ymm0[26,27,26,27],zero,ymm0[25]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero,zero,zero,ymm1[27],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm7[25],zero,ymm7[23],zero,zero,zero,zero,ymm7[26],zero,ymm7[24],zero,zero,zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero,ymm6[27]
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,24,25,26,27,24,25,30,31]
@@ -7415,17 +7323,16 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovdqa (%r8), %ymm0
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,ymm0[27,28,29,30],zero,ymm0[28],zero,ymm0[26,27,30,31],zero,ymm0[29]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-NEXT: vmovdqa (%r9), %ymm1
; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,ymm0[27,28,29,30],zero,ymm0[28],zero,ymm0[26,27,30,31],zero,ymm0[29]
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovdqa (%rax), %ymm1
; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovdqa (%rdx), %ymm1
@@ -7622,12 +7529,11 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: # ymm3 = mem[0,1,0,1]
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vpshufb %ymm3, %ymm0, %ymm2
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128]
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vpshufb %ymm4, %ymm0, %ymm5
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
; AVX2-NEXT: vpor %ymm2, %ymm5, %ymm2
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX2-NEXT: vpshuflw $150, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
; AVX2-NEXT: # ymm5 = mem[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[0,1,1,3,4,5,5,7]
@@ -7635,15 +7541,14 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255]
; AVX2-NEXT: vpblendvb %ymm6, %ymm2, %ymm5, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-NEXT: vpshufb %ymm3, %ymm14, %ymm3
-; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-NEXT: vpshufb %ymm3, %ymm8, %ymm3
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
; AVX2-NEXT: vpshufb %ymm4, %ymm13, %ymm4
-; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX2-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-NEXT: vpshuflw {{.*#+}} ymm4 = ymm8[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX2-NEXT: vpshuflw {{.*#+}} ymm4 = ymm14[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[0,1,1,3,4,5,5,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,2]
; AVX2-NEXT: vpblendvb %ymm6, %ymm3, %ymm4, %ymm0
@@ -7665,24 +7570,22 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vpblendvb %ymm7, %ymm6, %ymm5, %ymm5
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
; AVX2-NEXT: # ymm6 = mem[0,1,0,1]
-; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-NEXT: vpshufb %ymm6, %ymm10, %ymm7
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128]
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm9
-; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
+; AVX2-NEXT: vpshufb %ymm6, %ymm2, %ymm7
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128]
+; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX2-NEXT: vpshufb %ymm3, %ymm10, %ymm9
; AVX2-NEXT: vpor %ymm7, %ymm9, %ymm7
+; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
; AVX2-NEXT: vpblendvb %ymm9, %ymm7, %ymm4, %ymm4
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vpshufb %ymm6, %ymm15, %ymm6
-; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX2-NEXT: vpshufb %ymm3, %ymm12, %ymm7
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpor %ymm6, %ymm7, %ymm6
+; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
; AVX2-NEXT: vpblendvb %ymm9, %ymm6, %ymm5, %ymm5
; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0]
@@ -7693,26 +7596,22 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vpblendvb %ymm6, %ymm5, %ymm3, %ymm3
; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[23],zero,ymm0[27,20,21,26],zero,ymm0[24],zero,ymm0[26,27,26,27],zero,ymm0[25]
-; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX2-NEXT: vmovdqa %ymm1, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero,zero,zero,ymm1[27],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
; AVX2-NEXT: vpor %ymm4, %ymm5, %ymm4
; AVX2-NEXT: vpshufb {{.*#+}} ymm5 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm15[25],zero,ymm15[23],zero,zero,zero,zero,ymm15[26],zero,ymm15[24],zero,zero,zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[25],zero,ymm12[23],zero,zero,zero,zero,ymm12[26],zero,ymm12[24],zero,zero,zero,zero,ymm12[27]
-; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
; AVX2-NEXT: vpor %ymm5, %ymm6, %ymm5
+; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
; AVX2-NEXT: vpblendvb %ymm3, %ymm4, %ymm5, %ymm4
-; AVX2-NEXT: vpshufb {{.*#+}} ymm5 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm14[25],zero,ymm14[23],zero,zero,zero,zero,ymm14[26],zero,ymm14[24],zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm5 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm8[25],zero,ymm8[23],zero,zero,zero,zero,ymm8[26],zero,ymm8[24],zero,zero
; AVX2-NEXT: vpshufb {{.*#+}} ymm6 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm13[25],zero,ymm13[23],zero,zero,zero,zero,ymm13[26],zero,ymm13[24],zero,zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
; AVX2-NEXT: vpor %ymm5, %ymm6, %ymm5
-; AVX2-NEXT: vpshufb {{.*#+}} ymm6 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX2-NEXT: vmovdqa %ymm8, %ymm14
+; AVX2-NEXT: vpshufb {{.*#+}} ymm6 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = [0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u]
; AVX2-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm5
; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
@@ -7725,10 +7624,11 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vpshufb %ymm7, %ymm4, %ymm8
; AVX2-NEXT: vpor %ymm5, %ymm8, %ymm5
; AVX2-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX2-NEXT: vpshufb %ymm8, %ymm10, %ymm9
-; AVX2-NEXT: vmovdqa %ymm10, %ymm3
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,0,128,14,128,128,128,128,1,128,15,128,128,128,128,128,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128]
-; AVX2-NEXT: vpshufb %ymm10, %ymm2, %ymm11
+; AVX2-NEXT: vpshufb %ymm8, %ymm2, %ymm9
+; AVX2-NEXT: vmovdqa %ymm2, %ymm3
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,0,128,14,128,128,128,128,1,128,15,128,128,128,128,128,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128]
+; AVX2-NEXT: vpshufb %ymm1, %ymm10, %ymm11
+; AVX2-NEXT: vmovdqa %ymm10, %ymm2
; AVX2-NEXT: vpor %ymm9, %ymm11, %ymm9
; AVX2-NEXT: vmovdqa {{.*#+}} ymm11 = [u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255]
; AVX2-NEXT: vpblendvb %ymm11, %ymm5, %ymm9, %ymm5
@@ -7737,7 +7637,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vpshufb %ymm7, %ymm0, %ymm7
; AVX2-NEXT: vpor %ymm6, %ymm7, %ymm6
; AVX2-NEXT: vpshufb %ymm8, %ymm15, %ymm7
-; AVX2-NEXT: vpshufb %ymm10, %ymm12, %ymm8
+; AVX2-NEXT: vpshufb %ymm1, %ymm12, %ymm8
; AVX2-NEXT: vpor %ymm7, %ymm8, %ymm7
; AVX2-NEXT: vpblendvb %ymm11, %ymm6, %ymm7, %ymm6
; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = [128,1,2,3,0,128,14,128,0,1,0,1,128,15,128,15,128,17,18,19,16,128,30,128,16,17,16,17,128,31,128,31]
@@ -7820,7 +7720,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX2-FP-LABEL: store_i8_stride7_vf64:
; AVX2-FP: # %bb.0:
-; AVX2-FP-NEXT: subq $648, %rsp # imm = 0x288
+; AVX2-FP-NEXT: subq $616, %rsp # imm = 0x268
; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-FP-NEXT: vmovdqa 32(%rsi), %ymm7
@@ -7832,20 +7732,18 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[23],zero,ymm1[27,20,21,26],zero,ymm1[24],zero,ymm1[26,27,26,27],zero,ymm1[25]
; AVX2-FP-NEXT: vmovdqa %ymm1, %ymm8
; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm7[23],zero,zero,zero,zero,ymm7[26],zero,ymm7[24],zero,zero,zero,zero,ymm7[27],zero
; AVX2-FP-NEXT: vmovdqa %ymm7, %ymm9
; AVX2-FP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
; AVX2-FP-NEXT: vmovdqa %ymm2, %ymm7
; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero,ymm6[27]
; AVX2-FP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm2 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,24,25,26,27,24,25,30,31]
@@ -7865,15 +7763,13 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[17,18,19,30],zero,ymm6[28],zero,ymm6[28,29,30,31],zero,ymm6[29],zero,ymm6[31]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm7[30],zero,ymm7[28],zero,zero,zero,zero,ymm7[31],zero,ymm7[29],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[30],zero,ymm8[28],zero,zero,zero,zero,ymm8[31],zero,ymm8[29],zero,zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm2 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,26,27,30,31,30,31,28,29,28,29,28,29,28,29]
@@ -8052,184 +7948,167 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqa (%rdx), %ymm1
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[17,18,19,30],zero,ymm1[28],zero,ymm1[28,29,30,31],zero,ymm1[29],zero,ymm1[31]
-; AVX2-FP-NEXT: vmovdqa %ymm1, %ymm4
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa (%rcx), %ymm2
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero
-; AVX2-FP-NEXT: vmovdqa %ymm2, %ymm10
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-FP-NEXT: vmovdqa %ymm1, %ymm3
+; AVX2-FP-NEXT: vmovdqa (%rcx), %ymm5
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero
; AVX2-FP-NEXT: vpor %ymm0, %ymm1, %ymm2
-; AVX2-FP-NEXT: vmovdqa (%rsi), %ymm1
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
-; AVX2-FP-NEXT: vmovdqa %ymm1, %ymm12
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm3 = ymm0[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm5
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm3, %ymm6, %ymm3
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm0 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
-; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm2, %ymm3, %ymm6
-; AVX2-FP-NEXT: vmovdqa (%r8), %ymm0
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm2 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,ymm0[27,28,29,30],zero,ymm0[28],zero,ymm0[26,27,30,31],zero,ymm0[29]
-; AVX2-FP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX2-FP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX2-FP-NEXT: vmovdqa (%rsi), %ymm0
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm4 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29],zero,zero,zero
+; AVX2-FP-NEXT: vmovdqa %ymm0, %ymm11
+; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm13
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[30],zero,ymm13[28],zero,zero,zero,zero,ymm13[31],zero,ymm13[29],zero,zero,zero,zero
+; AVX2-FP-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa (%r9), %ymm0
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm0[27],zero,zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero
-; AVX2-FP-NEXT: vmovdqa %ymm0, %ymm3
-; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm0 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
+; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm2, %ymm4, %ymm6
+; AVX2-FP-NEXT: vmovdqa (%r8), %ymm14
+; AVX2-FP-NEXT: vmovdqa (%r9), %ymm1
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm2 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,ymm14[27,28,29,30],zero,ymm14[28],zero,ymm14[26,27,30,31],zero,ymm14[29]
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm7 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero
+; AVX2-FP-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill
; AVX2-FP-NEXT: vpor %ymm2, %ymm7, %ymm7
; AVX2-FP-NEXT: vmovdqa (%rax), %ymm0
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX2-FP-NEXT: vmovdqa %ymm0, %ymm11
+; AVX2-FP-NEXT: vmovdqa %ymm0, %ymm10
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0]
; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm8 = [255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0]
; AVX2-FP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm0
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[23],zero,ymm5[27,20,21,26],zero,ymm5[24],zero,ymm5[26,27,26,27],zero,ymm5[25]
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[23],zero,ymm13[27,20,21,26],zero,ymm13[24],zero,ymm13[26,27,26,27],zero,ymm13[25]
+; AVX2-FP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero,zero,zero,ymm11[27],zero
+; AVX2-FP-NEXT: vmovdqa %ymm11, %ymm2
+; AVX2-FP-NEXT: vpor %ymm6, %ymm8, %ymm6
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm5[25],zero,ymm5[23],zero,zero,zero,zero,ymm5[26],zero,ymm5[24],zero,zero,zero,zero
+; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27]
+; AVX2-FP-NEXT: vpor %ymm8, %ymm9, %ymm8
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm7 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm12[23],zero,zero,zero,zero,ymm12[26],zero,ymm12[24],zero,zero,zero,zero,ymm12[27],zero
-; AVX2-FP-NEXT: vmovdqa %ymm12, %ymm14
-; AVX2-FP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm6, %ymm7, %ymm6
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm7 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm10[25],zero,ymm10[23],zero,zero,zero,zero,ymm10[26],zero,ymm10[24],zero,zero,zero,zero
-; AVX2-FP-NEXT: vmovdqa %ymm10, %ymm13
-; AVX2-FP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa %ymm4, %ymm2
-; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[25],zero,ymm4[23],zero,zero,zero,zero,ymm4[26],zero,ymm4[24],zero,zero,zero,zero,ymm4[27]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm7, %ymm8, %ymm7
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm0 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
-; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm6, %ymm7, %ymm6
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm6, %ymm8, %ymm6
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm9 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm14[25],zero,ymm14[23],zero,zero,zero,zero,ymm14[26],zero,ymm14[24],zero,zero,zero
+; AVX2-FP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm9 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u]
-; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm8 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
-; AVX2-FP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm0
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u]
+; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
+; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm6, %ymm8, %ymm0
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
-; AVX2-FP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
+; AVX2-FP-NEXT: # ymm9 = mem[0,1,0,1]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm8, %ymm0, %ymm7
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm9, %ymm6, %ymm10
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm7, %ymm10, %ymm7
-; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
-; AVX2-FP-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm10, %ymm4, %ymm11
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128]
+; AVX2-FP-NEXT: vpshufb %ymm9, %ymm0, %ymm8
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm10, %ymm0, %ymm11
+; AVX2-FP-NEXT: vpor %ymm8, %ymm11, %ymm8
+; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
+; AVX2-FP-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm11, %ymm7, %ymm12
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm12, %ymm1, %ymm15
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm11, %ymm15, %ymm11
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm15 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
-; AVX2-FP-NEXT: vpblendvb %ymm15, %ymm7, %ymm11, %ymm0
-; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb %ymm8, %ymm13, %ymm8
+; AVX2-FP-NEXT: vpshufb %ymm0, %ymm1, %ymm15
+; AVX2-FP-NEXT: vpor %ymm12, %ymm15, %ymm12
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FP-NEXT: vpshufb %ymm9, %ymm2, %ymm9
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm8, %ymm9, %ymm8
-; AVX2-FP-NEXT: vpshufb %ymm10, %ymm14, %ymm9
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX2-FP-NEXT: vpshufb %ymm12, %ymm5, %ymm10
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm15 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
+; AVX2-FP-NEXT: vpblendvb %ymm15, %ymm8, %ymm12, %ymm4
+; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpshufb %ymm9, %ymm5, %ymm9
+; AVX2-FP-NEXT: vpshufb %ymm10, %ymm3, %ymm10
; AVX2-FP-NEXT: vpor %ymm9, %ymm10, %ymm9
-; AVX2-FP-NEXT: vpblendvb %ymm15, %ymm8, %ymm9, %ymm8
-; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
-; AVX2-FP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm9, %ymm14, %ymm10
+; AVX2-FP-NEXT: vmovdqa %ymm2, %ymm3
+; AVX2-FP-NEXT: vpshufb %ymm11, %ymm2, %ymm10
+; AVX2-FP-NEXT: vpshufb %ymm0, %ymm13, %ymm11
+; AVX2-FP-NEXT: vpor %ymm10, %ymm11, %ymm10
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm11, %ymm3, %ymm12
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm10, %ymm12, %ymm10
-; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [20,21,18,19,18,19,20,21,18,19,20,21,28,29,30,31,20,21,18,19,18,19,20,21,18,19,20,21,28,29,30,31]
-; AVX2-FP-NEXT: # ymm12 = mem[0,1,0,1]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm12, %ymm13, %ymm15
+; AVX2-FP-NEXT: vpblendvb %ymm15, %ymm9, %ymm10, %ymm9
+; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
+; AVX2-FP-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm10, %ymm8, %ymm11
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm12, %ymm4, %ymm13
+; AVX2-FP-NEXT: vpor %ymm11, %ymm13, %ymm11
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
+; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [20,21,18,19,18,19,20,21,18,19,20,21,28,29,30,31,20,21,18,19,18,19,20,21,18,19,20,21,28,29,30,31]
+; AVX2-FP-NEXT: # ymm13 = mem[0,1,0,1]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm13, %ymm6, %ymm15
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,2]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm0 = [u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255]
-; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm10, %ymm15, %ymm10
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm9, %ymm15, %ymm9
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqu (%rsp), %ymm2 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm11, %ymm2, %ymm11
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm9, %ymm11, %ymm9
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm12, %ymm7, %ymm11
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,2]
-; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm9, %ymm11, %ymm0
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0]
-; AVX2-FP-NEXT: vpblendvb %ymm9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX2-FP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm8, %ymm0, %ymm8
+; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm11, %ymm15, %ymm11
+; AVX2-FP-NEXT: vmovdqu (%rsp), %ymm15 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm10, %ymm15, %ymm10
+; AVX2-FP-NEXT: vpshufb %ymm12, %ymm14, %ymm12
+; AVX2-FP-NEXT: vpor %ymm10, %ymm12, %ymm10
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm13, %ymm2, %ymm12
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,2]
+; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm10, %ymm12, %ymm0
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0]
+; AVX2-FP-NEXT: vpblendvb %ymm10, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
+; AVX2-FP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm9, %ymm0, %ymm9
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX2-FP-NEXT: vpshufb %ymm0, %ymm4, %ymm9
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [0,128,14,128,128,128,128,1,128,15,128,128,128,128,2,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128,18,128]
-; AVX2-FP-NEXT: vpshufb %ymm10, %ymm1, %ymm11
-; AVX2-FP-NEXT: vpor %ymm9, %ymm11, %ymm9
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm0, %ymm7, %ymm10
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm11 = [0,128,14,128,128,128,128,1,128,15,128,128,128,128,2,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128,18,128]
; AVX2-FP-NEXT: vpshufb %ymm11, %ymm1, %ymm12
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,0,128,14,128,128,128,128,1,128,15,128,128,128,128,128,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128]
-; AVX2-FP-NEXT: vpshufb %ymm4, %ymm6, %ymm6
-; AVX2-FP-NEXT: vpor %ymm6, %ymm12, %ymm6
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255]
-; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm9, %ymm6, %ymm9
+; AVX2-FP-NEXT: vpor %ymm10, %ymm12, %ymm10
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm0, %ymm1, %ymm0
-; AVX2-FP-NEXT: vpshufb %ymm10, %ymm5, %ymm1
-; AVX2-FP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-FP-NEXT: vpshufb %ymm12, %ymm1, %ymm13
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm14 = [128,128,0,128,14,128,128,128,128,1,128,15,128,128,128,128,128,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm14, %ymm1, %ymm7
+; AVX2-FP-NEXT: vpor %ymm7, %ymm13, %ymm7
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm13 = [u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255]
+; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm10, %ymm7, %ymm10
+; AVX2-FP-NEXT: vpshufb %ymm0, %ymm3, %ymm0
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FP-NEXT: vpshufb %ymm11, %ymm1, %ymm1
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm4, %ymm5, %ymm4
-; AVX2-FP-NEXT: vpor %ymm1, %ymm4, %ymm1
-; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm0, %ymm1, %ymm0
+; AVX2-FP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-FP-NEXT: vpshufb %ymm12, %ymm5, %ymm1
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm14, %ymm3, %ymm3
+; AVX2-FP-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,1,2,3,0,128,14,128,0,1,0,1,128,15,128,15,128,17,18,19,16,128,30,128,16,17,16,17,128,31,128,31]
-; AVX2-FP-NEXT: vpshufb %ymm1, %ymm3, %ymm4
+; AVX2-FP-NEXT: vpshufb %ymm1, %ymm4, %ymm3
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm5 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX2-FP-NEXT: vpshufb %ymm5, %ymm14, %ymm6
-; AVX2-FP-NEXT: vpor %ymm4, %ymm6, %ymm4
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm6 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX2-FP-NEXT: vpshufb %ymm6, %ymm13, %ymm10
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm11 = [255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u]
-; AVX2-FP-NEXT: vpblendvb %ymm11, %ymm4, %ymm10, %ymm4
-; AVX2-FP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; AVX2-FP-NEXT: vpshufb %ymm5, %ymm15, %ymm3
-; AVX2-FP-NEXT: vpor %ymm1, %ymm3, %ymm1
-; AVX2-FP-NEXT: vpshufb %ymm6, %ymm7, %ymm2
-; AVX2-FP-NEXT: vpblendvb %ymm11, %ymm1, %ymm2, %ymm1
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm8, %ymm7
+; AVX2-FP-NEXT: vpor %ymm3, %ymm7, %ymm3
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm7 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX2-FP-NEXT: vpshufb %ymm7, %ymm6, %ymm11
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u]
+; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm3, %ymm11, %ymm3
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm1, %ymm4, %ymm1
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm15, %ymm4
+; AVX2-FP-NEXT: vpor %ymm1, %ymm4, %ymm1
+; AVX2-FP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
+; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm1, %ymm2, %ymm1
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
-; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm9, %ymm4, %ymm3
+; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm10, %ymm3, %ymm3
; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FP-NEXT: vmovdqa %ymm0, 96(%rax)
; AVX2-FP-NEXT: vmovdqa %ymm3, 320(%rax)
-; AVX2-FP-NEXT: vmovdqa %ymm8, 128(%rax)
+; AVX2-FP-NEXT: vmovdqa %ymm9, 128(%rax)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 352(%rax)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -8252,13 +8131,13 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovaps %ymm0, 416(%rax)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 384(%rax)
-; AVX2-FP-NEXT: addq $648, %rsp # imm = 0x288
+; AVX2-FP-NEXT: addq $616, %rsp # imm = 0x268
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: store_i8_stride7_vf64:
; AVX2-FCP: # %bb.0:
-; AVX2-FCP-NEXT: subq $648, %rsp # imm = 0x288
+; AVX2-FCP-NEXT: subq $616, %rsp # imm = 0x268
; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovdqa 32(%rsi), %ymm7
@@ -8270,20 +8149,18 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[23],zero,ymm1[27,20,21,26],zero,ymm1[24],zero,ymm1[26,27,26,27],zero,ymm1[25]
; AVX2-FCP-NEXT: vmovdqa %ymm1, %ymm8
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm7[23],zero,zero,zero,zero,ymm7[26],zero,ymm7[24],zero,zero,zero,zero,ymm7[27],zero
; AVX2-FCP-NEXT: vmovdqa %ymm7, %ymm9
; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm7
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero,ymm6[27]
; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,24,25,26,27,24,25,30,31]
@@ -8303,15 +8180,13 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[17,18,19,30],zero,ymm6[28],zero,ymm6[28,29,30,31],zero,ymm6[29],zero,ymm6[31]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm7[30],zero,ymm7[28],zero,zero,zero,zero,ymm7[31],zero,ymm7[29],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[30],zero,ymm8[28],zero,zero,zero,zero,ymm8[31],zero,ymm8[29],zero,zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,26,27,30,31,30,31,28,29,28,29,28,29,28,29]
@@ -8332,7 +8207,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX2-FCP-NEXT: vmovdqa %xmm1, %xmm14
-; AVX2-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
@@ -8349,7 +8224,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm12
; AVX2-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
; AVX2-FCP-NEXT: vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vmovdqa %xmm11, (%rsp) # 16-byte Spill
+; AVX2-FCP-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX2-FCP-NEXT: vmovdqa (%rcx), %xmm2
; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -8445,7 +8320,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm10[8],xmm7[8],xmm10[9],xmm7[9],xmm10[10],xmm7[10],xmm10[11],xmm7[11],xmm10[12],xmm7[12],xmm10[13],xmm7[13],xmm10[14],xmm7[14],xmm10[15],xmm7[15]
-; AVX2-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX2-FCP-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX2-FCP-NEXT: vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm1 = xmm1[8],mem[8],xmm1[9],mem[9],xmm1[10],mem[10],xmm1[11],mem[11],xmm1[12],mem[12],xmm1[13],mem[13],xmm1[14],mem[14],xmm1[15],mem[15]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
@@ -8458,7 +8333,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vpblendvb %ymm6, %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vmovdqa (%rsp), %xmm2 # 16-byte Reload
+; AVX2-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX2-FCP-NEXT: vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm2 = xmm2[8],mem[8],xmm2[9],mem[9],xmm2[10],mem[10],xmm2[11],mem[11],xmm2[12],mem[12],xmm2[13],mem[13],xmm2[14],mem[14],xmm2[15],mem[15]
; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -8488,186 +8363,167 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa (%rdx), %ymm1
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[17,18,19,30],zero,ymm1[28],zero,ymm1[28,29,30,31],zero,ymm1[29],zero,ymm1[31]
-; AVX2-FCP-NEXT: vmovdqa %ymm1, %ymm4
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa (%rcx), %ymm2
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero
-; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm10
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-FCP-NEXT: vmovdqa %ymm1, %ymm3
+; AVX2-FCP-NEXT: vmovdqa (%rcx), %ymm5
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero
; AVX2-FCP-NEXT: vpor %ymm0, %ymm1, %ymm2
-; AVX2-FCP-NEXT: vmovdqa (%rsi), %ymm5
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm0[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm0
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29],zero,zero,zero,zero
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm12
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm3, %ymm6, %ymm3
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm2, %ymm3, %ymm6
-; AVX2-FCP-NEXT: vmovdqa (%r8), %ymm0
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,ymm0[27,28,29,30],zero,ymm0[28],zero,ymm0[26,27,30,31],zero,ymm0[29]
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa (%rsi), %ymm0
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29],zero,zero,zero
+; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm11
+; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm13
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[30],zero,ymm13[28],zero,zero,zero,zero,ymm13[31],zero,ymm13[29],zero,zero,zero,zero
+; AVX2-FCP-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa (%r9), %ymm0
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm0[27],zero,zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm3
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm2, %ymm4, %ymm6
+; AVX2-FCP-NEXT: vmovdqa (%r8), %ymm14
+; AVX2-FCP-NEXT: vmovdqa (%r9), %ymm1
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,ymm14[27,28,29,30],zero,ymm14[28],zero,ymm14[26,27,30,31],zero,ymm14[29]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero
+; AVX2-FCP-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT: vpor %ymm2, %ymm7, %ymm7
; AVX2-FCP-NEXT: vmovdqa (%rax), %ymm0
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm11
-; AVX2-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm10
+; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0]
; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0]
; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[23],zero,ymm12[27,20,21,26],zero,ymm12[24],zero,ymm12[26,27,26,27],zero,ymm12[25]
-; AVX2-FCP-NEXT: vmovdqa %ymm12, %ymm13
-; AVX2-FCP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[23],zero,ymm13[27,20,21,26],zero,ymm13[24],zero,ymm13[26,27,26,27],zero,ymm13[25]
+; AVX2-FCP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero,zero,zero,ymm11[27],zero
+; AVX2-FCP-NEXT: vmovdqa %ymm11, %ymm2
+; AVX2-FCP-NEXT: vpor %ymm6, %ymm8, %ymm6
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm5[25],zero,ymm5[23],zero,zero,zero,zero,ymm5[26],zero,ymm5[24],zero,zero,zero,zero
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27]
+; AVX2-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm5[23],zero,zero,zero,zero,ymm5[26],zero,ymm5[24],zero,zero,zero,zero,ymm5[27],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm10[25],zero,ymm10[23],zero,zero,zero,zero,ymm10[26],zero,ymm10[24],zero,zero,zero,zero
-; AVX2-FCP-NEXT: vmovdqa %ymm10, %ymm14
-; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa %ymm4, %ymm2
-; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[25],zero,ymm4[23],zero,zero,zero,zero,ymm4[26],zero,ymm4[24],zero,zero,zero,zero,ymm4[27]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm6, %ymm7, %ymm6
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm6, %ymm8, %ymm6
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm14[25],zero,ymm14[23],zero,zero,zero,zero,ymm14[26],zero,ymm14[24],zero,zero,zero
+; AVX2-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u]
-; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
-; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm0
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u]
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
+; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm6, %ymm8, %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
-; AVX2-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
+; AVX2-FCP-NEXT: # ymm9 = mem[0,1,0,1]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm7
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm6, %ymm10
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm7, %ymm10, %ymm7
-; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
-; AVX2-FCP-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm4, %ymm11
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128]
+; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm8
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm11
+; AVX2-FCP-NEXT: vpor %ymm8, %ymm11, %ymm8
+; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
+; AVX2-FCP-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm7, %ymm12
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm1, %ymm15
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm11, %ymm15, %ymm11
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
-; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm7, %ymm11, %ymm0
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm14, %ymm8
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm1, %ymm15
+; AVX2-FCP-NEXT: vpor %ymm12, %ymm15, %ymm12
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm9
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm9
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm13, %ymm10
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm9, %ymm10, %ymm9
-; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm8, %ymm9, %ymm8
-; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
-; AVX2-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm14, %ymm10
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm12
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm10, %ymm12, %ymm10
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} ymm12 = ymm13[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [4,5,4,5,5,7,4,5]
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm15, %ymm12
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255]
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm10, %ymm12, %ymm10
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm9
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
+; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm8, %ymm12, %ymm4
+; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm9
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm3, %ymm10
+; AVX2-FCP-NEXT: vpor %ymm9, %ymm10, %ymm9
+; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm3
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm10
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm13, %ymm11
+; AVX2-FCP-NEXT: vpor %ymm10, %ymm11, %ymm10
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm9, %ymm10, %ymm9
+; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
+; AVX2-FCP-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm11
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm11
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm13
+; AVX2-FCP-NEXT: vpor %ymm11, %ymm13, %ymm11
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm9, %ymm11, %ymm9
-; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm7 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} ymm11 = ymm7[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX2-FCP-NEXT: vpermd %ymm11, %ymm15, %ymm11
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm9, %ymm11, %ymm0
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0]
-; AVX2-FCP-NEXT: vpblendvb %ymm9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm8, %ymm0, %ymm8
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} ymm13 = ymm6[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [4,5,4,5,5,7,4,5]
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm15, %ymm13
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255]
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm11, %ymm13, %ymm11
+; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm4 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm4, %ymm10
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm14, %ymm12
+; AVX2-FCP-NEXT: vpor %ymm10, %ymm12, %ymm10
+; AVX2-FCP-NEXT: vpshuflw $150, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm12 = mem[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX2-FCP-NEXT: vpermd %ymm12, %ymm15, %ymm12
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm10, %ymm12, %ymm0
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0]
+; AVX2-FCP-NEXT: vpblendvb %ymm10, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm14 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm9, %ymm0, %ymm9
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm9
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [0,128,14,128,128,128,128,1,128,15,128,128,128,128,2,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128,18,128]
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm11
-; AVX2-FCP-NEXT: vpor %ymm9, %ymm11, %ymm9
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm10
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [0,128,14,128,128,128,128,1,128,15,128,128,128,128,2,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128,18,128]
; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm12
+; AVX2-FCP-NEXT: vpor %ymm10, %ymm12, %ymm10
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm1, %ymm13
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [128,128,0,128,14,128,128,128,128,1,128,15,128,128,128,128,128,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128]
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm6, %ymm6
-; AVX2-FCP-NEXT: vpor %ymm6, %ymm12, %ymm6
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255]
-; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm9, %ymm6, %ymm9
-; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm5, %ymm0
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm1, %ymm7
+; AVX2-FCP-NEXT: vpor %ymm7, %ymm13, %ymm7
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255]
+; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm10, %ymm7, %ymm10
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm3, %ymm0
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm4, %ymm4
-; AVX2-FCP-NEXT: vpor %ymm1, %ymm4, %ymm1
-; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm0, %ymm1, %ymm0
+; AVX2-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm5, %ymm1
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,1,2,3,0,128,14,128,0,1,0,1,128,15,128,15,128,17,18,19,16,128,30,128,16,17,16,17,128,31,128,31]
-; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm3, %ymm4
+; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm3
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm14, %ymm6
-; AVX2-FCP-NEXT: vpor %ymm4, %ymm6, %ymm4
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm13, %ymm10
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u]
-; AVX2-FCP-NEXT: vpblendvb %ymm11, %ymm4, %ymm10, %ymm4
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm8, %ymm7
+; AVX2-FCP-NEXT: vpor %ymm3, %ymm7, %ymm3
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm6, %ymm11
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u]
+; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm3, %ymm11, %ymm3
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpor %ymm1, %ymm4, %ymm1
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm3
-; AVX2-FCP-NEXT: vpor %ymm1, %ymm3, %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm7, %ymm2
-; AVX2-FCP-NEXT: vpblendvb %ymm11, %ymm1, %ymm2, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm1, %ymm2, %ymm1
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
-; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm9, %ymm4, %ymm3
+; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm10, %ymm3, %ymm3
; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FCP-NEXT: vmovdqa %ymm0, 96(%rax)
; AVX2-FCP-NEXT: vmovdqa %ymm3, 320(%rax)
-; AVX2-FCP-NEXT: vmovdqa %ymm8, 128(%rax)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm0, 352(%rax)
+; AVX2-FCP-NEXT: vmovdqa %ymm9, 128(%rax)
+; AVX2-FCP-NEXT: vmovdqa %ymm14, 352(%rax)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 160(%rax)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -8688,1747 +8544,1675 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovaps %ymm0, 416(%rax)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 384(%rax)
-; AVX2-FCP-NEXT: addq $648, %rsp # imm = 0x288
+; AVX2-FCP-NEXT: addq $616, %rsp # imm = 0x268
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
; AVX512-LABEL: store_i8_stride7_vf64:
; AVX512: # %bb.0:
-; AVX512-NEXT: subq $1448, %rsp # imm = 0x5A8
-; AVX512-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,zero,zero,ymm1[18]
-; AVX512-NEXT: vmovdqa %ymm1, %ymm10
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: subq $1720, %rsp # imm = 0x6B8
+; AVX512-NEXT: vmovdqa (%rsi), %ymm7
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512-NEXT: vpshufb %ymm2, %ymm7, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm26
; AVX512-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[12,13,0,1,14,15],zero,ymm2[3,12,13,2,3,16],zero,ymm2[30,31,28,29,16,17],zero,ymm2[31,18,19,28,29,18],zero
-; AVX512-NEXT: vmovdqa %ymm2, %ymm14
-; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512-NEXT: vmovdqa64 %ymm3, %ymm25
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm18
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa (%rcx), %ymm6
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512-NEXT: vpshufb %ymm1, %ymm6, %ymm0
-; AVX512-NEXT: vmovdqa64 %ymm1, %ymm23
+; AVX512-NEXT: vmovdqa (%rcx), %ymm14
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512-NEXT: vpshufb %ymm2, %ymm14, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm28
; AVX512-NEXT: vmovdqa (%rdx), %ymm8
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512-NEXT: vpshufb %ymm2, %ymm8, %ymm1
-; AVX512-NEXT: vmovdqa64 %ymm2, %ymm17
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512-NEXT: vpshufb %ymm3, %ymm8, %ymm1
+; AVX512-NEXT: vmovdqa64 %ymm3, %ymm23
+; AVX512-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa (%r8), %ymm0
-; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqa (%r8), %ymm15
; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vmovdqa64 %ymm2, %ymm18
-; AVX512-NEXT: vmovdqa (%r9), %ymm1
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vpshufb %ymm2, %ymm15, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm24
+; AVX512-NEXT: vmovdqa (%r9), %ymm2
; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
; AVX512-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX512-NEXT: vmovdqa64 %ymm3, %ymm22
-; AVX512-NEXT: vporq %ymm0, %ymm1, %ymm24
-; AVX512-NEXT: vmovdqa 32(%r9), %ymm11
-; AVX512-NEXT: vmovdqa 32(%r8), %ymm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
-; AVX512-NEXT: vmovdqa %ymm1, %ymm13
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm11[25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero
+; AVX512-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512-NEXT: vmovdqa64 %ymm3, %ymm27
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm19
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqa 32(%rsi), %ymm10
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128]
+; AVX512-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm6, %ymm10, %ymm0
+; AVX512-NEXT: vmovdqa 32(%rdi), %ymm9
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm9[23],zero,zero,zero,zero,ymm9[26],zero,ymm9[24],zero,zero,zero,zero,ymm9[27],zero,ymm9[25]
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [12,13,14,128,12,128,14,15,14,15,128,13,128,15,12,13,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
+; AVX512-NEXT: vpshufb %ymm1, %ymm9, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm1, %ymm16
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm10[23,u,u,u],zero,ymm10[26],zero,ymm10[24,u,u,u],zero,ymm10[27],zero
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa 32(%rcx), %ymm7
-; AVX512-NEXT: vmovdqa 32(%rdx), %ymm9
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero
-; AVX512-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512-NEXT: vmovdqa 32(%rdx), %ymm5
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128]
+; AVX512-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm1, %ymm5, %ymm2
+; AVX512-NEXT: vmovdqa 32(%rcx), %ymm4
+; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[25],zero,ymm4[23],zero,zero,zero,zero,ymm4[26],zero,ymm4[24],zero,zero,zero,zero
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [27,0,0,0,128,30,128,28,0,0,0,128,31,128,29,0,27,0,0,0,128,30,128,28,0,0,0,128,31,128,29,0]
; AVX512-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm0, %ymm7, %ymm2
-; AVX512-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa 32(%rsi), %ymm5
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero
-; AVX512-NEXT: vmovdqa 32(%rdi), %ymm4
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm3, %ymm4, %ymm2
-; AVX512-NEXT: vmovdqa64 %ymm4, %ymm20
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpshufb %ymm0, %ymm4, %ymm2
+; AVX512-NEXT: vmovdqa64 %ymm0, %ymm20
+; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa 32(%r8), %ymm3
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29]
+; AVX512-NEXT: # ymm0 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm0, %ymm3, %ymm11
+; AVX512-NEXT: vmovdqa64 %ymm0, %ymm21
+; AVX512-NEXT: vmovdqa 32(%r9), %ymm2
+; AVX512-NEXT: vpshufb {{.*#+}} ymm12 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
+; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm12, %zmm11
+; AVX512-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [29,128,27,0,0,0,128,30,128,28,0,0,0,128,31,128,29,128,27,0,0,0,128,30,128,28,0,0,0,128,31,128]
+; AVX512-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm11, %ymm2, %ymm12
+; AVX512-NEXT: vpshufb {{.*#+}} ymm13 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm3[23],zero,ymm3[23,24,25,26],zero,ymm3[24],zero,ymm3[30,31]
+; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT: vmovdqa 32(%rax), %ymm4
-; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm2, %ymm4, %ymm4
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm4, %zmm1
-; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshufb %ymm3, %ymm14, %ymm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm10[21],zero,ymm10[19],zero,zero,zero,zero,ymm10[22],zero,ymm10[20],zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
-; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %ymm0, %ymm6, %ymm0
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: vmovdqa 32(%rax), %ymm0
+; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vpshufb {{.*#+}} ymm12 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm13 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpshufb %ymm6, %ymm7, %ymm6
+; AVX512-NEXT: vmovdqa64 %ymm7, %ymm17
+; AVX512-NEXT: vmovdqa64 %ymm18, %ymm13
+; AVX512-NEXT: vmovdqa64 %ymm16, %ymm0
+; AVX512-NEXT: vpshufb %ymm0, %ymm13, %ymm7
+; AVX512-NEXT: vpor %ymm6, %ymm7, %ymm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm8[23],zero,ymm8[21,22,23,26],zero,ymm8[24],zero,ymm8[28,29,26,27]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm8[18,19,20,21],zero,ymm8[19],zero,ymm8[25,26,27,22],zero,ymm8[20],zero
-; AVX512-NEXT: vmovdqa64 %ymm8, %ymm19
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: vpshufb %ymm1, %ymm8, %ymm6
+; AVX512-NEXT: vmovdqa64 %ymm20, %ymm0
+; AVX512-NEXT: vpshufb %ymm0, %ymm14, %ymm7
+; AVX512-NEXT: vmovdqa64 %ymm14, %ymm22
+; AVX512-NEXT: vpor %ymm6, %ymm7, %ymm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa (%rax), %ymm1
-; AVX512-NEXT: vpshufb %ymm2, %ymm1, %ymm0
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm16 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm2 = ymm1[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512-NEXT: vpermi2d %zmm0, %zmm2, %zmm16
-; AVX512-NEXT: vmovdqa 32(%rdx), %xmm3
-; AVX512-NEXT: vmovdqa 32(%rcx), %xmm0
+; AVX512-NEXT: vmovdqa64 %ymm21, %ymm0
+; AVX512-NEXT: vpshufb %ymm0, %ymm15, %ymm6
+; AVX512-NEXT: vmovdqa64 %ymm15, %ymm16
+; AVX512-NEXT: vmovdqa64 %ymm19, %ymm15
+; AVX512-NEXT: vpshufb %ymm11, %ymm15, %ymm7
+; AVX512-NEXT: vpor %ymm6, %ymm7, %ymm0
+; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa 32(%rdx), %xmm0
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vmovdqa64 %xmm2, %xmm29
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512-NEXT: vpshufb %xmm4, %xmm3, %xmm2
-; AVX512-NEXT: vmovdqa64 %xmm4, %xmm31
-; AVX512-NEXT: vmovdqa64 %xmm3, %xmm30
-; AVX512-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vmovdqa 32(%rcx), %xmm1
+; AVX512-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512-NEXT: vpshufb %xmm11, %xmm1, %xmm6
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm12 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512-NEXT: vpshufb %xmm12, %xmm0, %xmm7
+; AVX512-NEXT: vpor %xmm6, %xmm7, %xmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa 32(%rdi), %xmm3
-; AVX512-NEXT: vmovdqa 32(%rsi), %xmm4
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm12 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512-NEXT: vpshufb %xmm12, %xmm4, %xmm0
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm15 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512-NEXT: vpshufb %xmm15, %xmm3, %xmm2
-; AVX512-NEXT: vmovdqa64 %xmm3, %xmm21
-; AVX512-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vmovdqa 32(%rdi), %xmm0
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa 32(%rsi), %xmm14
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512-NEXT: vpshufb %xmm6, %xmm14, %xmm7
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512-NEXT: vpshufb %xmm1, %xmm0, %xmm8
+; AVX512-NEXT: vmovdqa64 %xmm1, %xmm18
+; AVX512-NEXT: vpor %xmm7, %xmm8, %xmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm3 = [0,0,0,0,2,3,0,1,0,18,0,19,18,0,19,0]
-; AVX512-NEXT: vmovdqa 32(%rax), %xmm2
-; AVX512-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5,5,6]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512-NEXT: vpermi2d %zmm0, %zmm2, %zmm3
-; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm1 = [0,0,0,0,2,3,0,1,0,18,0,19,18,0,19,0]
+; AVX512-NEXT: vmovdqa 32(%rax), %xmm0
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm7 = xmm0[0,1,2,3,4,5,5,6]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512-NEXT: vpermi2d %zmm7, %zmm8, %zmm1
+; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa 32(%r9), %xmm0
-; AVX512-NEXT: vmovdqa 32(%r8), %xmm2
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512-NEXT: vpshufb %xmm6, %xmm0, %xmm8
-; AVX512-NEXT: vmovdqa64 %xmm0, %xmm28
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm14 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512-NEXT: vpshufb %xmm14, %xmm2, %xmm10
-; AVX512-NEXT: vmovdqa %xmm2, %xmm3
-; AVX512-NEXT: vporq %xmm8, %xmm10, %xmm26
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa 32(%r8), %xmm1
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm8 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512-NEXT: vpshufb %xmm8, %xmm0, %xmm7
+; AVX512-NEXT: vmovdqa64 %xmm8, %xmm20
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512-NEXT: vpshufb %xmm0, %xmm1, %xmm8
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm30
+; AVX512-NEXT: vmovdqa64 %xmm1, %xmm29
+; AVX512-NEXT: vpor %xmm7, %xmm8, %xmm0
+; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa64 %ymm28, %ymm0
+; AVX512-NEXT: vpshufb %ymm0, %ymm4, %ymm7
; AVX512-NEXT: vmovdqa64 %ymm23, %ymm0
-; AVX512-NEXT: vpshufb %ymm0, %ymm7, %ymm8
-; AVX512-NEXT: vmovdqa64 %ymm17, %ymm0
-; AVX512-NEXT: vpshufb %ymm0, %ymm9, %ymm10
-; AVX512-NEXT: vpor %ymm8, %ymm10, %ymm0
+; AVX512-NEXT: vpshufb %ymm0, %ymm5, %ymm8
+; AVX512-NEXT: vpor %ymm7, %ymm8, %ymm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,ymm5[14],zero,zero,zero,zero,zero,zero,ymm5[15],zero,zero,zero,zero,zero,zero,ymm5[16],zero,zero,zero,zero,zero,zero,ymm5[17],zero,zero,zero,zero,zero,zero,ymm5[18]
-; AVX512-NEXT: vmovdqa64 %ymm5, %ymm23
-; AVX512-NEXT: vmovdqa64 %ymm20, %ymm0
-; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[0,1,14],zero,ymm0[12,13,0,1,14,15],zero,ymm0[3,12,13,2,3,16],zero,ymm0[30,31,28,29,16,17],zero,ymm0[31,18,19,28,29,18],zero
-; AVX512-NEXT: vpor %ymm8, %ymm10, %ymm2
-; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa64 %ymm18, %ymm2
-; AVX512-NEXT: vpshufb %ymm2, %ymm13, %ymm8
-; AVX512-NEXT: vmovdqa64 %ymm22, %ymm2
-; AVX512-NEXT: vpshufb %ymm2, %ymm11, %ymm10
-; AVX512-NEXT: vmovdqa64 %ymm11, %ymm27
-; AVX512-NEXT: vpor %ymm8, %ymm10, %ymm2
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
+; AVX512-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm1, %ymm4, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm1, %ymm21
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
+; AVX512-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm4, %ymm5, %ymm1
+; AVX512-NEXT: vmovdqa64 %ymm4, %ymm19
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqa64 %ymm26, %ymm0
+; AVX512-NEXT: vpshufb %ymm0, %ymm10, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm25, %ymm1
+; AVX512-NEXT: vpshufb %ymm1, %ymm9, %ymm1
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
+; AVX512-NEXT: # ymm7 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm7, %ymm10, %ymm0
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [2,3,4,5,128,3,128,5,4,5,6,128,4,128,6,7,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
+; AVX512-NEXT: vpshufb %ymm1, %ymm9, %ymm4
+; AVX512-NEXT: vpor %ymm0, %ymm4, %ymm0
+; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
+; AVX512-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm10, %ymm2, %ymm4
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
+; AVX512-NEXT: # ymm0 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm0, %ymm3, %ymm5
+; AVX512-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX512-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa64 %ymm24, %ymm4
+; AVX512-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512-NEXT: vmovdqa64 %ymm27, %ymm4
+; AVX512-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX512-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa (%rsi), %xmm2
-; AVX512-NEXT: vpshufb %xmm12, %xmm2, %xmm5
-; AVX512-NEXT: vmovdqa64 %xmm2, %xmm25
-; AVX512-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512-NEXT: vpshufb %xmm15, %xmm12, %xmm9
-; AVX512-NEXT: vporq %xmm5, %xmm9, %xmm22
-; AVX512-NEXT: vmovdqa (%rcx), %xmm13
-; AVX512-NEXT: vmovdqa64 %xmm29, %xmm2
-; AVX512-NEXT: vpshufb %xmm2, %xmm13, %xmm7
-; AVX512-NEXT: vmovdqa (%rdx), %xmm9
-; AVX512-NEXT: vmovdqa64 %xmm31, %xmm2
-; AVX512-NEXT: vpshufb %xmm2, %xmm9, %xmm10
-; AVX512-NEXT: vpor %xmm7, %xmm10, %xmm2
+; AVX512-NEXT: vmovdqa (%rsi), %xmm3
+; AVX512-NEXT: vpshufb %xmm6, %xmm3, %xmm2
+; AVX512-NEXT: vmovdqa64 %xmm3, %xmm31
+; AVX512-NEXT: vmovdqa (%rdi), %xmm4
+; AVX512-NEXT: vmovdqa64 %xmm18, %xmm3
+; AVX512-NEXT: vpshufb %xmm3, %xmm4, %xmm3
+; AVX512-NEXT: vmovdqa64 %xmm4, %xmm28
+; AVX512-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT: vmovdqa (%r9), %xmm2
+; AVX512-NEXT: vmovdqa (%rcx), %xmm3
+; AVX512-NEXT: vpshufb %xmm11, %xmm3, %xmm2
+; AVX512-NEXT: vmovdqa %xmm3, %xmm6
+; AVX512-NEXT: vmovdqa (%rdx), %xmm4
+; AVX512-NEXT: vpshufb %xmm12, %xmm4, %xmm3
+; AVX512-NEXT: vmovdqa64 %xmm4, %xmm18
+; AVX512-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT: vpshufb %xmm6, %xmm2, %xmm6
-; AVX512-NEXT: vmovdqa (%r8), %xmm5
-; AVX512-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT: vpshufb %xmm14, %xmm5, %xmm11
-; AVX512-NEXT: vpor %xmm6, %xmm11, %xmm6
-; AVX512-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vinserti64x4 $1, %ymm24, %zmm0, %zmm6
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512-NEXT: vpshufb %xmm7, %xmm11, %xmm11
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm11[0,1,0,1],zmm6[4,5,6,7]
+; AVX512-NEXT: vmovdqa (%r9), %xmm3
+; AVX512-NEXT: vmovdqa64 %xmm20, %xmm2
+; AVX512-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512-NEXT: vmovdqa %xmm3, %xmm8
+; AVX512-NEXT: vmovdqa (%r8), %xmm3
+; AVX512-NEXT: vmovdqa64 %xmm30, %xmm4
+; AVX512-NEXT: vpshufb %xmm4, %xmm3, %xmm4
+; AVX512-NEXT: vmovdqa %xmm3, %xmm9
+; AVX512-NEXT: vpor %xmm2, %xmm4, %xmm2
; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa (%rax), %xmm10
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm11 = xmm10[0,1,2,3,4,5,5,6]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm11, %ymm11
-; AVX512-NEXT: vpshufb {{.*#+}} ymm14 = zero,ymm1[13],zero,zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm14, %zmm11, %zmm24
-; AVX512-NEXT: vmovdqa64 %ymm19, %ymm2
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
-; AVX512-NEXT: vmovdqa64 %ymm2, %ymm18
-; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm5 = [13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm13[23],zero,zero,zero,zero,ymm13[26],zero,ymm13[24],zero,zero,zero,zero,ymm13[27],zero,ymm13[25]
+; AVX512-NEXT: vmovdqa64 %ymm17, %ymm3
+; AVX512-NEXT: vpshufb %ymm7, %ymm3, %ymm4
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm2
+; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm3[23,u,u,u],zero,ymm3[26],zero,ymm3[24,u,u,u],zero,ymm3[27],zero
+; AVX512-NEXT: vpshufb %ymm1, %ymm13, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa64 %ymm22, %ymm2
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
+; AVX512-NEXT: vmovdqa64 %ymm21, %ymm3
+; AVX512-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512-NEXT: vpshufb %ymm5, %ymm2, %ymm11
-; AVX512-NEXT: vmovdqa64 %ymm5, %ymm29
-; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512-NEXT: vpshufb {{.*#+}} ymm15 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29]
-; AVX512-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm31 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800,18446463693966278655,18446742978476179455,18446463693966278655,18446742978476179455]
-; AVX512-NEXT: vpternlogq $248, %ymm31, %ymm11, %ymm15
-; AVX512-NEXT: vmovdqa64 %xmm28, %xmm6
-; AVX512-NEXT: vmovdqa %xmm3, %xmm8
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm15[0,1,2,3],zmm11[0,1,0,1]
-; AVX512-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufhw {{.*#+}} ymm1 = ymm0[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512-NEXT: vmovdqa64 %ymm20, %ymm17
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,2,3,3,6,6,7,7]
-; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm11 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
-; AVX512-NEXT: vmovdqa64 %ymm23, %ymm5
-; AVX512-NEXT: vpshufb %ymm11, %ymm5, %ymm15
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm15, %zmm19
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29],zero,zero,zero
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %ymm11, %ymm0, %ymm1
-; AVX512-NEXT: vpshuflw $233, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm11 = mem[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[0,0,1,1,4,4,5,5]
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm11, %zmm23
-; AVX512-NEXT: vmovdqa64 %xmm30, %xmm0
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm2[23],zero,ymm2[21,22,23,26],zero,ymm2[24],zero,ymm2[28,29,26,27]
+; AVX512-NEXT: vmovdqa64 %ymm19, %ymm3
+; AVX512-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm30
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm15[25],zero,ymm15[23],zero,zero,zero,zero,ymm15[26],zero,ymm15[24],zero,zero
+; AVX512-NEXT: vmovdqa64 %ymm16, %ymm3
+; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm3[23],zero,ymm3[23,24,25,26],zero,ymm3[24],zero,ymm3[30,31]
+; AVX512-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpshufb %ymm10, %ymm15, %ymm1
+; AVX512-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa (%rax), %ymm4
+; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm15 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm1 = ymm4[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512-NEXT: vpermi2d %zmm0, %zmm1, %zmm15
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm9[8],xmm8[8],xmm9[9],xmm8[9],xmm9[10],xmm8[10],xmm9[11],xmm8[11],xmm9[12],xmm8[12],xmm9[13],xmm8[13],xmm9[14],xmm8[14],xmm9[15],xmm8[15]
+; AVX512-NEXT: vmovdqa64 %xmm9, %xmm21
+; AVX512-NEXT: vmovdqa64 %xmm8, %xmm22
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vmovdqa64 %xmm2, %xmm24
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm1[0,1,0,1],zmm0[4,5,6,7]
+; AVX512-NEXT: vmovdqa (%rax), %xmm11
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm11[0,1,2,3,4,5,5,6]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
+; AVX512-NEXT: vpshufb %ymm9, %ymm4, %ymm2
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm16
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3],xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512-NEXT: vpshufb %xmm0, %xmm2, %xmm2
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm27
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm12 = zmm0[2,3,2,3],zmm2[0,1,0,1]
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm15 = xmm13[8],xmm9[8],xmm13[9],xmm9[9],xmm13[10],xmm9[10],xmm13[11],xmm9[11],xmm13[12],xmm9[12],xmm13[13],xmm9[13],xmm13[14],xmm9[14],xmm13[15],xmm9[15]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512-NEXT: vpshufb %xmm0, %xmm15, %xmm3
-; AVX512-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512-NEXT: vpshufb %xmm0, %xmm3, %xmm3
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm26
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm17 = zmm0[2,3,2,3],zmm3[0,1,0,1]
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX512-NEXT: vmovdqa64 %xmm29, %xmm3
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512-NEXT: vpshufb %xmm0, %xmm13, %xmm13
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm25
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm29 = zmm0[2,3,2,3],zmm13[0,1,0,1]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512-NEXT: vmovdqa64 %ymm0, %ymm20
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX512-NEXT: vmovdqa %xmm6, %xmm8
+; AVX512-NEXT: vmovdqa64 %xmm18, %xmm7
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512-NEXT: vpshufb %xmm1, %xmm4, %xmm2
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm18
+; AVX512-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm30
-; AVX512-NEXT: vmovdqa64 %xmm21, %xmm0
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; AVX512-NEXT: vmovdqa64 %xmm25, %xmm3
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512-NEXT: vpshufb %xmm4, %xmm1, %xmm1
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm19
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm14[8],xmm10[8],xmm14[9],xmm10[9],xmm14[10],xmm10[10],xmm14[11],xmm10[11],xmm14[12],xmm10[12],xmm14[13],xmm10[13],xmm14[14],xmm10[14],xmm14[15],xmm10[15]
+; AVX512-NEXT: vmovdqa64 %xmm31, %xmm6
+; AVX512-NEXT: vmovdqa64 %xmm28, %xmm4
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm14 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512-NEXT: vpshufb %xmm0, %xmm14, %xmm13
+; AVX512-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm21
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm8[8],xmm6[8],xmm8[9],xmm6[9],xmm8[10],xmm6[10],xmm8[11],xmm6[11],xmm8[12],xmm6[12],xmm8[13],xmm6[13],xmm8[14],xmm6[14],xmm8[15],xmm6[15]
-; AVX512-NEXT: vpshufb %xmm7, %xmm0, %xmm0
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm26 = zmm26[0,1,0,1],zmm0[0,1,0,1]
-; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm6
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
-; AVX512-NEXT: vmovdqa64 %ymm0, %ymm28
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm0[23],zero,ymm0[21,22,23,26],zero,ymm0[24],zero,ymm0[28,29,26,27]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm0[18,19,20,21],zero,ymm0[19],zero,ymm0[25,26,27,22],zero,ymm0[20],zero
-; AVX512-NEXT: vmovdqa64 %ymm0, %ymm20
-; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[21],zero,ymm5[19],zero,zero,zero,zero,ymm5[22],zero,ymm5[20],zero,zero
-; AVX512-NEXT: vmovdqa64 %ymm27, %ymm1
-; AVX512-NEXT: vmovdqa64 %ymm29, %ymm0
-; AVX512-NEXT: vpshufb %ymm0, %ymm1, %ymm4
-; AVX512-NEXT: vmovdqa %ymm2, %ymm0
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
-; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
-; AVX512-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %ymm2, %ymm1, %ymm0
-; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-NEXT: vpshufb %ymm2, %ymm0, %ymm1
-; AVX512-NEXT: vpshufb %ymm2, %ymm14, %ymm2
-; AVX512-NEXT: vmovdqa64 %ymm2, %ymm27
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
-; AVX512-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm2, %ymm14, %ymm14
-; AVX512-NEXT: vmovdqa64 %ymm14, %ymm25
-; AVX512-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vmovdqa64 %ymm0, %ymm29
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = zero,ymm0[13],zero,zero,zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,ymm0[17],zero,zero
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm14 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm14 = ymm14[0,1,1,3,4,5,5,7]
-; AVX512-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,2]
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm14, %ymm14
-; AVX512-NEXT: vinserti64x4 $1, %ymm14, %zmm2, %zmm2
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1],xmm12[2],xmm3[2],xmm12[3],xmm3[3],xmm12[4],xmm3[4],xmm12[5],xmm3[5],xmm12[6],xmm3[6],xmm12[7],xmm3[7]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512-NEXT: vpshufb %xmm14, %xmm15, %xmm15
-; AVX512-NEXT: vpshufb %xmm14, %xmm12, %xmm12
-; AVX512-NEXT: vinserti32x4 $2, %xmm22, %zmm12, %zmm0
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3],xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm14 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512-NEXT: vpshufb %xmm14, %xmm11, %xmm9
-; AVX512-NEXT: vpshufb %xmm14, %xmm13, %xmm11
-; AVX512-NEXT: vpermq {{.*#+}} ymm13 = ymm6[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm14 = ymm18[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm22 = ymm4[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm8[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm7[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm5[2,3,2,3]
-; AVX512-NEXT: vmovdqa64 %ymm17, %ymm3
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm3 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,0,1,1,4,4,5,5]
-; AVX512-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm12 # 16-byte Folded Reload
-; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX512-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX512-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1],xmm5[2],mem[2],xmm5[3],mem[3],xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512-NEXT: vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm11 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm11 = zmm5[0,1,0,1],mem[0,1,0,1]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm10[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm7 = xmm10[1,1,0,0,4,5,6,7]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,0]
-; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm7, %zmm7
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm10 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpermq {{.*#+}} zmm5 = zmm19[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpternlogq $236, %zmm31, %zmm10, %zmm5
-; AVX512-NEXT: vpandq %ymm31, %ymm22, %ymm10
-; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm1, %zmm1
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm10 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vporq %zmm10, %zmm1, %zmm1
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm10 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655,18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512-NEXT: vpand %ymm6, %ymm10, %ymm6
-; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm6 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vporq %zmm6, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm5
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512-NEXT: vpternlogq $226, %zmm1, %zmm0, %zmm5
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm1 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpermq {{.*#+}} zmm6 = zmm23[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpternlogq $236, %zmm10, %zmm1, %zmm6
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm1 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm17 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vporq %zmm1, %zmm17, %zmm1
-; AVX512-NEXT: vpternlogq $226, %zmm6, %zmm0, %zmm1
-; AVX512-NEXT: vpermq {{.*#+}} zmm6 = zmm30[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vpermq {{.*#+}} zmm17 = zmm21[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vpternlogq $226, %zmm6, %zmm0, %zmm17
-; AVX512-NEXT: vpternlogq $248, %ymm10, %ymm13, %ymm14
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm3[2,3,2,3]
-; AVX512-NEXT: vpternlogq $236, %ymm10, %ymm4, %ymm0
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm9[0,1,0,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm14, %zmm3
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm4 = mem[2,3,2,3]
-; AVX512-NEXT: vpshufhw $190, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm6 = mem[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,2,3,3,6,6,7,7]
-; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm6
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm15[0,1,0,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm6, %zmm4
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512-NEXT: vpternlogq $184, %zmm3, %zmm6, %zmm4
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm28[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm20[2,3,2,3]
-; AVX512-NEXT: vpor %ymm3, %ymm8, %ymm3
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm8, %zmm3
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm0
-; AVX512-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX512-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm13 = mem[2,3,2,3]
-; AVX512-NEXT: vpshuflw $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
-; AVX512-NEXT: # xmm14 = mem[1,1,0,0,4,5,6,7]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,0]
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm15 = mem[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm18 = ymm27[2,3,2,3]
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm19 = mem[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm20 = ymm25[2,3,2,3]
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm21 = mem[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm22 = ymm29[2,3,2,3]
-; AVX512-NEXT: vpternlogq $226, %zmm3, %zmm6, %zmm0
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm3 # 32-byte Folded Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm6 # 32-byte Folded Reload
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm6
+; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
+; AVX512-NEXT: vmovdqa64 %xmm24, %xmm2
+; AVX512-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm14 = zmm2[0,1,0,1],zmm1[0,1,0,1]
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX512-NEXT: vpshufb %ymm9, %ymm10, %ymm1
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm9 = ymm10[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm10 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; AVX512-NEXT: vpternlogq $226, %zmm12, %zmm10, %zmm17
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm2 = mem[2,3,2,3]
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm28 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm28 = mem[2,3,2,3]
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm24
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm24
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm14[0,0,1,0]
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm13, %zmm3
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm3
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm3
+; AVX512-NEXT: vinserti64x4 $1, %ymm28, %zmm3, %zmm28
+; AVX512-NEXT: vpternlogq $226, %zmm2, %zmm10, %zmm28
+; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm9[2,2,3,2]
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm10
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; AVX512-NEXT: vmovdqa64 %xmm27, %xmm2
+; AVX512-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm27 # 16-byte Folded Reload
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; AVX512-NEXT: vmovdqa64 %xmm26, %xmm1
+; AVX512-NEXT: vpshufb %xmm1, %xmm2, %xmm2
+; AVX512-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload
+; AVX512-NEXT: vmovdqa64 %xmm21, %xmm1
+; AVX512-NEXT: vmovdqa64 %xmm22, %xmm3
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX512-NEXT: vmovdqa64 %xmm25, %xmm1
+; AVX512-NEXT: vpshufb %xmm1, %xmm8, %xmm8
+; AVX512-NEXT: vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm8 = zmm8[0,1,0,1],mem[0,1,0,1]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm11[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm5 = xmm11[1,1,0,0,4,5,6,7]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
+; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm5, %zmm5
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm9 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm11 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vporq %zmm9, %zmm11, %zmm9
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm11 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm12 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vporq %zmm11, %zmm12, %zmm11
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm11
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm9 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm12 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vporq %zmm9, %zmm12, %zmm9
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm12 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
+; AVX512-NEXT: vpternlogq $184, %zmm11, %zmm12, %zmm9
+; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm13[0,1,0,1]
+; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm18[0,1,0,1]
+; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm20[2,3,2,3]
+; AVX512-NEXT: vpshuflw $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm1 = mem[1,1,0,0,4,5,6,7]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; AVX512-NEXT: vpermq {{.*#+}} zmm13 = zmm19[0,1,0,1,4,5,4,5]
+; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512-NEXT: vpternlogq $226, %zmm13, %zmm12, %zmm0
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm13 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm21 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vporq %zmm13, %zmm21, %zmm13
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm20 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vpermq {{.*#+}} zmm18 = zmm30[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vporq %zmm20, %zmm18, %zmm18
+; AVX512-NEXT: vpternlogq $226, %zmm13, %zmm12, %zmm18
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm11 # 32-byte Folded Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 32-byte Folded Reload
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm4
+; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm23, %zmm16
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm16
; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
; AVX512-NEXT: # zmm4 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm4
-; AVX512-NEXT: vporq %ymm15, %ymm18, %ymm5
-; AVX512-NEXT: vporq %ymm19, %ymm20, %ymm6
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[0,1,2,3]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm16
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm16
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm26
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm26
-; AVX512-NEXT: vporq %ymm21, %ymm22, %ymm1
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,2,3],zmm1[0,1,2,3]
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm4
+; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,0]
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm29, %zmm1
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm1
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm14
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm14
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vshufi64x2 $84, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm0 = zmm0[0,1,2,3],mem[2,3,2,3]
+; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm10
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm28, %zmm10
+; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm27[0,1,0,1,4,5,4,5]
+; AVX512-NEXT: vpermq {{.*#+}} zmm2 = zmm2[0,1,0,1,4,5,4,5]
; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm0 = mem[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vpermq {{.*#+}} zmm1 = zmm12[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm7[0,0,1,0,4,4,5,4]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm0
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm5[0,0,1,0,4,4,5,4]
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm0
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512-NEXT: vshufi64x2 $85, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm2 = zmm2[2,3,2,3],mem[2,3,2,3]
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm15
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm15
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: vmovdqa64 %zmm15, 128(%rax)
; AVX512-NEXT: vmovdqa64 %zmm0, (%rax)
-; AVX512-NEXT: vmovdqa64 %zmm2, 320(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm26, 256(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm16, 128(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm10, 320(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm14, 256(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm1, 192(%rax)
; AVX512-NEXT: vmovdqa64 %zmm4, 384(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm3, 192(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm24, 64(%rax)
-; AVX512-NEXT: addq $1448, %rsp # imm = 0x5A8
+; AVX512-NEXT: vmovdqa64 %zmm16, 64(%rax)
+; AVX512-NEXT: addq $1720, %rsp # imm = 0x6B8
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: store_i8_stride7_vf64:
; AVX512-FCP: # %bb.0:
-; AVX512-FCP-NEXT: subq $1256, %rsp # imm = 0x4E8
-; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
-; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
-; AVX512-FCP-NEXT: vmovdqa %ymm1, %ymm14
-; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
-; AVX512-FCP-NEXT: vmovdqa %ymm2, %ymm13
+; AVX512-FCP-NEXT: subq $1432, %rsp # imm = 0x598
+; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %ymm2
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero,zero
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512-FCP-NEXT: # ymm15 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm15, %ymm3, %ymm1
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm16
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm25
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [25,128,23,0,0,0,128,26,128,24,0,0,0,128,27,128,25,128,23,0,0,0,128,26,128,24,0,0,0,128,27,128]
+; AVX512-FCP-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm27
+; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm18
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %ymm2
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
+; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %ymm3
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm28
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm3[30],zero,ymm3[28,u,u,u],zero,ymm3[31],zero,ymm3[29,u]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm19
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512-FCP-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm24
; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm17
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%r9), %ymm4
-; AVX512-FCP-NEXT: vmovdqa 32(%r8), %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm18
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[25],zero,ymm4[23],zero,zero,zero,zero,ymm4[26],zero,ymm4[24],zero,zero
-; AVX512-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%r8), %ymm3
+; AVX512-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[27],zero,zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29]
+; AVX512-FCP-NEXT: vmovdqa 32(%r9), %ymm2
+; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm23
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512-FCP-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm22
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-FCP-NEXT: vmovdqa 32(%rax), %ymm1
; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,zero,zero,ymm1[18]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm23
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,14],zero,ymm1[12,13,0,1,14,15],zero,ymm1[3,12,13,2,3,16],zero,ymm1[30,31,28,29,16,17],zero,ymm1[31,18,19,28,29,18],zero
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm5
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm5[14],zero,zero,zero,zero,zero,zero,ymm5[15],zero,zero,zero,zero,zero,zero,ymm5[16],zero,zero,zero,zero,zero,zero,ymm5[17],zero,zero,zero,zero,zero,zero,ymm5[18]
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm4
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[0,1,14],zero,ymm4[12,13,0,1,14,15],zero,ymm4[3,12,13,2,3,16],zero,ymm4[30,31,28,29,16,17],zero,ymm4[31,18,19,28,29,18],zero
; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm1
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm26
+; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm3
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero,zero
; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm3
-; AVX512-FCP-NEXT: vpor %ymm0, %ymm3, %ymm0
-; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm0
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,0,1,14],zero,ymm2[14,15,0,1,14,15],zero,ymm2[13,14,15,16,17,16],zero,ymm2[30,31,30,31,16,17],zero,ymm2[31,28,29,30,31]
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm3
-; AVX512-FCP-NEXT: vmovdqa64 %ymm5, %ymm25
-; AVX512-FCP-NEXT: vmovdqa (%r9), %ymm5
-; AVX512-FCP-NEXT: vmovdqu %ymm5, (%rsp) # 32-byte Spill
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
-; AVX512-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm5, %ymm5
-; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm30
-; AVX512-FCP-NEXT: vporq %ymm3, %ymm5, %ymm24
-; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %xmm3
-; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %xmm6
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm5
-; AVX512-FCP-NEXT: vmovdqa64 %xmm6, %xmm28
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm3, %xmm6
-; AVX512-FCP-NEXT: vmovdqa64 %xmm9, %xmm19
-; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm29
-; AVX512-FCP-NEXT: vpor %xmm5, %xmm6, %xmm3
-; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm10
-; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %xmm6
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm6, %xmm5
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm10, %xmm9
-; AVX512-FCP-NEXT: vmovdqa64 %xmm10, %xmm27
-; AVX512-FCP-NEXT: vpor %xmm5, %xmm9, %xmm5
-; AVX512-FCP-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%r9), %xmm15
-; AVX512-FCP-NEXT: vmovdqa 32(%r8), %xmm10
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm15, %xmm9
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm10, %xmm12
-; AVX512-FCP-NEXT: vmovdqa64 %xmm0, %xmm21
-; AVX512-FCP-NEXT: vporq %xmm9, %xmm12, %xmm22
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm13, %ymm7
-; AVX512-FCP-NEXT: vmovdqa64 %ymm13, %ymm20
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm14, %ymm1
-; AVX512-FCP-NEXT: vpor %ymm7, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 %ymm16, %ymm7
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,ymm7[14],zero,zero,zero,zero,zero,zero,ymm7[15],zero,zero,zero,zero,zero,zero,ymm7[16],zero,zero,zero,zero,zero,zero,ymm7[17],zero,zero,zero,zero,zero,zero,ymm7[18]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm7
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[0,1,14],zero,ymm7[12,13,0,1,14,15],zero,ymm7[3,12,13,2,3,16],zero,ymm7[30,31,28,29,16,17],zero,ymm7[31,18,19,28,29,18],zero
-; AVX512-FCP-NEXT: vpor %ymm1, %ymm7, %ymm1
-; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm1
-; AVX512-FCP-NEXT: vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm7
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm30, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; AVX512-FCP-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm13
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm13, %xmm0
+; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm1
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero
+; AVX512-FCP-NEXT: vmovdqa (%r9), %ymm0
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[13,u,u,u,u,u],zero,ymm0[14,u,u,u,u,u],zero,ymm0[15,u,u,u,u,u],zero,ymm0[16,u,u,u,u,u],zero,ymm0[17,u,u,u]
+; AVX512-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
+; AVX512-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %xmm9
+; AVX512-FCP-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %xmm7
+; AVX512-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm7
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vpor %xmm7, %xmm9, %xmm7
+; AVX512-FCP-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm11
+; AVX512-FCP-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %xmm9
+; AVX512-FCP-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm11, %xmm11
+; AVX512-FCP-NEXT: vpor %xmm9, %xmm11, %xmm9
+; AVX512-FCP-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%r9), %xmm9
+; AVX512-FCP-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%r8), %xmm14
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm14, %xmm11
+; AVX512-FCP-NEXT: vmovdqa64 %xmm14, %xmm30
+; AVX512-FCP-NEXT: vpor %xmm9, %xmm11, %xmm9
+; AVX512-FCP-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm7
+; AVX512-FCP-NEXT: vmovdqa64 %xmm9, %xmm26
; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm9
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm9, %xmm1
-; AVX512-FCP-NEXT: vporq %xmm0, %xmm1, %xmm31
-; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm14
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm14, %xmm0
-; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm8
-; AVX512-FCP-NEXT: vmovdqa64 %xmm19, %xmm1
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm8, %xmm1
-; AVX512-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%r9), %xmm1
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm0
-; AVX512-FCP-NEXT: vmovdqa %xmm1, %xmm3
-; AVX512-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%r8), %xmm4
-; AVX512-FCP-NEXT: vmovdqa64 %xmm21, %xmm1
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm4, %xmm1
-; AVX512-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm10
+; AVX512-FCP-NEXT: vmovdqa64 %xmm9, %xmm21
+; AVX512-FCP-NEXT: vpor %xmm7, %xmm10, %xmm7
+; AVX512-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm7
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm6
+; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm10
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm10, %xmm8
+; AVX512-FCP-NEXT: vpor %xmm6, %xmm8, %xmm6
+; AVX512-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa (%r9), %xmm11
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm12
+; AVX512-FCP-NEXT: vmovdqa (%r8), %xmm6
+; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm6, %xmm13
+; AVX512-FCP-NEXT: vmovdqa %xmm6, %xmm9
+; AVX512-FCP-NEXT: vporq %xmm12, %xmm13, %xmm31
+; AVX512-FCP-NEXT: vpshufb %ymm15, %ymm4, %ymm12
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
+; AVX512-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm13
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm6
+; AVX512-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa64 %ymm27, %ymm6
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm12
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
+; AVX512-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm4, %ymm13
+; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm20
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm29
+; AVX512-FCP-NEXT: vmovdqa64 %ymm28, %ymm6
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm12
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
+; AVX512-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm13
+; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm16
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm28
+; AVX512-FCP-NEXT: vmovdqa64 %ymm24, %ymm6
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm13
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
+; AVX512-FCP-NEXT: # ymm12 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm15
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm15, %zmm27
+; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm6
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm13
+; AVX512-FCP-NEXT: vmovdqa64 %ymm22, %ymm6
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm15
+; AVX512-FCP-NEXT: vpor %ymm13, %ymm15, %ymm6
+; AVX512-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
+; AVX512-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm15
+; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm24
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
+; AVX512-FCP-NEXT: # ymm13 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm13, %ymm1, %ymm14
+; AVX512-FCP-NEXT: vpor %ymm15, %ymm14, %ymm6
+; AVX512-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm4[28],zero,ymm4[30,31,30,31],zero,ymm4[29],zero,ymm4[31,28,29]
+; AVX512-FCP-NEXT: vpor %ymm5, %ymm4, %ymm4
+; AVX512-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm3[30],zero,ymm3[28,u,u,u],zero,ymm3[31],zero,ymm3[29,u]
+; AVX512-FCP-NEXT: vporq %ymm2, %ymm3, %ymm23
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm0[27,u,u,u],zero,ymm0[30],zero,ymm0[28,u,u,u],zero,ymm0[31],zero
+; AVX512-FCP-NEXT: vporq %ymm1, %ymm0, %ymm22
+; AVX512-FCP-NEXT: vmovdqa64 %ymm19, %ymm2
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm3
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[0,1,0,1,14],zero,ymm3[14,15,0,1,14,15],zero,ymm3[13,14,15,16,17,16],zero,ymm3[30,31,30,31,16,17],zero,ymm3[31,28,29,30,31]
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27],zero,ymm0[25]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm12
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm12[21],zero,ymm12[19],zero,zero,zero,zero,ymm12[22],zero,ymm12[20],zero,zero
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-FCP-NEXT: vmovdqa64 %ymm16, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm3, %ymm1
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm3
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero,zero,zero,ymm3[18]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm2
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[12,13,0,1,14,15],zero,ymm2[3,12,13,2,3,16],zero,ymm2[30,31,28,29,16,17],zero,ymm2[31,18,19,28,29,18],zero
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 %ymm26, %ymm11
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm11[25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[18],zero,zero,zero,zero,ymm11[21],zero,ymm11[19],zero,zero,zero,zero,ymm11[22],zero,ymm11[20]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm0
+; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm1
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512-FCP-NEXT: vmovdqa64 %ymm24, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512-FCP-NEXT: vpshufb %ymm13, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512-FCP-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
-; AVX512-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm19
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm5, %ymm30
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[13,u,u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u]
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm24, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm9[8],xmm11[8],xmm9[9],xmm11[9],xmm9[10],xmm11[10],xmm9[11],xmm11[11],xmm9[12],xmm11[12],xmm9[13],xmm11[13],xmm9[14],xmm11[14],xmm9[15],xmm11[15]
+; AVX512-FCP-NEXT: vmovdqa %xmm9, %xmm8
+; AVX512-FCP-NEXT: vmovdqa64 %xmm11, %xmm17
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [2,2,3,3,2,2,3,3]
-; AVX512-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [2,2,3,3,2,2,3,3]
+; AVX512-FCP-NEXT: # ymm6 = mem[0,1,0,1]
; AVX512-FCP-NEXT: vmovdqa (%rax), %xmm0
-; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512-FCP-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512-FCP-NEXT: vmovdqa (%rax), %ymm4
-; AVX512-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
-; AVX512-FCP-NEXT: vmovdqa64 %ymm5, %ymm18
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm24
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
-; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm23
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14]
-; AVX512-FCP-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm25
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm26 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512-FCP-NEXT: # ymm26 = mem[0,1,2,3,0,1,2,3]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm26, %ymm0, %ymm2
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3],xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm2[0,1,2,3],zmm0[0,1,0,1]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm29, %xmm0
-; AVX512-FCP-NEXT: vmovdqa64 %xmm28, %xmm2
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm14[8],xmm8[8],xmm14[9],xmm8[9],xmm14[10],xmm8[10],xmm14[11],xmm8[11],xmm14[12],xmm8[12],xmm14[13],xmm8[13],xmm14[14],xmm8[14],xmm14[15],xmm8[15]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
-; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm28
-; AVX512-FCP-NEXT: vmovdqa64 %xmm27, %xmm0
-; AVX512-FCP-NEXT: vmovdqa %xmm6, %xmm2
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm13[8],xmm9[8],xmm13[9],xmm9[9],xmm13[10],xmm9[10],xmm13[11],xmm9[11],xmm13[12],xmm9[12],xmm13[13],xmm9[13],xmm13[14],xmm9[14],xmm13[15],xmm9[15]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm0
-; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm0, %zmm27
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm10[8],xmm15[8],xmm10[9],xmm15[9],xmm10[10],xmm15[10],xmm10[11],xmm15[11],xmm10[12],xmm15[12],xmm10[13],xmm15[13],xmm10[14],xmm15[14],xmm10[15],xmm15[15]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm22[0,1,0,1],zmm2[0,1,0,1]
-; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%rax), %xmm0
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm0, %xmm29
-; AVX512-FCP-NEXT: vpermd %ymm2, %ymm1, %ymm0
-; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm11, %ymm5
-; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm22 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512-FCP-NEXT: vmovdqa64 %ymm19, %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm20 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm30, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm4
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm16, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm30
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm19 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero,zero
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm3
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [4,5,4,5,5,7,4,5]
-; AVX512-FCP-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm16
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3],xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm6
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
-; AVX512-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm15
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm17 = ymm15[2,3,2,3]
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm2
-; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm13
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm18 = ymm13[2,3,2,3]
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm31, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3],xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm7
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm0, %xmm13
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm0[23],zero,ymm0[23,24,25,26],zero,ymm0[24],zero,ymm0[30,31]
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,6]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm0, %xmm19
+; AVX512-FCP-NEXT: vpermd %ymm1, %ymm6, %ymm1
+; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
+; AVX512-FCP-NEXT: vmovdqa (%rax), %ymm12
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm12[13],zero,zero,zero,zero,zero,zero,ymm12[14],zero,zero,zero,zero,zero,zero,ymm12[15],zero,zero,zero,zero,zero,zero,ymm12[16],zero,zero,zero,zero,zero,zero,ymm12[17],zero,zero
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm20
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm7[8],xmm10[8],xmm7[9],xmm10[9],xmm7[10],xmm10[10],xmm7[11],xmm10[11],xmm7[12],xmm10[12],xmm7[13],xmm10[13],xmm7[14],xmm10[14],xmm7[15],xmm10[15]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm0
; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm25
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm8[2,3,2,3]
-; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm12[30],zero,ymm12[28],zero,zero,zero,zero,ymm12[31],zero,ymm12[29],zero,zero,zero
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm9
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm23[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm11[2,3,2,3]
-; AVX512-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm31 # 16-byte Folded Reload
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
-; AVX512-FCP-NEXT: # ymm11 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm14
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm30, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm13
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT: vpor %ymm12, %ymm9, %ymm9
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm9, %zmm6
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm9, %ymm5, %ymm0
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm6, %zmm0, %zmm7
-; AVX512-FCP-NEXT: vpor %ymm4, %ymm10, %ymm4
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX512-FCP-NEXT: vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm16 # 16-byte Reload
+; AVX512-FCP-NEXT: vmovdqa64 %xmm16, %xmm2
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm11[8],xmm2[8],xmm11[9],xmm2[9],xmm11[10],xmm2[10],xmm11[11],xmm2[11],xmm11[12],xmm2[12],xmm11[13],xmm2[13],xmm11[14],xmm2[14],xmm11[15],xmm2[15]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm1
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm4
-; AVX512-FCP-NEXT: vpor %ymm8, %ymm14, %ymm2
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm18
+; AVX512-FCP-NEXT: vmovdqa64 %xmm26, %xmm2
+; AVX512-FCP-NEXT: vmovdqa64 %xmm21, %xmm0
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm3
+; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm21
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm13 = xmm14[8],xmm4[8],xmm14[9],xmm4[9],xmm14[10],xmm4[10],xmm14[11],xmm4[11],xmm14[12],xmm4[12],xmm14[13],xmm4[13],xmm14[14],xmm4[14],xmm14[15],xmm4[15]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm13, %xmm1
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm5
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm4, %zmm0, %zmm5
-; AVX512-FCP-NEXT: vpandq %ymm9, %ymm22, %ymm0
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm20, %zmm0
-; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm2 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm2, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpandq %ymm26, %ymm19, %ymm2
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm2, %zmm2
-; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm4 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm4, %zmm2, %zmm2
-; AVX512-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512-FCP-NEXT: vpandq %ymm26, %ymm18, %ymm0
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm15, %zmm0
-; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm4 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm4, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm2, %zmm4, %zmm0
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm28[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm27[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm2, %zmm4, %zmm8
-; AVX512-FCP-NEXT: vpandq %ymm26, %ymm13, %ymm2
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm2 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm2, %zmm1, %zmm1
-; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm2 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm6 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm2, %zmm6, %zmm9
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm1, %zmm4, %zmm9
-; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512-FCP-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX512-FCP-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512-FCP-NEXT: vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm18 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm18 = zmm1[0,1,0,1],mem[0,1,0,1]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm29, %xmm3
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[1,1,0,0,4,5,6,7]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,1,0,1,2,0,0,1]
-; AVX512-FCP-NEXT: vpermd %ymm2, %ymm4, %ymm19
-; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm1[1,1,0,0,4,5,6,7]
-; AVX512-FCP-NEXT: vpermd %ymm6, %ymm4, %ymm17
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm10
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm6
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
-; AVX512-FCP-NEXT: # ymm11 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm12
-; AVX512-FCP-NEXT: vmovdqu (%rsp), %ymm1 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm11
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm14 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
-; AVX512-FCP-NEXT: # ymm14 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm1, %ymm15
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm2[23],zero,ymm2[23,24,25,26],zero,ymm2[24],zero,ymm2[30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm14
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm4 = ymm3[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [4,5,4,5,5,7,4,5]
-; AVX512-FCP-NEXT: vpermd %ymm4, %ymm2, %ymm20
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm22 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm22
-; AVX512-FCP-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm23 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,0]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm13
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX512-FCP-NEXT: vmovdqa64 %xmm30, %xmm5
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm5[8],xmm15[8],xmm5[9],xmm15[9],xmm5[10],xmm15[10],xmm5[11],xmm15[11],xmm5[12],xmm15[12],xmm5[13],xmm15[13],xmm5[14],xmm15[14],xmm5[15],xmm15[15]
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,0,1],zmm1[0,1,0,1]
+; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%rax), %xmm3
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,4,5,5,6]
+; AVX512-FCP-NEXT: vpermd %ymm1, %ymm6, %ymm26
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX512-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm24 # 16-byte Folded Reload
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm10[0],xmm7[0],xmm10[1],xmm7[1],xmm10[2],xmm7[2],xmm10[3],xmm7[3],xmm10[4],xmm7[4],xmm10[5],xmm7[5],xmm10[6],xmm7[6],xmm10[7],xmm7[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
+; AVX512-FCP-NEXT: vmovdqa64 %xmm17, %xmm2
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3],xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm8, %xmm8
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,0,1],zmm31[0,1,0,1]
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm14, %xmm1
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm14 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm31 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vporq %zmm14, %zmm31, %zmm14
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm31 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm30 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vporq %zmm31, %zmm30, %zmm30
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm30
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm14 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm31 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vporq %zmm14, %zmm31, %zmm14
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm31 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm30, %zmm31, %zmm14
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm18[0,1,0,1,4,5,4,5]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm13 = zmm13[0,1,0,1,4,5,4,5]
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm7, %zmm31, %zmm13
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm7 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm29 = zmm29[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vporq %zmm7, %zmm29, %zmm7
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm28 = zmm28[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm27 = zmm27[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vporq %zmm28, %zmm27, %zmm27
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm7, %zmm31, %zmm27
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm2[2,3,2,3],zmm1[0,1,0,1]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm16, %xmm2
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm7
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm23[2,3,2,3],zmm7[0,1,0,1]
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm5[0],xmm15[0],xmm5[1],xmm15[1],xmm5[2],xmm15[2],xmm5[3],xmm15[3],xmm5[4],xmm15[4],xmm5[5],xmm15[5],xmm5[6],xmm15[6],xmm5[7],xmm15[7]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm10, %xmm10
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm22[2,3,2,3],zmm10[0,1,0,1]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm19, %xmm2
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm2[1,1,0,0,4,5,6,7]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm17 = [0,1,0,1,2,0,0,1]
+; AVX512-FCP-NEXT: vpermd %ymm11, %ymm17, %ymm28
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm6
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm3[1,1,0,0,4,5,6,7]
+; AVX512-FCP-NEXT: vpermd %ymm4, %ymm17, %ymm17
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm2 = ymm12[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm23 = [4,5,4,5,5,7,4,5]
+; AVX512-FCP-NEXT: vpermd %ymm2, %ymm23, %ymm2
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = zero,ymm15[13],zero,zero,zero,zero,zero,zero,ymm15[14],zero,zero,zero,zero,zero,zero,ymm15[15],zero,zero,zero,zero,zero,zero,ymm15[16],zero,zero,zero,zero,zero,zero,ymm15[17],zero,zero
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm15 = ymm15[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512-FCP-NEXT: vpermd %ymm15, %ymm23, %ymm15
+; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm15
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm11, %zmm11
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm15 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm1, %zmm15, %zmm7
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: # ymm1 = mem[2,3,2,3]
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: # ymm18 = mem[2,3,2,3]
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm18, %zmm3, %zmm18
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm21[0,1,0,1]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm19 = ymm25[0,1,0,1]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,0]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm23 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm23
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm24
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm23, %zmm24
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm19, %zmm2, %zmm0
-; AVX512-FCP-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm21
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm21
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm0
-; AVX512-FCP-NEXT: vpor %ymm12, %ymm15, %ymm2
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm7[0,1,2,3],zmm2[0,1,2,3]
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm16
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm16
-; AVX512-FCP-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm2 = mem[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm31[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm5
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm17, %zmm2
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm2
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm2
-; AVX512-FCP-NEXT: vpor %ymm1, %ymm13, %ymm1
-; AVX512-FCP-NEXT: vpor %ymm11, %ymm14, %ymm5
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,2,3],zmm1[0,1,2,3]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm20, %zmm4
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm4
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm4
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm1, %zmm15, %zmm18
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm1 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm1
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm14 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm14
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm20
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm20
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm26, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm0
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm24[0,1,0,1,4,5,4,5]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm9[0,1,0,1,4,5,4,5]
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm9
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm28, %zmm5
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm5
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm5
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vshufi64x2 $85, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm6 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm6 = zmm3[2,3,2,3],mem[2,3,2,3]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm2, %zmm2
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm2
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm27, %zmm2
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm4, %zmm4
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm22, %zmm4
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm4
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vshufi64x2 $84, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm6 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm6 = zmm3[0,1,2,3],mem[2,3,2,3]
+; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm11
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm11
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vmovdqa64 %zmm4, 128(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm2, (%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm16, 320(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm11, 320(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm4, 192(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 128(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm5, (%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 256(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm21, 192(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm24, 64(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm22, 384(%rax)
-; AVX512-FCP-NEXT: addq $1256, %rsp # imm = 0x4E8
+; AVX512-FCP-NEXT: vmovdqa64 %zmm20, 64(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 384(%rax)
+; AVX512-FCP-NEXT: addq $1432, %rsp # imm = 0x598
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: store_i8_stride7_vf64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: subq $1448, %rsp # imm = 0x5A8
-; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,zero,zero,ymm1[18]
-; AVX512DQ-NEXT: vmovdqa %ymm1, %ymm10
-; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: subq $1720, %rsp # imm = 0x6B8
+; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm7
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm7, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm26
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[12,13,0,1,14,15],zero,ymm2[3,12,13,2,3,16],zero,ymm2[30,31,28,29,16,17],zero,ymm2[31,18,19,28,29,18],zero
-; AVX512DQ-NEXT: vmovdqa %ymm2, %ymm14
-; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm25
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm18
; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm6
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm6, %ymm0
-; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm23
+; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm14
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm14, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm28
; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm8
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm8, %ymm1
-; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm17
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm8, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm23
+; AVX512DQ-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa (%r8), %ymm0
-; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vmovdqa (%r8), %ymm15
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm18
-; AVX512DQ-NEXT: vmovdqa (%r9), %ymm1
-; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm15, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm24
+; AVX512DQ-NEXT: vmovdqa (%r9), %ymm2
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
; AVX512DQ-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm22
-; AVX512DQ-NEXT: vporq %ymm0, %ymm1, %ymm24
-; AVX512DQ-NEXT: vmovdqa 32(%r9), %ymm11
-; AVX512DQ-NEXT: vmovdqa 32(%r8), %ymm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
-; AVX512DQ-NEXT: vmovdqa %ymm1, %ymm13
-; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm11[25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm27
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm19
+; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm10
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128]
+; AVX512DQ-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm6, %ymm10, %ymm0
+; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm9
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm9[23],zero,zero,zero,zero,ymm9[26],zero,ymm9[24],zero,zero,zero,zero,ymm9[27],zero,ymm9[25]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa 32(%rcx), %ymm7
-; AVX512DQ-NEXT: vmovdqa 32(%rdx), %ymm9
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero
-; AVX512DQ-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [12,13,14,128,12,128,14,15,14,15,128,13,128,15,12,13,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm9, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm16
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm10[23,u,u,u],zero,ymm10[26],zero,ymm10[24,u,u,u],zero,ymm10[27],zero
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa 32(%rdx), %ymm5
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128]
+; AVX512DQ-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm5, %ymm2
+; AVX512DQ-NEXT: vmovdqa 32(%rcx), %ymm4
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[25],zero,ymm4[23],zero,zero,zero,zero,ymm4[26],zero,ymm4[24],zero,zero,zero,zero
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [27,0,0,0,128,30,128,28,0,0,0,128,31,128,29,0,27,0,0,0,128,30,128,28,0,0,0,128,31,128,29,0]
; AVX512DQ-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm7, %ymm2
-; AVX512DQ-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm5
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm4
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512DQ-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm2
-; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm20
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm2
+; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm20
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa 32(%r8), %ymm3
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29]
+; AVX512DQ-NEXT: # ymm0 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm3, %ymm11
+; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm21
+; AVX512DQ-NEXT: vmovdqa 32(%r9), %ymm2
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm12 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm12, %zmm11
+; AVX512DQ-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [29,128,27,0,0,0,128,30,128,28,0,0,0,128,31,128,29,128,27,0,0,0,128,30,128,28,0,0,0,128,31,128]
+; AVX512DQ-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm2, %ymm12
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm13 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm3[23],zero,ymm3[23,24,25,26],zero,ymm3[24],zero,ymm3[30,31]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512DQ-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-NEXT: vmovdqa 32(%rax), %ymm4
-; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512DQ-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm4, %ymm4
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm4, %zmm1
-; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm3, %ymm14, %ymm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm10[21],zero,ymm10[19],zero,zero,zero,zero,ymm10[22],zero,ymm10[20],zero,zero
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
-; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm6, %ymm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-NEXT: vmovdqa 32(%rax), %ymm0
+; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm12 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm13 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512DQ-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpshufb %ymm6, %ymm7, %ymm6
+; AVX512DQ-NEXT: vmovdqa64 %ymm7, %ymm17
+; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm13
+; AVX512DQ-NEXT: vmovdqa64 %ymm16, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm13, %ymm7
+; AVX512DQ-NEXT: vpor %ymm6, %ymm7, %ymm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm8[23],zero,ymm8[21,22,23,26],zero,ymm8[24],zero,ymm8[28,29,26,27]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm8[18,19,20,21],zero,ymm8[19],zero,ymm8[25,26,27,22],zero,ymm8[20],zero
-; AVX512DQ-NEXT: vmovdqa64 %ymm8, %ymm19
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm8, %ymm6
+; AVX512DQ-NEXT: vmovdqa64 %ymm20, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm14, %ymm7
+; AVX512DQ-NEXT: vmovdqa64 %ymm14, %ymm22
+; AVX512DQ-NEXT: vpor %ymm6, %ymm7, %ymm0
+; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa64 %ymm21, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm15, %ymm6
+; AVX512DQ-NEXT: vmovdqa64 %ymm15, %ymm16
+; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm15
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm15, %ymm7
+; AVX512DQ-NEXT: vpor %ymm6, %ymm7, %ymm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa (%rax), %ymm1
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm1, %ymm0
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm16 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm2 = ymm1[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm2, %zmm16
-; AVX512DQ-NEXT: vmovdqa 32(%rdx), %xmm3
-; AVX512DQ-NEXT: vmovdqa 32(%rcx), %xmm0
+; AVX512DQ-NEXT: vmovdqa 32(%rdx), %xmm0
; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm2 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512DQ-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512DQ-NEXT: vmovdqa64 %xmm2, %xmm29
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm4 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512DQ-NEXT: vpshufb %xmm4, %xmm3, %xmm2
-; AVX512DQ-NEXT: vmovdqa64 %xmm4, %xmm31
-; AVX512DQ-NEXT: vmovdqa64 %xmm3, %xmm30
-; AVX512DQ-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX512DQ-NEXT: vmovdqa 32(%rcx), %xmm1
+; AVX512DQ-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512DQ-NEXT: vpshufb %xmm11, %xmm1, %xmm6
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm12 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512DQ-NEXT: vpshufb %xmm12, %xmm0, %xmm7
+; AVX512DQ-NEXT: vpor %xmm6, %xmm7, %xmm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm3
-; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm4
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm12 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512DQ-NEXT: vpshufb %xmm12, %xmm4, %xmm0
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm15 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm3, %xmm2
-; AVX512DQ-NEXT: vmovdqa64 %xmm3, %xmm21
-; AVX512DQ-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm0
+; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm14
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm6 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512DQ-NEXT: vpshufb %xmm6, %xmm14, %xmm7
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm0, %xmm8
+; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm18
+; AVX512DQ-NEXT: vpor %xmm7, %xmm8, %xmm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm3 = [0,0,0,0,2,3,0,1,0,18,0,19,18,0,19,0]
-; AVX512DQ-NEXT: vmovdqa 32(%rax), %xmm2
-; AVX512DQ-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5,5,6]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm2, %zmm3
-; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm1 = [0,0,0,0,2,3,0,1,0,18,0,19,18,0,19,0]
+; AVX512DQ-NEXT: vmovdqa 32(%rax), %xmm0
+; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm7 = xmm0[0,1,2,3,4,5,5,6]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512DQ-NEXT: vpermi2d %zmm7, %zmm8, %zmm1
+; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vmovdqa 32(%r9), %xmm0
-; AVX512DQ-NEXT: vmovdqa 32(%r8), %xmm2
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm6 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm0, %xmm8
-; AVX512DQ-NEXT: vmovdqa64 %xmm0, %xmm28
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm14 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm2, %xmm10
-; AVX512DQ-NEXT: vmovdqa %xmm2, %xmm3
-; AVX512DQ-NEXT: vporq %xmm8, %xmm10, %xmm26
+; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-NEXT: vmovdqa 32(%r8), %xmm1
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm8 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512DQ-NEXT: vpshufb %xmm8, %xmm0, %xmm7
+; AVX512DQ-NEXT: vmovdqa64 %xmm8, %xmm20
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm1, %xmm8
+; AVX512DQ-NEXT: vmovdqa64 %xmm0, %xmm30
+; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm29
+; AVX512DQ-NEXT: vpor %xmm7, %xmm8, %xmm0
+; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa64 %ymm28, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm7
; AVX512DQ-NEXT: vmovdqa64 %ymm23, %ymm0
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm7, %ymm8
-; AVX512DQ-NEXT: vmovdqa64 %ymm17, %ymm0
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm9, %ymm10
-; AVX512DQ-NEXT: vpor %ymm8, %ymm10, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm5, %ymm8
+; AVX512DQ-NEXT: vpor %ymm7, %ymm8, %ymm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,ymm5[14],zero,zero,zero,zero,zero,zero,ymm5[15],zero,zero,zero,zero,zero,zero,ymm5[16],zero,zero,zero,zero,zero,zero,ymm5[17],zero,zero,zero,zero,zero,zero,ymm5[18]
-; AVX512DQ-NEXT: vmovdqa64 %ymm5, %ymm23
-; AVX512DQ-NEXT: vmovdqa64 %ymm20, %ymm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[0,1,14],zero,ymm0[12,13,0,1,14,15],zero,ymm0[3,12,13,2,3,16],zero,ymm0[30,31,28,29,16,17],zero,ymm0[31,18,19,28,29,18],zero
-; AVX512DQ-NEXT: vpor %ymm8, %ymm10, %ymm2
-; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm2
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm13, %ymm8
-; AVX512DQ-NEXT: vmovdqa64 %ymm22, %ymm2
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm11, %ymm10
-; AVX512DQ-NEXT: vmovdqa64 %ymm11, %ymm27
-; AVX512DQ-NEXT: vpor %ymm8, %ymm10, %ymm2
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
+; AVX512DQ-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm21
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
+; AVX512DQ-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm5, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm19
+; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vmovdqa64 %ymm26, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm10, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm9, %ymm1
+; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
+; AVX512DQ-NEXT: # ymm7 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm7, %ymm10, %ymm0
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [2,3,4,5,128,3,128,5,4,5,6,128,4,128,6,7,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm9, %ymm4
+; AVX512DQ-NEXT: vpor %ymm0, %ymm4, %ymm0
+; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
+; AVX512DQ-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm10, %ymm2, %ymm4
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
+; AVX512DQ-NEXT: # ymm0 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm3, %ymm5
+; AVX512DQ-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX512DQ-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa64 %ymm24, %ymm4
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vmovdqa64 %ymm27, %ymm4
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm2
-; AVX512DQ-NEXT: vpshufb %xmm12, %xmm2, %xmm5
-; AVX512DQ-NEXT: vmovdqa64 %xmm2, %xmm25
-; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm12, %xmm9
-; AVX512DQ-NEXT: vporq %xmm5, %xmm9, %xmm22
-; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm13
-; AVX512DQ-NEXT: vmovdqa64 %xmm29, %xmm2
-; AVX512DQ-NEXT: vpshufb %xmm2, %xmm13, %xmm7
-; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm9
-; AVX512DQ-NEXT: vmovdqa64 %xmm31, %xmm2
-; AVX512DQ-NEXT: vpshufb %xmm2, %xmm9, %xmm10
-; AVX512DQ-NEXT: vpor %xmm7, %xmm10, %xmm2
+; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm3
+; AVX512DQ-NEXT: vpshufb %xmm6, %xmm3, %xmm2
+; AVX512DQ-NEXT: vmovdqa64 %xmm3, %xmm31
+; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm4
+; AVX512DQ-NEXT: vmovdqa64 %xmm18, %xmm3
+; AVX512DQ-NEXT: vpshufb %xmm3, %xmm4, %xmm3
+; AVX512DQ-NEXT: vmovdqa64 %xmm4, %xmm28
+; AVX512DQ-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512DQ-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT: vmovdqa (%r9), %xmm2
+; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm3
+; AVX512DQ-NEXT: vpshufb %xmm11, %xmm3, %xmm2
+; AVX512DQ-NEXT: vmovdqa %xmm3, %xmm6
+; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm4
+; AVX512DQ-NEXT: vpshufb %xmm12, %xmm4, %xmm3
+; AVX512DQ-NEXT: vmovdqa64 %xmm4, %xmm18
+; AVX512DQ-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512DQ-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm2, %xmm6
-; AVX512DQ-NEXT: vmovdqa (%r8), %xmm5
-; AVX512DQ-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm5, %xmm11
-; AVX512DQ-NEXT: vpor %xmm6, %xmm11, %xmm6
-; AVX512DQ-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm24, %zmm0, %zmm6
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm7 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512DQ-NEXT: vpshufb %xmm7, %xmm11, %xmm11
-; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm11[0,1,0,1],zmm6[4,5,6,7]
+; AVX512DQ-NEXT: vmovdqa (%r9), %xmm3
+; AVX512DQ-NEXT: vmovdqa64 %xmm20, %xmm2
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512DQ-NEXT: vmovdqa %xmm3, %xmm8
+; AVX512DQ-NEXT: vmovdqa (%r8), %xmm3
+; AVX512DQ-NEXT: vmovdqa64 %xmm30, %xmm4
+; AVX512DQ-NEXT: vpshufb %xmm4, %xmm3, %xmm4
+; AVX512DQ-NEXT: vmovdqa %xmm3, %xmm9
+; AVX512DQ-NEXT: vpor %xmm2, %xmm4, %xmm2
; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa (%rax), %xmm10
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm11 = xmm10[0,1,2,3,4,5,5,6]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
-; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm11, %ymm11
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm14 = zero,ymm1[13],zero,zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm14, %zmm11, %zmm24
-; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm2
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
-; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm18
-; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} ymm5 = [13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm13[23],zero,zero,zero,zero,ymm13[26],zero,ymm13[24],zero,zero,zero,zero,ymm13[27],zero,ymm13[25]
+; AVX512DQ-NEXT: vmovdqa64 %ymm17, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm7, %ymm3, %ymm4
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm2
+; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm3[23,u,u,u],zero,ymm3[26],zero,ymm3[24,u,u,u],zero,ymm3[27],zero
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm13, %ymm1
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa64 %ymm22, %ymm2
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
+; AVX512DQ-NEXT: vmovdqa64 %ymm21, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb %ymm5, %ymm2, %ymm11
-; AVX512DQ-NEXT: vmovdqa64 %ymm5, %ymm29
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm15 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm31 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800,18446463693966278655,18446742978476179455,18446463693966278655,18446742978476179455]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm31, %ymm11, %ymm15
-; AVX512DQ-NEXT: vmovdqa64 %xmm28, %xmm6
-; AVX512DQ-NEXT: vmovdqa %xmm3, %xmm8
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm15[0,1,2,3],zmm11[0,1,0,1]
-; AVX512DQ-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm1 = ymm0[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512DQ-NEXT: vmovdqa64 %ymm20, %ymm17
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,2,3,3,6,6,7,7]
-; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} ymm11 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
-; AVX512DQ-NEXT: vmovdqa64 %ymm23, %ymm5
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm5, %ymm15
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm15, %zmm19
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29],zero,zero,zero
-; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm0, %ymm1
-; AVX512DQ-NEXT: vpshuflw $233, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm11 = mem[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[0,0,1,1,4,4,5,5]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm11, %zmm23
-; AVX512DQ-NEXT: vmovdqa64 %xmm30, %xmm0
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm2[23],zero,ymm2[21,22,23,26],zero,ymm2[24],zero,ymm2[28,29,26,27]
+; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm30
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm15[25],zero,ymm15[23],zero,zero,zero,zero,ymm15[26],zero,ymm15[24],zero,zero
+; AVX512DQ-NEXT: vmovdqa64 %ymm16, %ymm3
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm3[23],zero,ymm3[23,24,25,26],zero,ymm3[24],zero,ymm3[30,31]
+; AVX512DQ-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpshufb %ymm10, %ymm15, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512DQ-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa (%rax), %ymm4
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm15 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm1 = ymm4[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm1, %zmm15
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm9[8],xmm8[8],xmm9[9],xmm8[9],xmm9[10],xmm8[10],xmm9[11],xmm8[11],xmm9[12],xmm8[12],xmm9[13],xmm8[13],xmm9[14],xmm8[14],xmm9[15],xmm8[15]
+; AVX512DQ-NEXT: vmovdqa64 %xmm9, %xmm21
+; AVX512DQ-NEXT: vmovdqa64 %xmm8, %xmm22
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm2 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512DQ-NEXT: vmovdqa64 %xmm2, %xmm24
+; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm1[0,1,0,1],zmm0[4,5,6,7]
+; AVX512DQ-NEXT: vmovdqa (%rax), %xmm11
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm11[0,1,2,3,4,5,5,6]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
+; AVX512DQ-NEXT: vpshufb %ymm9, %ymm4, %ymm2
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm16
+; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3],xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm2, %xmm2
+; AVX512DQ-NEXT: vmovdqa64 %xmm0, %xmm27
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm12 = zmm0[2,3,2,3],zmm2[0,1,0,1]
+; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm15 = xmm13[8],xmm9[8],xmm13[9],xmm9[9],xmm13[10],xmm9[10],xmm13[11],xmm9[11],xmm13[12],xmm9[12],xmm13[13],xmm9[13],xmm13[14],xmm9[14],xmm13[15],xmm9[15]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-NEXT: vpshufb %xmm0, %xmm15, %xmm3
-; AVX512DQ-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %xmm0, %xmm1, %xmm0
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm3, %xmm3
+; AVX512DQ-NEXT: vmovdqa64 %xmm0, %xmm26
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm17 = zmm0[2,3,2,3],zmm3[0,1,0,1]
+; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX512DQ-NEXT: vmovdqa64 %xmm29, %xmm3
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm13, %xmm13
+; AVX512DQ-NEXT: vmovdqa64 %xmm0, %xmm25
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm29 = zmm0[2,3,2,3],zmm13[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm20
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX512DQ-NEXT: vmovdqa %xmm6, %xmm8
+; AVX512DQ-NEXT: vmovdqa64 %xmm18, %xmm7
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm4, %xmm2
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm18
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm30
-; AVX512DQ-NEXT: vmovdqa64 %xmm21, %xmm0
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; AVX512DQ-NEXT: vmovdqa64 %xmm25, %xmm3
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm4 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-NEXT: vpshufb %xmm4, %xmm1, %xmm1
-; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm19
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm14[8],xmm10[8],xmm14[9],xmm10[9],xmm14[10],xmm10[10],xmm14[11],xmm10[11],xmm14[12],xmm10[12],xmm14[13],xmm10[13],xmm14[14],xmm10[14],xmm14[15],xmm10[15]
+; AVX512DQ-NEXT: vmovdqa64 %xmm31, %xmm6
+; AVX512DQ-NEXT: vmovdqa64 %xmm28, %xmm4
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm14 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm14, %xmm13
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm21
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm8[8],xmm6[8],xmm8[9],xmm6[9],xmm8[10],xmm6[10],xmm8[11],xmm6[11],xmm8[12],xmm6[12],xmm8[13],xmm6[13],xmm8[14],xmm6[14],xmm8[15],xmm6[15]
-; AVX512DQ-NEXT: vpshufb %xmm7, %xmm0, %xmm0
-; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm26 = zmm26[0,1,0,1],zmm0[0,1,0,1]
-; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm0, %ymm6
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm0, %ymm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
-; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm28
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm0[23],zero,ymm0[21,22,23,26],zero,ymm0[24],zero,ymm0[28,29,26,27]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm0[18,19,20,21],zero,ymm0[19],zero,ymm0[25,26,27,22],zero,ymm0[20],zero
-; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm20
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[21],zero,ymm5[19],zero,zero,zero,zero,ymm5[22],zero,ymm5[20],zero,zero
-; AVX512DQ-NEXT: vmovdqa64 %ymm27, %ymm1
-; AVX512DQ-NEXT: vmovdqa64 %ymm29, %ymm0
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm1, %ymm4
-; AVX512DQ-NEXT: vmovdqa %ymm2, %ymm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
-; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
-; AVX512DQ-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm1, %ymm0
-; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512DQ-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm0, %ymm1
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm14, %ymm2
-; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm27
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
-; AVX512DQ-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm14, %ymm14
-; AVX512DQ-NEXT: vmovdqa64 %ymm14, %ymm25
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm29
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = zero,ymm0[13],zero,zero,zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,ymm0[17],zero,zero
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm14 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm14 = ymm14[0,1,1,3,4,5,5,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,2]
-; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm14, %ymm14
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm14, %zmm2, %zmm2
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1],xmm12[2],xmm3[2],xmm12[3],xmm3[3],xmm12[4],xmm3[4],xmm12[5],xmm3[5],xmm12[6],xmm3[6],xmm12[7],xmm3[7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm15, %xmm15
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm12, %xmm12
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm22, %zmm12, %zmm0
-; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3],xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm14 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm11, %xmm9
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm13, %xmm11
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm13 = ymm6[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm14 = ymm18[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm22 = ymm4[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm8[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm7[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm5[2,3,2,3]
-; AVX512DQ-NEXT: vmovdqa64 %ymm17, %ymm3
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm3 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,0,1,1,4,4,5,5]
-; AVX512DQ-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm12 # 16-byte Folded Reload
-; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX512DQ-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX512DQ-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1],xmm5[2],mem[2],xmm5[3],mem[3],xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-NEXT: vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm11 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm11 = zmm5[0,1,0,1],mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm10[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm7 = xmm10[1,1,0,0,4,5,6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,0]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm7, %zmm7
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm10 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm5 = zmm19[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpternlogq $236, %zmm31, %zmm10, %zmm5
-; AVX512DQ-NEXT: vpandq %ymm31, %ymm22, %ymm10
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm1, %zmm1
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm10 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vporq %zmm10, %zmm1, %zmm1
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm10 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655,18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512DQ-NEXT: vpand %ymm6, %ymm10, %ymm6
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm6 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vporq %zmm6, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm5
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm1, %zmm0, %zmm5
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm1 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm6 = zmm23[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpternlogq $236, %zmm10, %zmm1, %zmm6
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm1 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm17 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vporq %zmm1, %zmm17, %zmm1
-; AVX512DQ-NEXT: vpternlogq $226, %zmm6, %zmm0, %zmm1
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm6 = zmm30[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm17 = zmm21[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm6, %zmm0, %zmm17
-; AVX512DQ-NEXT: vpternlogq $248, %ymm10, %ymm13, %ymm14
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm3[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $236, %ymm10, %ymm4, %ymm0
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm9[0,1,0,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm14, %zmm3
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm4 = mem[2,3,2,3]
-; AVX512DQ-NEXT: vpshufhw $190, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm6 = mem[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,2,3,3,6,6,7,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm6
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm15[0,1,0,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm6, %zmm4
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm6 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512DQ-NEXT: vpternlogq $184, %zmm3, %zmm6, %zmm4
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm28[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm20[2,3,2,3]
-; AVX512DQ-NEXT: vpor %ymm3, %ymm8, %ymm3
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm8, %zmm3
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm0
-; AVX512DQ-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm13 = mem[2,3,2,3]
-; AVX512DQ-NEXT: vpshuflw $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
-; AVX512DQ-NEXT: # xmm14 = mem[1,1,0,0,4,5,6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,0]
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm15 = mem[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm18 = ymm27[2,3,2,3]
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm19 = mem[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm20 = ymm25[2,3,2,3]
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm21 = mem[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm22 = ymm29[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm3, %zmm6, %zmm0
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm3 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm6 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm6
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
+; AVX512DQ-NEXT: vmovdqa64 %xmm24, %xmm2
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm14 = zmm2[0,1,0,1],zmm1[0,1,0,1]
+; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX512DQ-NEXT: vpshufb %ymm9, %ymm10, %ymm1
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm9 = ymm10[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[0,1,1,3,4,5,5,7]
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm10 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; AVX512DQ-NEXT: vpternlogq $226, %zmm12, %zmm10, %zmm17
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm2 = mem[2,3,2,3]
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm28 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm28 = mem[2,3,2,3]
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm24
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm24
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm14[0,0,1,0]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm13, %zmm3
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm3
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm3
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm28, %zmm3, %zmm28
+; AVX512DQ-NEXT: vpternlogq $226, %zmm2, %zmm10, %zmm28
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm9[2,2,3,2]
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm10
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; AVX512DQ-NEXT: vmovdqa64 %xmm27, %xmm2
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512DQ-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm27 # 16-byte Folded Reload
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; AVX512DQ-NEXT: vmovdqa64 %xmm26, %xmm1
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm2, %xmm2
+; AVX512DQ-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload
+; AVX512DQ-NEXT: vmovdqa64 %xmm21, %xmm1
+; AVX512DQ-NEXT: vmovdqa64 %xmm22, %xmm3
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX512DQ-NEXT: vmovdqa64 %xmm25, %xmm1
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm8, %xmm8
+; AVX512DQ-NEXT: vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm8 = zmm8[0,1,0,1],mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm11[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm5 = xmm11[1,1,0,0,4,5,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm5, %zmm5
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm9 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm11 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vporq %zmm9, %zmm11, %zmm9
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm11 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm12 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vporq %zmm11, %zmm12, %zmm11
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm11
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm9 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm12 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vporq %zmm9, %zmm12, %zmm9
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm12 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
+; AVX512DQ-NEXT: vpternlogq $184, %zmm11, %zmm12, %zmm9
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm13[0,1,0,1]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm18[0,1,0,1]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm20[2,3,2,3]
+; AVX512DQ-NEXT: vpshuflw $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX512DQ-NEXT: # xmm1 = mem[1,1,0,0,4,5,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm13 = zmm19[0,1,0,1,4,5,4,5]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512DQ-NEXT: vpternlogq $226, %zmm13, %zmm12, %zmm0
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm13 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm21 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vporq %zmm13, %zmm21, %zmm13
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm20 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm18 = zmm30[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vporq %zmm20, %zmm18, %zmm18
+; AVX512DQ-NEXT: vpternlogq $226, %zmm13, %zmm12, %zmm18
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm11 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm4
+; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm23, %zmm16
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm16
; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
; AVX512DQ-NEXT: # zmm4 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm4
-; AVX512DQ-NEXT: vporq %ymm15, %ymm18, %ymm5
-; AVX512DQ-NEXT: vporq %ymm19, %ymm20, %ymm6
-; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[0,1,2,3]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm16
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm16
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm26
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm26
-; AVX512DQ-NEXT: vporq %ymm21, %ymm22, %ymm1
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,2,3],zmm1[0,1,2,3]
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm4
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,0]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm29, %zmm1
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm1
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm14
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm14
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-NEXT: vshufi64x2 $84, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm0 = zmm0[0,1,2,3],mem[2,3,2,3]
+; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm10
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm28, %zmm10
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm27[0,1,0,1,4,5,4,5]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm2 = zmm2[0,1,0,1,4,5,4,5]
; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512DQ-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm0 = mem[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm1 = zmm12[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm7[0,0,1,0,4,4,5,4]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm0
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm5[0,0,1,0,4,4,5,4]
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm0
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-NEXT: vshufi64x2 $85, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm2 = zmm2[2,3,2,3],mem[2,3,2,3]
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm15
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm15
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQ-NEXT: vmovdqa64 %zmm15, 128(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm0, (%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm2, 320(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm26, 256(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm16, 128(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm10, 320(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm14, 256(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm1, 192(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm4, 384(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm3, 192(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm24, 64(%rax)
-; AVX512DQ-NEXT: addq $1448, %rsp # imm = 0x5A8
+; AVX512DQ-NEXT: vmovdqa64 %zmm16, 64(%rax)
+; AVX512DQ-NEXT: addq $1720, %rsp # imm = 0x6B8
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: store_i8_stride7_vf64:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: subq $1256, %rsp # imm = 0x4E8
-; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, %ymm14
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, %ymm13
+; AVX512DQ-FCP-NEXT: subq $1432, %rsp # imm = 0x598
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero,zero
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512DQ-FCP-NEXT: # ymm15 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm3, %ymm1
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm16
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm25
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [25,128,23,0,0,0,128,26,128,24,0,0,0,128,27,128,25,128,23,0,0,0,128,26,128,24,0,0,0,128,27,128]
+; AVX512DQ-FCP-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm27
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm18
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %ymm3
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512DQ-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm28
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm3[30],zero,ymm3[28,u,u,u],zero,ymm3[31],zero,ymm3[29,u]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm19
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512DQ-FCP-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm24
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm17
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm18
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[25],zero,ymm4[23],zero,zero,zero,zero,ymm4[26],zero,ymm4[24],zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[27],zero,zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29]
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512DQ-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm23
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512DQ-FCP-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm22
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rax), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,zero,zero,ymm1[18]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm23
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,14],zero,ymm1[12,13,0,1,14,15],zero,ymm1[3,12,13,2,3,16],zero,ymm1[30,31,28,29,16,17],zero,ymm1[31,18,19,28,29,18],zero
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm5
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm5[14],zero,zero,zero,zero,zero,zero,ymm5[15],zero,zero,zero,zero,zero,zero,ymm5[16],zero,zero,zero,zero,zero,zero,ymm5[17],zero,zero,zero,zero,zero,zero,ymm5[18]
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm4
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[0,1,14],zero,ymm4[12,13,0,1,14,15],zero,ymm4[3,12,13,2,3,16],zero,ymm4[30,31,28,29,16,17],zero,ymm4[31,18,19,28,29,18],zero
; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm26
+; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero,zero
; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm3
-; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm3, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,0,1,14],zero,ymm2[14,15,0,1,14,15],zero,ymm2[13,14,15,16,17,16],zero,ymm2[30,31,30,31,16,17],zero,ymm2[31,28,29,30,31]
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm25
-; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm5, (%rsp) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
-; AVX512DQ-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm5, %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm30
-; AVX512DQ-FCP-NEXT: vporq %ymm3, %ymm5, %ymm24
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm5
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm6, %xmm28
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm3, %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm9, %xmm19
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm29
-; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm6, %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm10
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm6, %xmm5
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm10, %xmm9
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm10, %xmm27
-; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm9, %xmm5
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %xmm15
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %xmm10
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm15, %xmm9
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm10, %xmm12
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm21
-; AVX512DQ-FCP-NEXT: vporq %xmm9, %xmm12, %xmm22
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm13, %ymm7
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm13, %ymm20
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm14, %ymm1
-; AVX512DQ-FCP-NEXT: vpor %ymm7, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm7
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,ymm7[14],zero,zero,zero,zero,zero,zero,ymm7[15],zero,zero,zero,zero,zero,zero,ymm7[16],zero,zero,zero,zero,zero,zero,ymm7[17],zero,zero,zero,zero,zero,zero,ymm7[18]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm7
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[0,1,14],zero,ymm7[12,13,0,1,14,15],zero,ymm7[3,12,13,2,3,16],zero,ymm7[30,31,28,29,16,17],zero,ymm7[31,18,19,28,29,18],zero
-; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm7, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm7
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm13
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm13, %xmm0
+; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[13,u,u,u,u,u],zero,ymm0[14,u,u,u,u,u],zero,ymm0[15,u,u,u,u,u],zero,ymm0[16,u,u,u,u,u],zero,ymm0[17,u,u,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm9, %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm11
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm11, %xmm11
+; AVX512DQ-FCP-NEXT: vpor %xmm9, %xmm11, %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %xmm14
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm14, %xmm11
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm14, %xmm30
+; AVX512DQ-FCP-NEXT: vpor %xmm9, %xmm11, %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm9, %xmm26
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm9
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm9, %xmm1
-; AVX512DQ-FCP-NEXT: vporq %xmm0, %xmm1, %xmm31
-; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm14
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm14, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm8
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm1
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm8, %xmm1
-; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %xmm1
-; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm1, %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm21, %xmm1
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm4, %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm10
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm9, %xmm21
+; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm10, %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm7
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm6
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm10
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm10, %xmm8
+; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm8, %xmm6
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %xmm11
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm12
+; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %xmm6
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm6, %xmm13
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, %xmm9
+; AVX512DQ-FCP-NEXT: vporq %xmm12, %xmm13, %xmm31
+; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm4, %ymm12
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
+; AVX512DQ-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm13
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm6
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm27, %ymm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm12
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
+; AVX512DQ-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm4, %ymm13
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm20
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm29
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm28, %ymm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm12
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
+; AVX512DQ-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm13
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm16
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm28
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm13
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
+; AVX512DQ-FCP-NEXT: # ymm12 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm15
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm15, %zmm27
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm13
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm22, %ymm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm15
+; AVX512DQ-FCP-NEXT: vpor %ymm13, %ymm15, %ymm6
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
+; AVX512DQ-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm15
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm24
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
+; AVX512DQ-FCP-NEXT: # ymm13 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm1, %ymm14
+; AVX512DQ-FCP-NEXT: vpor %ymm15, %ymm14, %ymm6
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm4[28],zero,ymm4[30,31,30,31],zero,ymm4[29],zero,ymm4[31,28,29]
+; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm4, %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm3[30],zero,ymm3[28,u,u,u],zero,ymm3[31],zero,ymm3[29,u]
+; AVX512DQ-FCP-NEXT: vporq %ymm2, %ymm3, %ymm23
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm0[27,u,u,u],zero,ymm0[30],zero,ymm0[28,u,u,u],zero,ymm0[31],zero
+; AVX512DQ-FCP-NEXT: vporq %ymm1, %ymm0, %ymm22
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm19, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[0,1,0,1,14],zero,ymm3[14,15,0,1,14,15],zero,ymm3[13,14,15,16,17,16],zero,ymm3[30,31,30,31,16,17],zero,ymm3[31,28,29,30,31]
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27],zero,ymm0[25]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm12
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm12[21],zero,ymm12[19],zero,zero,zero,zero,ymm12[22],zero,ymm12[20],zero,zero
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero,zero,zero,ymm3[18]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[12,13,0,1,14,15],zero,ymm2[3,12,13,2,3,16],zero,ymm2[30,31,28,29,16,17],zero,ymm2[31,18,19,28,29,18],zero
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm26, %ymm11
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm11[25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[18],zero,zero,zero,zero,ymm11[21],zero,ymm11[19],zero,zero,zero,zero,ymm11[22],zero,ymm11[20]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512DQ-FCP-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
-; AVX512DQ-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm19
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm30
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[13,u,u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm24, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm9[8],xmm11[8],xmm9[9],xmm11[9],xmm9[10],xmm11[10],xmm9[11],xmm11[11],xmm9[12],xmm11[12],xmm9[13],xmm11[13],xmm9[14],xmm11[14],xmm9[15],xmm11[15]
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm9, %xmm8
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm11, %xmm17
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [2,2,3,3,2,2,3,3]
-; AVX512DQ-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [2,2,3,3,2,2,3,3]
+; AVX512DQ-FCP-NEXT: # ymm6 = mem[0,1,0,1]
; AVX512DQ-FCP-NEXT: vmovdqa (%rax), %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512DQ-FCP-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa (%rax), %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm18
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm24
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm23
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14]
-; AVX512DQ-FCP-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm25
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm26 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512DQ-FCP-NEXT: # ymm26 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm26, %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3],xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm2[0,1,2,3],zmm0[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm29, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm28, %xmm2
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm14[8],xmm8[8],xmm14[9],xmm8[9],xmm14[10],xmm8[10],xmm14[11],xmm8[11],xmm14[12],xmm8[12],xmm14[13],xmm8[13],xmm14[14],xmm8[14],xmm14[15],xmm8[15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm28
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm27, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, %xmm2
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm13[8],xmm9[8],xmm13[9],xmm9[9],xmm13[10],xmm9[10],xmm13[11],xmm9[11],xmm13[12],xmm9[12],xmm13[13],xmm9[13],xmm13[14],xmm9[14],xmm13[15],xmm9[15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm0, %zmm27
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm10[8],xmm15[8],xmm10[9],xmm15[9],xmm10[10],xmm15[10],xmm10[11],xmm15[11],xmm10[12],xmm15[12],xmm10[13],xmm15[13],xmm10[14],xmm15[14],xmm10[15],xmm15[15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm22[0,1,0,1],zmm2[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rax), %xmm0
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm29
-; AVX512DQ-FCP-NEXT: vpermd %ymm2, %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm11, %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm22 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm19, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm20 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm4
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm30
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm19 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [4,5,4,5,5,7,4,5]
-; AVX512DQ-FCP-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm16
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3],xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm6
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
-; AVX512DQ-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm15
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm17 = ymm15[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm2
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm13
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm18 = ymm13[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm31, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3],xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm7
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm0, %xmm13
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm0[23],zero,ymm0[23,24,25,26],zero,ymm0[24],zero,ymm0[30,31]
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,6]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm19
+; AVX512DQ-FCP-NEXT: vpermd %ymm1, %ymm6, %ymm1
+; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa (%rax), %ymm12
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm12[13],zero,zero,zero,zero,zero,zero,ymm12[14],zero,zero,zero,zero,zero,zero,ymm12[15],zero,zero,zero,zero,zero,zero,ymm12[16],zero,zero,zero,zero,zero,zero,ymm12[17],zero,zero
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm20
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm7[8],xmm10[8],xmm7[9],xmm10[9],xmm7[10],xmm10[10],xmm7[11],xmm10[11],xmm7[12],xmm10[12],xmm7[13],xmm10[13],xmm7[14],xmm10[14],xmm7[15],xmm10[15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm25
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm8[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm12[30],zero,ymm12[28],zero,zero,zero,zero,ymm12[31],zero,ymm12[29],zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm9
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm23[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm11[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm31 # 16-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
-; AVX512DQ-FCP-NEXT: # ymm11 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm14
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm13
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpor %ymm12, %ymm9, %ymm9
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm9, %zmm6
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512DQ-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm9, %ymm5, %ymm0
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm6, %zmm0, %zmm7
-; AVX512DQ-FCP-NEXT: vpor %ymm4, %ymm10, %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm16 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm16, %xmm2
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm11[8],xmm2[8],xmm11[9],xmm2[9],xmm11[10],xmm2[10],xmm11[11],xmm2[11],xmm11[12],xmm2[12],xmm11[13],xmm2[13],xmm11[14],xmm2[14],xmm11[15],xmm2[15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm1
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm4
-; AVX512DQ-FCP-NEXT: vpor %ymm8, %ymm14, %ymm2
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm18
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm26, %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm21, %xmm0
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm21
+; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm13 = xmm14[8],xmm4[8],xmm14[9],xmm4[9],xmm14[10],xmm4[10],xmm14[11],xmm4[11],xmm14[12],xmm4[12],xmm14[13],xmm4[13],xmm14[14],xmm4[14],xmm14[15],xmm4[15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm13, %xmm1
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm5
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm4, %zmm0, %zmm5
-; AVX512DQ-FCP-NEXT: vpandq %ymm9, %ymm22, %ymm0
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm20, %zmm0
-; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm2 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm2, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpandq %ymm26, %ymm19, %ymm2
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm2, %zmm2
-; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm4 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm4, %zmm2, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512DQ-FCP-NEXT: vpandq %ymm26, %ymm18, %ymm0
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm15, %zmm0
-; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm4 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm4, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm2, %zmm4, %zmm0
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm28[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm27[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm2, %zmm4, %zmm8
-; AVX512DQ-FCP-NEXT: vpandq %ymm26, %ymm13, %ymm2
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm2 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm2, %zmm1, %zmm1
-; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm2 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm6 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm2, %zmm6, %zmm9
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm1, %zmm4, %zmm9
-; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-FCP-NEXT: vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm18 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm18 = zmm1[0,1,0,1],mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm29, %xmm3
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[1,1,0,0,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,1,0,1,2,0,0,1]
-; AVX512DQ-FCP-NEXT: vpermd %ymm2, %ymm4, %ymm19
-; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm1[1,1,0,0,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermd %ymm6, %ymm4, %ymm17
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm10
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm6
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
-; AVX512DQ-FCP-NEXT: # ymm11 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm12
-; AVX512DQ-FCP-NEXT: vmovdqu (%rsp), %ymm1 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm11
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm14 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
-; AVX512DQ-FCP-NEXT: # ymm14 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm1, %ymm15
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm2[23],zero,ymm2[23,24,25,26],zero,ymm2[24],zero,ymm2[30,31]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm14
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm4 = ymm3[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [4,5,4,5,5,7,4,5]
-; AVX512DQ-FCP-NEXT: vpermd %ymm4, %ymm2, %ymm20
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm22 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm22
-; AVX512DQ-FCP-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm23 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,0]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm13
+; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm30, %xmm5
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm5[8],xmm15[8],xmm5[9],xmm15[9],xmm5[10],xmm15[10],xmm5[11],xmm15[11],xmm5[12],xmm15[12],xmm5[13],xmm15[13],xmm5[14],xmm15[14],xmm5[15],xmm15[15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,0,1],zmm1[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rax), %xmm3
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,4,5,5,6]
+; AVX512DQ-FCP-NEXT: vpermd %ymm1, %ymm6, %ymm26
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm24 # 16-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm10[0],xmm7[0],xmm10[1],xmm7[1],xmm10[2],xmm7[2],xmm10[3],xmm7[3],xmm10[4],xmm7[4],xmm10[5],xmm7[5],xmm10[6],xmm7[6],xmm10[7],xmm7[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm17, %xmm2
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3],xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm8, %xmm8
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,0,1],zmm31[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm14, %xmm1
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm14 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm31 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm14, %zmm31, %zmm14
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm31 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm30 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm31, %zmm30, %zmm30
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm30
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm14 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm31 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm14, %zmm31, %zmm14
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm31 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm30, %zmm31, %zmm14
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm18[0,1,0,1,4,5,4,5]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm13 = zmm13[0,1,0,1,4,5,4,5]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm7, %zmm31, %zmm13
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm7 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm29 = zmm29[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm7, %zmm29, %zmm7
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm28 = zmm28[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm27 = zmm27[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm28, %zmm27, %zmm27
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm7, %zmm31, %zmm27
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm2[2,3,2,3],zmm1[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm16, %xmm2
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm7
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm23[2,3,2,3],zmm7[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm5[0],xmm15[0],xmm5[1],xmm15[1],xmm5[2],xmm15[2],xmm5[3],xmm15[3],xmm5[4],xmm15[4],xmm5[5],xmm15[5],xmm5[6],xmm15[6],xmm5[7],xmm15[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm10, %xmm10
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm22[2,3,2,3],zmm10[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm2
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm2[1,1,0,0,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm17 = [0,1,0,1,2,0,0,1]
+; AVX512DQ-FCP-NEXT: vpermd %ymm11, %ymm17, %ymm28
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm6
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm3[1,1,0,0,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermd %ymm4, %ymm17, %ymm17
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm2 = ymm12[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm23 = [4,5,4,5,5,7,4,5]
+; AVX512DQ-FCP-NEXT: vpermd %ymm2, %ymm23, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = zero,ymm15[13],zero,zero,zero,zero,zero,zero,ymm15[14],zero,zero,zero,zero,zero,zero,ymm15[15],zero,zero,zero,zero,zero,zero,ymm15[16],zero,zero,zero,zero,zero,zero,ymm15[17],zero,zero
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm15 = ymm15[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpermd %ymm15, %ymm23, %ymm15
+; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm15
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm11, %zmm11
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm15 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm1, %zmm15, %zmm7
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm1 = mem[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm18 = mem[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm18, %zmm3, %zmm18
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm21[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm19 = ymm25[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,0]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm23 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm23
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm24
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm23, %zmm24
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm19, %zmm2, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm21
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm21
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm0
-; AVX512DQ-FCP-NEXT: vpor %ymm12, %ymm15, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm7[0,1,2,3],zmm2[0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm16
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm16
-; AVX512DQ-FCP-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm2 = mem[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm31[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm5
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm17, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm2
-; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm13, %ymm1
-; AVX512DQ-FCP-NEXT: vpor %ymm11, %ymm14, %ymm5
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,2,3],zmm1[0,1,2,3]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm20, %zmm4
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm4
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm1, %zmm15, %zmm18
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm1 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm1
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm14 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm14
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm20
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm20
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm26, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm0
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm24[0,1,0,1,4,5,4,5]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm9[0,1,0,1,4,5,4,5]
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm9
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm28, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm5
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vshufi64x2 $85, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm6 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm6 = zmm3[2,3,2,3],mem[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm2, %zmm2
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm2
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm27, %zmm2
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm4, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm22, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm4
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vshufi64x2 $84, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm6 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm6 = zmm3[0,1,2,3],mem[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm11
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm11
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, 128(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, (%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, 320(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, 320(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, 192(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, 128(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, (%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 256(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm21, 192(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm24, 64(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm22, 384(%rax)
-; AVX512DQ-FCP-NEXT: addq $1256, %rsp # imm = 0x4E8
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm20, 64(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 384(%rax)
+; AVX512DQ-FCP-NEXT: addq $1432, %rsp # imm = 0x598
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -10436,8 +10220,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-NEXT: vmovdqa (%rax), %ymm13
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm27 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512BW-NEXT: vpshufb %ymm27, %ymm13, %ymm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm26 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512BW-NEXT: vpshufb %ymm26, %ymm13, %ymm0
; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
; AVX512BW-NEXT: # ymm1 = mem[0,1,0,1]
; AVX512BW-NEXT: vpermw %ymm13, %ymm1, %ymm1
@@ -10446,12 +10230,13 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm17 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
; AVX512BW-NEXT: vpshufb %ymm17, %ymm9, %ymm1
; AVX512BW-NEXT: vmovdqa (%r8), %ymm10
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm22 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512BW-NEXT: vpshufb %ymm22, %ymm10, %ymm2
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm21 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512BW-NEXT: vpshufb %ymm21, %ymm10, %ymm2
; AVX512BW-NEXT: vpor %ymm1, %ymm2, %ymm2
-; AVX512BW-NEXT: vmovdqa (%r9), %xmm12
-; AVX512BW-NEXT: vmovdqa (%r8), %xmm3
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15]
+; AVX512BW-NEXT: vmovdqa (%r9), %xmm1
+; AVX512BW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-NEXT: vmovdqa (%r8), %xmm12
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm12[8],xmm1[8],xmm12[9],xmm1[9],xmm12[10],xmm1[10],xmm12[11],xmm1[11],xmm12[12],xmm1[12],xmm12[13],xmm1[13],xmm12[14],xmm1[14],xmm12[15],xmm1[15]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
; AVX512BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm8
@@ -10461,270 +10246,257 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vmovdqa (%rdx), %ymm14
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm0 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
; AVX512BW-NEXT: vpshufb %ymm0, %ymm14, %ymm2
-; AVX512BW-NEXT: vmovdqa64 (%rcx), %ymm16
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm23 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512BW-NEXT: vpshufb %ymm23, %ymm16, %ymm4
+; AVX512BW-NEXT: vmovdqa (%rcx), %ymm15
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512BW-NEXT: vpshufb %ymm20, %ymm15, %ymm4
; AVX512BW-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512BW-NEXT: vmovdqa (%rdx), %xmm4
; AVX512BW-NEXT: vmovdqa (%rcx), %xmm5
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
; AVX512BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm6, %zmm20
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm6, %zmm22
; AVX512BW-NEXT: vmovdqa64 (%rdi), %ymm18
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm24 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
; AVX512BW-NEXT: vpshufb %ymm24, %ymm18, %ymm2
; AVX512BW-NEXT: vmovdqa64 (%rsi), %ymm19
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm26 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512BW-NEXT: vpshufb %ymm26, %ymm19, %ymm6
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm25 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512BW-NEXT: vpshufb %ymm25, %ymm19, %ymm6
; AVX512BW-NEXT: vpor %ymm2, %ymm6, %ymm2
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm6
; AVX512BW-NEXT: vmovdqa (%rsi), %xmm7
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm21 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm21 = xmm21[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm21 = ymm21[0,1,0,1]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm21, %zmm2
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm23 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm23 = xmm23[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm23 = ymm23[0,1,0,1]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm23, %zmm3
; AVX512BW-NEXT: movabsq $435749860008887046, %r10 # imm = 0x60C183060C18306
; AVX512BW-NEXT: kmovq %r10, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm20, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqu8 %zmm22, %zmm3 {%k1}
; AVX512BW-NEXT: movabsq $4066998693416279096, %r10 # imm = 0x3870E1C3870E1C38
; AVX512BW-NEXT: kmovq %r10, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm2 {%k1}
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm20 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm3 {%k1}
; AVX512BW-NEXT: vmovdqa64 32(%rdx), %ymm29
+; AVX512BW-NEXT: vpshufb %ymm0, %ymm29, %ymm0
+; AVX512BW-NEXT: vmovdqa64 32(%rcx), %ymm30
+; AVX512BW-NEXT: vpshufb %ymm20, %ymm30, %ymm8
+; AVX512BW-NEXT: vpor %ymm0, %ymm8, %ymm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm20 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
; AVX512BW-NEXT: vpshufb %ymm20, %ymm29, %ymm8
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm22 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512BW-NEXT: vpshufb %ymm22, %ymm30, %ymm23
+; AVX512BW-NEXT: vporq %ymm8, %ymm23, %ymm8
; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm21 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512BW-NEXT: vmovdqa64 32(%rcx), %ymm30
-; AVX512BW-NEXT: vpshufb %ymm21, %ymm30, %ymm25
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512BW-NEXT: vporq %ymm8, %ymm25, %ymm8
-; AVX512BW-NEXT: vpshufb %ymm0, %ymm29, %ymm0
-; AVX512BW-NEXT: vpshufb %ymm23, %ymm30, %ymm23
-; AVX512BW-NEXT: vporq %ymm0, %ymm23, %ymm0
; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 32(%rsi), %ymm28
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm15
-; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm8 = ymm15[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512BW-NEXT: vmovdqa64 32(%rdi), %ymm16
+; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm8 = ymm16[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[0,0,1,1,4,4,5,5]
-; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm25 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm23 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
; AVX512BW-NEXT: movl $676341840, %r10d # imm = 0x28502850
; AVX512BW-NEXT: kmovd %r10d, %k1
-; AVX512BW-NEXT: vpshufb %ymm25, %ymm28, %ymm8 {%k1}
+; AVX512BW-NEXT: vpshufb %ymm23, %ymm28, %ymm8 {%k1}
; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512BW-NEXT: vpshufb %ymm24, %ymm15, %ymm23
-; AVX512BW-NEXT: vpshufb %ymm26, %ymm28, %ymm24
-; AVX512BW-NEXT: vporq %ymm23, %ymm24, %ymm23
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm23, %zmm8
+; AVX512BW-NEXT: vpshufb %ymm24, %ymm16, %ymm24
+; AVX512BW-NEXT: vpshufb %ymm25, %ymm28, %ymm25
+; AVX512BW-NEXT: vporq %ymm24, %ymm25, %ymm24
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm24, %zmm8
; AVX512BW-NEXT: movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
; AVX512BW-NEXT: kmovq %r10, %k2
; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm8 {%k2}
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm23 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512BW-NEXT: vmovdqa64 32(%r8), %ymm31
-; AVX512BW-NEXT: vpshufb %ymm23, %ymm31, %ymm0
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm26 = ymm0[2,3,2,3]
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm24 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512BW-NEXT: vmovdqa 32(%r9), %ymm0
-; AVX512BW-NEXT: vpshufb %ymm24, %ymm0, %ymm11
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512BW-NEXT: vporq %ymm26, %ymm11, %ymm11
-; AVX512BW-NEXT: vpshufb %ymm17, %ymm0, %ymm17
-; AVX512BW-NEXT: vpshufb %ymm22, %ymm31, %ymm22
-; AVX512BW-NEXT: vporq %ymm17, %ymm22, %ymm17
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm17, %zmm11
+; AVX512BW-NEXT: vmovdqa64 32(%r9), %ymm31
+; AVX512BW-NEXT: vpshufb %ymm17, %ymm31, %ymm17
+; AVX512BW-NEXT: vmovdqa 32(%r8), %ymm1
+; AVX512BW-NEXT: vpshufb %ymm21, %ymm1, %ymm21
+; AVX512BW-NEXT: vporq %ymm17, %ymm21, %ymm17
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm24 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512BW-NEXT: vpshufb %ymm24, %ymm1, %ymm21
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm25 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512BW-NEXT: vpshufb %ymm25, %ymm31, %ymm27
+; AVX512BW-NEXT: vporq %ymm21, %ymm27, %ymm21
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm21 = ymm21[2,3,2,3]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm21, %zmm17, %zmm21
; AVX512BW-NEXT: vmovdqa64 32(%rax), %ymm17
-; AVX512BW-NEXT: vpshufb %ymm27, %ymm17, %ymm22
+; AVX512BW-NEXT: vpshufb %ymm26, %ymm17, %ymm27
; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm26 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
-; AVX512BW-NEXT: vpermw %ymm17, %ymm26, %ymm27
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm27, %zmm22, %zmm22
+; AVX512BW-NEXT: vpermw %ymm17, %ymm26, %ymm11
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm27, %zmm11
; AVX512BW-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
; AVX512BW-NEXT: kmovq %r10, %k3
-; AVX512BW-NEXT: vmovdqu8 %zmm22, %zmm11 {%k3}
+; AVX512BW-NEXT: vmovdqu8 %zmm11, %zmm21 {%k3}
; AVX512BW-NEXT: movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
; AVX512BW-NEXT: kmovq %r10, %k3
-; AVX512BW-NEXT: vmovdqu8 %zmm11, %zmm8 {%k3}
-; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm15[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512BW-NEXT: vmovdqu8 %zmm21, %zmm8 {%k3}
+; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm16[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[2,2,3,3,6,6,7,7]
; AVX512BW-NEXT: movl $338170920, %r10d # imm = 0x14281428
; AVX512BW-NEXT: kmovd %r10d, %k4
; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm27 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
; AVX512BW-NEXT: vpshufb %ymm27, %ymm28, %ymm11 {%k4}
-; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512BW-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpshufb %ymm1, %ymm28, %ymm22
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm22 = ymm22[2,3,2,3]
+; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512BW-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512BW-NEXT: vpshufb %ymm2, %ymm28, %ymm21
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm28 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512BW-NEXT: vpshufb %ymm28, %ymm15, %ymm15
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX512BW-NEXT: vporq %ymm22, %ymm15, %ymm15
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm15, %zmm15
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm29[24,25],zero,ymm29[23],zero,ymm29[21,22,23,26],zero,ymm29[24],zero,ymm29[28,29,26,27]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm22 = ymm30[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm30[25],zero,ymm30[23],zero,zero,zero,zero,ymm30[26],zero,ymm30[24],zero,zero,zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm22 = ymm22[2,3,2,3]
-; AVX512BW-NEXT: vporq %ymm11, %ymm22, %ymm22
+; AVX512BW-NEXT: vpshufb %ymm28, %ymm16, %ymm16
+; AVX512BW-NEXT: vporq %ymm21, %ymm16, %ymm16
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm16, %zmm16
+; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm29[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512BW-NEXT: vpshufd {{.*#+}} ymm21 = ymm11[0,2,3,3,4,6,7,7]
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm11 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512BW-NEXT: vpshufb %ymm11, %ymm30, %ymm30
-; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm29 = ymm29[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512BW-NEXT: vpshufd {{.*#+}} ymm29 = ymm29[0,2,3,3,4,6,7,7]
-; AVX512BW-NEXT: vmovdqu8 %ymm29, %ymm30 {%k1}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm29 = ymm30[2,3,2,3]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm29, %zmm22, %zmm22
+; AVX512BW-NEXT: vpshufb %ymm11, %ymm30, %ymm0
+; AVX512BW-NEXT: vmovdqu8 %ymm21, %ymm0 {%k1}
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm21 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm29[24,25],zero,ymm29[23],zero,ymm29[21,22,23,26],zero,ymm29[24],zero,ymm29[28,29,26,27]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm29 = ymm30[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm30[25],zero,ymm30[23],zero,zero,zero,zero,ymm30[26],zero,ymm30[24],zero,zero,zero,zero
+; AVX512BW-NEXT: vporq %ymm21, %ymm29, %ymm21
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm21, %zmm0
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
; AVX512BW-NEXT: kmovq %r10, %k3
-; AVX512BW-NEXT: vmovdqu8 %zmm22, %zmm15 {%k3}
-; AVX512BW-NEXT: vmovdqa64 (%r9), %zmm29
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm31[0,1,2,3],zmm29[4,5,6,7]
-; AVX512BW-NEXT: vpshufb {{.*#+}} zmm22 = zmm22[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm22[23],zero,zmm22[23,24,25,26],zero,zmm22[24],zero,zmm22[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm22[59],zero,zero,zero,zero,zmm22[62],zero,zmm22[60],zero,zero,zero,zero,zmm22[63],zero
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,3,2,3,6,7,6,7]
-; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm30
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm30[4,5,6,7]
-; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm0[25],zero,zmm0[23],zero,zero,zero,zero,zmm0[26],zero,zmm0[24],zero,zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm0[59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero,zmm0[61]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512BW-NEXT: vporq %zmm22, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm16 {%k3}
+; AVX512BW-NEXT: vmovdqa64 (%r9), %zmm0
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm0[4,5,6,7]
+; AVX512BW-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm1[23],zero,zmm1[23,24,25,26],zero,zmm1[24],zero,zmm1[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm1[59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero
+; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm29
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm31[0,1,2,3],zmm29[4,5,6,7]
+; AVX512BW-NEXT: vpshufb {{.*#+}} zmm21 = zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm21[25],zero,zmm21[23],zero,zero,zero,zero,zmm21[26],zero,zmm21[24],zero,zero,zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm21[59],zero,zero,zero,zero,zmm21[62],zero,zmm21[60],zero,zero,zero,zero,zmm21[63],zero,zmm21[61]
+; AVX512BW-NEXT: vporq %zmm1, %zmm21, %zmm1
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: movabsq $6971997760142192736, %r10 # imm = 0x60C183060C183060
; AVX512BW-NEXT: kmovq %r10, %k3
-; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm15 {%k3}
-; AVX512BW-NEXT: vmovdqa64 (%rax), %zmm22
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm0 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
-; AVX512BW-NEXT: vpermi2w %zmm22, %zmm17, %zmm0
+; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm16 {%k3}
+; AVX512BW-NEXT: vmovdqa64 (%rax), %zmm21
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm1 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
+; AVX512BW-NEXT: vpermi2w %zmm21, %zmm17, %zmm1
; AVX512BW-NEXT: movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
; AVX512BW-NEXT: kmovq %rax, %k5
-; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm15 {%k5}
-; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm0 = ymm18[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512BW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
-; AVX512BW-NEXT: vpshufb %ymm25, %ymm19, %ymm0 {%k1}
-; AVX512BW-NEXT: vpshufb %ymm1, %ymm19, %ymm1
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512BW-NEXT: vpshufb %ymm28, %ymm18, %ymm25
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512BW-NEXT: vporq %ymm1, %ymm25, %ymm1
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqa64 (%rdx), %zmm1
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm1, %zmm1
-; AVX512BW-NEXT: vpshufb %zmm20, %zmm1, %zmm1
+; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm16 {%k5}
+; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm1 = ymm18[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512BW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,1,1,4,4,5,5]
+; AVX512BW-NEXT: vpshufb %ymm23, %ymm19, %ymm1 {%k1}
+; AVX512BW-NEXT: vpshufb %ymm2, %ymm19, %ymm2
+; AVX512BW-NEXT: vpshufb %ymm28, %ymm18, %ymm23
+; AVX512BW-NEXT: vporq %ymm2, %ymm23, %ymm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vmovdqa64 (%rdx), %zmm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm2, %zmm2
+; AVX512BW-NEXT: vpshufb %zmm20, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 (%rcx), %zmm20
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm16, %zmm20, %zmm20
-; AVX512BW-NEXT: vpshufb %zmm21, %zmm20, %zmm20
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm15, %zmm20, %zmm20
+; AVX512BW-NEXT: vpshufb %zmm22, %zmm20, %zmm20
+; AVX512BW-NEXT: vporq %zmm2, %zmm20, %zmm2
; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm20 = zmm20[2,3,2,3,6,7,6,7]
-; AVX512BW-NEXT: vporq %zmm1, %zmm20, %zmm20
-; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm20 {%k3}
-; AVX512BW-NEXT: vmovdqa64 32(%r9), %xmm21
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm10, %zmm30, %zmm0
-; AVX512BW-NEXT: vpshufb %zmm23, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqa64 32(%r8), %xmm23
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm9, %zmm29, %zmm1
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm20 = zmm2[2,3,2,3,6,7,6,7]
+; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm20 {%k3}
+; AVX512BW-NEXT: vmovdqa64 32(%r9), %xmm22
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm10, %zmm29, %zmm1
; AVX512BW-NEXT: vpshufb %zmm24, %zmm1, %zmm1
-; AVX512BW-NEXT: vmovdqa64 32(%rdx), %xmm24
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512BW-NEXT: vmovdqa64 32(%r8), %xmm23
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb %zmm25, %zmm0, %zmm2
+; AVX512BW-NEXT: vmovdqa 32(%rdx), %xmm0
+; AVX512BW-NEXT: vporq %zmm1, %zmm2, %zmm1
+; AVX512BW-NEXT: vmovdqa 32(%rcx), %xmm2
; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm1
-; AVX512BW-NEXT: vmovdqa64 32(%rcx), %xmm25
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm13, %zmm22, %zmm13
-; AVX512BW-NEXT: vpermw %zmm13, %zmm26, %zmm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm13, %zmm21, %zmm13
+; AVX512BW-NEXT: vpermw %zmm13, %zmm26, %zmm24
; AVX512BW-NEXT: movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
; AVX512BW-NEXT: kmovq %rax, %k5
-; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k5}
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %xmm0
+; AVX512BW-NEXT: vmovdqu8 %zmm24, %zmm1 {%k5}
+; AVX512BW-NEXT: vmovdqa64 32(%rdi), %xmm24
; AVX512BW-NEXT: movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
; AVX512BW-NEXT: kmovq %rax, %k5
; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm20 {%k5}
; AVX512BW-NEXT: vmovdqa 32(%rsi), %xmm1
; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm18 = ymm18[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512BW-NEXT: vpshufd {{.*#+}} ymm26 = ymm18[2,2,3,3,6,6,7,7]
-; AVX512BW-NEXT: vpshufb %ymm27, %ymm19, %ymm26 {%k4}
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm18[2,2,3,3,6,6,7,7]
+; AVX512BW-NEXT: vpshufb %ymm27, %ymm19, %ymm25 {%k4}
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm24[0],xmm1[0],xmm24[1],xmm1[1],xmm24[2],xmm1[2],xmm24[3],xmm1[3],xmm24[4],xmm1[4],xmm24[5],xmm1[5],xmm24[6],xmm1[6],xmm24[7],xmm1[7]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm18 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
; AVX512BW-NEXT: vpshufb %xmm18, %xmm19, %xmm19
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm26[2,3,2,3],zmm19[0,1,0,1]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm26 = xmm24[0],xmm25[0],xmm24[1],xmm25[1],xmm24[2],xmm25[2],xmm24[3],xmm25[3],xmm24[4],xmm25[4],xmm24[5],xmm25[5],xmm24[6],xmm25[6],xmm24[7],xmm25[7]
-; AVX512BW-NEXT: vpshufb %ymm11, %ymm16, %ymm11
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm16 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512BW-NEXT: vpshufb %xmm16, %xmm26, %xmm26
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm25 = zmm25[2,3,2,3],zmm19[0,1,0,1]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm26 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX512BW-NEXT: vpshufb %ymm11, %ymm15, %ymm11
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm19 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512BW-NEXT: vpshufb %xmm19, %xmm26, %xmm15
; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm14 = ymm14[0,2,3,3,4,6,7,7]
; AVX512BW-NEXT: vmovdqu8 %ymm14, %ymm11 {%k1}
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm14 = zmm11[2,3,2,3],zmm26[0,1,0,1]
-; AVX512BW-NEXT: vmovdqu8 %zmm19, %zmm14 {%k2}
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm14 = zmm11[2,3,2,3],zmm15[0,1,0,1]
+; AVX512BW-NEXT: vmovdqu8 %zmm25, %zmm14 {%k2}
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm23[0],xmm22[0],xmm23[1],xmm22[1],xmm23[2],xmm22[2],xmm23[3],xmm22[3],xmm23[4],xmm22[4],xmm23[5],xmm22[5],xmm23[6],xmm22[6],xmm23[7],xmm22[7]
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm15 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512BW-NEXT: vpshufb %xmm15, %xmm11, %xmm11
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm9[27],zero,zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm10[27],zero,zero,zero,zero,ymm10[30],zero,ymm10[28],zero,zero,zero,zero,ymm10[31],zero,ymm10[29]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512BW-NEXT: vpor %ymm9, %ymm10, %ymm10
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm23[0],xmm21[0],xmm23[1],xmm21[1],xmm23[2],xmm21[2],xmm23[3],xmm21[3],xmm23[4],xmm21[4],xmm23[5],xmm21[5],xmm23[6],xmm21[6],xmm23[7],xmm21[7]
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm9 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512BW-NEXT: vpshufb %xmm9, %xmm11, %xmm11
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm11 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512BW-NEXT: vpermw %zmm22, %zmm11, %zmm11
+; AVX512BW-NEXT: vpor %ymm9, %ymm10, %ymm9
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[2,3,2,3],zmm11[0,1,0,1]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm10 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512BW-NEXT: vpermw %zmm21, %zmm10, %zmm10
; AVX512BW-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm11, %zmm10 {%k1}
+; AVX512BW-NEXT: vmovdqu8 %zmm10, %zmm9 {%k1}
; AVX512BW-NEXT: movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm10, %zmm14 {%k1}
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512BW-NEXT: vpshufb %xmm11, %xmm25, %xmm10
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm19 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512BW-NEXT: vpshufb %xmm19, %xmm24, %xmm26
-; AVX512BW-NEXT: vporq %xmm10, %xmm26, %xmm10
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm24 = xmm25[8],xmm24[8],xmm25[9],xmm24[9],xmm25[10],xmm24[10],xmm25[11],xmm24[11],xmm25[12],xmm24[12],xmm25[13],xmm24[13],xmm25[14],xmm24[14],xmm25[15],xmm24[15]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm24 = xmm24[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-NEXT: vinserti32x4 $2, %xmm24, %zmm10, %zmm10
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm24 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512BW-NEXT: vpshufb %xmm24, %xmm1, %xmm25
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm26 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512BW-NEXT: vpshufb %xmm26, %xmm0, %xmm27
-; AVX512BW-NEXT: vporq %xmm25, %xmm27, %xmm25
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm10[0,1,0,1,4,5,4,5]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm25, %zmm0
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm10 = zmm0[0,1,0,1,4,5,4,5]
-; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm10 {%k3}
+; AVX512BW-NEXT: vmovdqu8 %zmm9, %zmm14 {%k1}
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm10 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512BW-NEXT: vpshufb %xmm10, %xmm2, %xmm9
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512BW-NEXT: vpshufb %xmm11, %xmm0, %xmm25
+; AVX512BW-NEXT: vporq %xmm9, %xmm25, %xmm9
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm9, %zmm0
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm9
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm25 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512BW-NEXT: vpshufb %xmm25, %xmm24, %xmm26
+; AVX512BW-NEXT: vporq %xmm9, %xmm26, %xmm9
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm24[8],xmm1[9],xmm24[9],xmm1[10],xmm24[10],xmm1[11],xmm24[11],xmm1[12],xmm24[12],xmm1[13],xmm24[13],xmm1[14],xmm24[14],xmm1[15],xmm24[15]
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-NEXT: vinserti32x4 $2, %xmm1, %zmm9, %zmm1
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm9 = zmm1[0,1,0,1,4,5,4,5]
+; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm9 {%k3}
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm0 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512BW-NEXT: vpshufb %xmm0, %xmm21, %xmm1
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm25 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
-; AVX512BW-NEXT: vpermi2w %zmm22, %zmm17, %zmm25
+; AVX512BW-NEXT: vpshufb %xmm0, %xmm22, %xmm1
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm24 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
+; AVX512BW-NEXT: vpermi2w %zmm21, %zmm17, %zmm24
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512BW-NEXT: vpshufb %xmm17, %xmm23, %xmm22
-; AVX512BW-NEXT: vporq %xmm1, %xmm22, %xmm1
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm21 = xmm23[8],xmm21[8],xmm23[9],xmm21[9],xmm23[10],xmm21[10],xmm23[11],xmm21[11],xmm23[12],xmm21[12],xmm23[13],xmm21[13],xmm23[14],xmm21[14],xmm23[15],xmm21[15]
+; AVX512BW-NEXT: vpshufb %xmm17, %xmm23, %xmm21
+; AVX512BW-NEXT: vporq %xmm1, %xmm21, %xmm1
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm21 = xmm23[8],xmm22[8],xmm23[9],xmm22[9],xmm23[10],xmm22[10],xmm23[11],xmm22[11],xmm23[12],xmm22[12],xmm23[13],xmm22[13],xmm23[14],xmm22[14],xmm23[15],xmm22[15]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm21 = xmm21[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
; AVX512BW-NEXT: vinserti32x4 $2, %xmm21, %zmm1, %zmm1
; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
; AVX512BW-NEXT: movabsq $290499906672591364, %rax # imm = 0x408102040810204
; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm25, %zmm1 {%k1}
+; AVX512BW-NEXT: vmovdqu8 %zmm24, %zmm1 {%k1}
; AVX512BW-NEXT: movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm10 {%k1}
-; AVX512BW-NEXT: vpshufb %xmm11, %xmm5, %xmm1
-; AVX512BW-NEXT: vpshufb %xmm19, %xmm4, %xmm11
-; AVX512BW-NEXT: vpor %xmm1, %xmm11, %xmm1
+; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm9 {%k1}
+; AVX512BW-NEXT: vpshufb %xmm10, %xmm5, %xmm1
+; AVX512BW-NEXT: vpshufb %xmm11, %xmm4, %xmm10
+; AVX512BW-NEXT: vpor %xmm1, %xmm10, %xmm1
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX512BW-NEXT: vpshufb %xmm16, %xmm4, %xmm4
+; AVX512BW-NEXT: vpshufb %xmm19, %xmm4, %xmm4
; AVX512BW-NEXT: vinserti32x4 $2, %xmm1, %zmm4, %zmm1
-; AVX512BW-NEXT: vpshufb %xmm24, %xmm7, %xmm4
-; AVX512BW-NEXT: vpshufb %xmm26, %xmm6, %xmm5
-; AVX512BW-NEXT: vpor %xmm4, %xmm5, %xmm4
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512BW-NEXT: vpshufb %xmm18, %xmm5, %xmm5
-; AVX512BW-NEXT: vinserti32x4 $2, %xmm4, %zmm5, %zmm4
+; AVX512BW-NEXT: vpshufb %xmm2, %xmm7, %xmm2
+; AVX512BW-NEXT: vpshufb %xmm25, %xmm6, %xmm4
+; AVX512BW-NEXT: vpor %xmm2, %xmm4, %xmm2
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512BW-NEXT: vpshufb %xmm18, %xmm4, %xmm4
+; AVX512BW-NEXT: vinserti32x4 $2, %xmm2, %zmm4, %zmm2
; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm4 = zmm4[0,1,0,1,4,5,4,5]
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm2 = zmm2[0,1,0,1,4,5,4,5]
; AVX512BW-NEXT: movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm4 {%k1}
-; AVX512BW-NEXT: vpshufb %xmm0, %xmm12, %xmm0
-; AVX512BW-NEXT: vpshufb %xmm17, %xmm3, %xmm1
+; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512BW-NEXT: vpshufb %xmm0, %xmm4, %xmm0
+; AVX512BW-NEXT: vpshufb %xmm17, %xmm12, %xmm1
; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm12[0],xmm3[1],xmm12[1],xmm3[2],xmm12[2],xmm3[3],xmm12[3],xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7]
-; AVX512BW-NEXT: vpshufb %xmm9, %xmm1, %xmm1
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3],xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; AVX512BW-NEXT: vpshufb %xmm15, %xmm1, %xmm1
; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm1 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
; AVX512BW-NEXT: vpermw %zmm13, %zmm1, %zmm1
@@ -10734,334 +10506,319 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm4 {%k1}
+; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT: vmovdqa64 %zmm4, (%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm2, (%rax)
; AVX512BW-NEXT: vmovdqa64 %zmm8, 320(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm10, 256(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm9, 256(%rax)
; AVX512BW-NEXT: vmovdqa64 %zmm14, 192(%rax)
; AVX512BW-NEXT: vmovdqa64 %zmm20, 128(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm2, 64(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm15, 384(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm3, 64(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm16, 384(%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: store_i8_stride7_vf64:
; AVX512BW-FCP: # %bb.0:
-; AVX512BW-FCP-NEXT: subq $40, %rsp
+; AVX512BW-FCP-NEXT: subq $104, %rsp
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT: vmovdqa (%rax), %ymm0
-; AVX512BW-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm28 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512BW-FCP-NEXT: vpshufb %ymm28, %ymm0, %ymm1
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512BW-FCP-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpermw %ymm0, %ymm2, %ymm2
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm2
-; AVX512BW-FCP-NEXT: vmovdqa (%r9), %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm3
-; AVX512BW-FCP-NEXT: vmovdqa64 %ymm0, %ymm19
-; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %ymm20
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm22 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm22, %ymm20, %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa (%rax), %ymm2
+; AVX512BW-FCP-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm20 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512BW-FCP-NEXT: vpshufb %ymm20, %ymm2, %ymm0
+; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512BW-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpermw %ymm2, %ymm1, %ymm1
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm2
+; AVX512BW-FCP-NEXT: vmovdqa (%r9), %ymm1
+; AVX512BW-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm1, %ymm3
+; AVX512BW-FCP-NEXT: vmovdqa (%r8), %ymm4
+; AVX512BW-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm4, %ymm4
; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX512BW-FCP-NEXT: vmovdqa (%r9), %xmm0
-; AVX512BW-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512BW-FCP-NEXT: vmovdqa (%r8), %xmm4
-; AVX512BW-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; AVX512BW-FCP-NEXT: vmovdqa64 (%r9), %xmm16
+; AVX512BW-FCP-NEXT: vmovdqa (%r8), %xmm15
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm15[8],xmm16[8],xmm15[9],xmm16[9],xmm15[10],xmm16[10],xmm15[11],xmm16[11],xmm15[12],xmm16[12],xmm15[13],xmm16[13],xmm15[14],xmm16[14],xmm15[15],xmm16[15]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm3
; AVX512BW-FCP-NEXT: movabsq $2323999253380730912, %r10 # imm = 0x2040810204081020
; AVX512BW-FCP-NEXT: kmovq %r10, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm2
-; AVX512BW-FCP-NEXT: vmovdqa %ymm0, %ymm9
-; AVX512BW-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm5
-; AVX512BW-FCP-NEXT: vmovdqa64 %ymm0, %ymm18
-; AVX512BW-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-FCP-NEXT: vpor %ymm2, %ymm5, %ymm2
-; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm14
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm15
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm15[8],xmm14[8],xmm15[9],xmm14[9],xmm15[10],xmm14[10],xmm15[11],xmm14[11],xmm15[12],xmm14[12],xmm15[13],xmm14[13],xmm15[14],xmm14[14],xmm15[15],xmm14[15]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm5, %zmm2
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm5
-; AVX512BW-FCP-NEXT: vmovdqa %ymm0, %ymm6
-; AVX512BW-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %ymm24
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm21 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512BW-FCP-NEXT: vpshufb %ymm21, %ymm24, %ymm16
-; AVX512BW-FCP-NEXT: vporq %ymm5, %ymm16, %ymm5
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %xmm16
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm17
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm25 = xmm17[8],xmm16[8],xmm17[9],xmm16[9],xmm17[10],xmm16[10],xmm17[11],xmm16[11],xmm17[12],xmm16[12],xmm17[13],xmm16[13],xmm17[14],xmm16[14],xmm17[15],xmm16[15]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm25 = xmm25[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm25[0,1,0,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm25, %zmm13
+; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm4
+; AVX512BW-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512BW-FCP-NEXT: vpshufb %ymm2, %ymm4, %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm6
+; AVX512BW-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm5, %ymm6, %ymm6
+; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm6, %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %xmm17
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %xmm19
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm19[8],xmm17[8],xmm19[9],xmm17[9],xmm19[10],xmm17[10],xmm19[11],xmm17[11],xmm19[12],xmm17[12],xmm19[13],xmm17[13],xmm19[14],xmm17[14],xmm19[15],xmm17[15]
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm29 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm29, %xmm6, %xmm6
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm6, %zmm4
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm6
+; AVX512BW-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm9, %ymm6, %ymm6
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm7
+; AVX512BW-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm23 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512BW-FCP-NEXT: vpshufb %ymm23, %ymm7, %ymm21
+; AVX512BW-FCP-NEXT: vporq %ymm6, %ymm21, %ymm6
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %xmm21
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm22
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm24 = xmm22[8],xmm21[8],xmm22[9],xmm21[9],xmm22[10],xmm21[10],xmm22[11],xmm21[11],xmm22[12],xmm21[12],xmm22[13],xmm21[13],xmm22[14],xmm21[14],xmm22[15],xmm21[15]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm24, %xmm24
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm24 = ymm24[0,1,0,1]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm24, %zmm14
; AVX512BW-FCP-NEXT: movabsq $435749860008887046, %r10 # imm = 0x60C183060C18306
; AVX512BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm13 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm14 {%k1}
; AVX512BW-FCP-NEXT: movabsq $4066998693416279096, %r10 # imm = 0x3870E1C3870E1C38
; AVX512BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm13 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm2
-; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm3
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm3[2,3,2,3]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm3
-; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm3, %ymm26
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,3,2,3]
-; AVX512BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm4
-; AVX512BW-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm11
-; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm11, %ymm4
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm25, %zmm4, %zmm4
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm11
-; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm11, %ymm25
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm27
-; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm27, %ymm26
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,3,2,3]
-; AVX512BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512BW-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm12
-; AVX512BW-FCP-NEXT: vpshufb %ymm21, %ymm27, %ymm21
-; AVX512BW-FCP-NEXT: vporq %ymm12, %ymm21, %ymm12
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm25, %zmm12, %zmm12
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm14 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm3
+; AVX512BW-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
+; AVX512BW-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm5
+; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512BW-FCP-NEXT: vpshufb %ymm5, %ymm3, %ymm5
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm24
+; AVX512BW-FCP-NEXT: vporq %ymm5, %ymm24, %ymm5
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512BW-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm9
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm25
+; AVX512BW-FCP-NEXT: vpshufb %ymm23, %ymm25, %ymm23
+; AVX512BW-FCP-NEXT: vporq %ymm9, %ymm23, %ymm9
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm23
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm25, %ymm24
+; AVX512BW-FCP-NEXT: vporq %ymm23, %ymm24, %ymm23
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm23 = ymm23[2,3,2,3]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm9, %zmm9
; AVX512BW-FCP-NEXT: movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
-; AVX512BW-FCP-NEXT: kmovq %r10, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm12 {%k2}
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512BW-FCP-NEXT: vmovdqa 32(%r8), %ymm4
-; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm21
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm21[2,3,2,3]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512BW-FCP-NEXT: vmovdqa 32(%r9), %ymm0
-; AVX512BW-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm26
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,3,2,3]
-; AVX512BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm1
-; AVX512BW-FCP-NEXT: vpshufb %ymm22, %ymm4, %ymm22
-; AVX512BW-FCP-NEXT: vporq %ymm1, %ymm22, %ymm1
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm25, %zmm1, %zmm1
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rax), %ymm31
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm22 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
-; AVX512BW-FCP-NEXT: vpermw %ymm31, %ymm22, %ymm22
-; AVX512BW-FCP-NEXT: vpshufb %ymm28, %ymm31, %ymm25
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm25, %zmm22
-; AVX512BW-FCP-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
; AVX512BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm1 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm9 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa 32(%r9), %ymm4
+; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm28
+; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm28, %ymm1
+; AVX512BW-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm28, %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm4, %ymm23
+; AVX512BW-FCP-NEXT: vporq %ymm1, %ymm23, %ymm1
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rax), %ymm6
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm23 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
+; AVX512BW-FCP-NEXT: vpermw %ymm6, %ymm23, %ymm23
+; AVX512BW-FCP-NEXT: vpshufb %ymm20, %ymm6, %ymm20
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm20, %zmm20
+; AVX512BW-FCP-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
+; AVX512BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm20, %zmm1 {%k2}
; AVX512BW-FCP-NEXT: movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
-; AVX512BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm12 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm25
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,2,3],zmm25[4,5,6,7]
+; AVX512BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm9 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm23
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,2,3],zmm23[4,5,6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,zmm1[23],zero,zmm1[21,22,23,26],zero,zmm1[24],zero,zmm1[28,29,26,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero,zmm1[61],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm26
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm3[0,1,2,3],zmm26[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm24
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm24[4,5,6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm2[25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zero,zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero,zmm2[61],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm2[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: vporq %zmm1, %zmm2, %zmm1
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm28
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm27[0,1,2,3],zmm28[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm26
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm25[0,1,2,3],zmm26[4,5,6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zero,zero,zmm2[27],zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,60,61,62],zero,zmm2[60],zero,zmm2[62,63,62,63],zero,zmm2[61],zero,zmm2[63,60,61]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm2[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm29
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm11[0,1,2,3],zmm29[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm27
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm5[0,1,2,3],zmm27[4,5,6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm3[23],zero,zero,zero,zero,zmm3[26],zero,zmm3[24],zero,zero,zero,zero,zmm3[27],zero,zmm3[25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zmm3[62],zero,zmm3[60],zero,zero,zero,zero,zmm3[63],zero,zmm3[61],zero,zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vporq %zmm2, %zmm3, %zmm22
+; AVX512BW-FCP-NEXT: vporq %zmm2, %zmm3, %zmm2
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm20 = zmm2[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
-; AVX512BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm22 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 (%r9), %zmm27
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm4[0,1,2,3],zmm27[4,5,6,7]
+; AVX512BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm20 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqa64 (%r9), %zmm25
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm28[0,1,2,3],zmm25[4,5,6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm1[23],zero,zmm1[23,24,25,26],zero,zmm1[24],zero,zmm1[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm1[59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero
+; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %zmm28
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm4[0,1,2,3],zmm28[4,5,6,7]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm2[59],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero,zmm2[61]
+; AVX512BW-FCP-NEXT: vporq %zmm1, %zmm2, %zmm1
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %zmm30
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm30[4,5,6,7]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm0[25],zero,zmm0[23],zero,zero,zero,zero,zmm0[26],zero,zmm0[24],zero,zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm0[59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero,zmm0[61]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512BW-FCP-NEXT: movabsq $6971997760142192736, %r10 # imm = 0x60C183060C183060
-; AVX512BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm22 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rax), %zmm10
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm0 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
-; AVX512BW-FCP-NEXT: vpermi2w %zmm10, %zmm31, %zmm0
+; AVX512BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm20 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rax), %zmm31
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm1 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
+; AVX512BW-FCP-NEXT: vpermi2w %zmm31, %zmm6, %zmm1
; AVX512BW-FCP-NEXT: movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm22 {%k3}
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm6[28],zero,ymm6[30,31,30,31],zero,ymm6[29],zero,ymm6[31,28,29]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm24[30],zero,ymm24[28],zero,zero,zero,zero,ymm24[31],zero,ymm24[29],zero,zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpor %ymm0, %ymm1, %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm8
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm7
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm6
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm18[30],zero,ymm18[28],zero,zero,zero,zero,ymm18[31],zero,ymm18[29],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpor %ymm1, %ymm4, %ymm4
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm2
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %xmm1
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm18 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512BW-FCP-NEXT: vpshufb %xmm18, %xmm0, %xmm0
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm9
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k2}
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm19[27],zero,zero,zero,zero,ymm19[30],zero,ymm19[28],zero,zero,zero,zero,ymm19[31],zero
-; AVX512BW-FCP-NEXT: vmovdqa64 %ymm19, %ymm21
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm20[27],zero,zero,zero,zero,ymm20[30],zero,ymm20[28],zero,zero,zero,zero,ymm20[31],zero,ymm20[29]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpor %ymm0, %ymm6, %ymm3
-; AVX512BW-FCP-NEXT: vmovdqa 32(%r9), %xmm5
-; AVX512BW-FCP-NEXT: vmovdqa 32(%r8), %xmm4
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm0, %xmm0
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512BW-FCP-NEXT: vpermw %zmm10, %zmm3, %zmm3
-; AVX512BW-FCP-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
-; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm0 {%k2}
-; AVX512BW-FCP-NEXT: movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
-; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm9 {%k2}
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm3
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm2, %xmm23
-; AVX512BW-FCP-NEXT: vporq %xmm3, %xmm23, %xmm3
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm1
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512BW-FCP-NEXT: vpshufb %xmm2, %xmm7, %xmm3
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512BW-FCP-NEXT: vpshufb %xmm0, %xmm8, %xmm23
-; AVX512BW-FCP-NEXT: vporq %xmm3, %xmm23, %xmm3
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm3, %zmm3
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm23 = zmm3[0,1,0,1,4,5,4,5]
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm23 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512BW-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm3
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm4, %xmm8
-; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm8, %xmm3
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm4, %zmm3, %zmm3
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm4 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
-; AVX512BW-FCP-NEXT: vpermi2w %zmm10, %zmm31, %zmm4
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm20 {%k3}
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm5
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %xmm4
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512BW-FCP-NEXT: vpshufb %xmm13, %xmm4, %xmm1
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm5, %xmm2
+; AVX512BW-FCP-NEXT: vpor %xmm1, %xmm2, %xmm1
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; AVX512BW-FCP-NEXT: vpshufb %xmm29, %xmm2, %xmm2
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm1, %zmm3
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm1
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm29
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm2, %xmm30
+; AVX512BW-FCP-NEXT: vporq %xmm29, %xmm30, %xmm29
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm30 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm30, %xmm30
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm30, %zmm29, %zmm29
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,1,0,1,4,5,4,5]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm29 = zmm29[0,1,0,1,4,5,4,5]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm29 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%r9), %xmm30
+; AVX512BW-FCP-NEXT: vmovdqa 32(%r8), %xmm3
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm30, %xmm0
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm3, %xmm18
+; AVX512BW-FCP-NEXT: vporq %xmm0, %xmm18, %xmm0
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm18 = xmm3[8],xmm30[8],xmm3[9],xmm30[9],xmm3[10],xmm30[10],xmm3[11],xmm30[11],xmm3[12],xmm30[12],xmm3[13],xmm30[13],xmm3[14],xmm30[14],xmm3[15],xmm30[15]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm18 = xmm18[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm18, %zmm0, %zmm0
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm18 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
+; AVX512BW-FCP-NEXT: vpermi2w %zmm31, %zmm6, %zmm18
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
; AVX512BW-FCP-NEXT: movabsq $290499906672591364, %rax # imm = 0x408102040810204
-; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm3 {%k2}
+; AVX512BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm0 {%k3}
; AVX512BW-FCP-NEXT: movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
-; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm23 {%k2}
-; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm15, %xmm3
-; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm14, %xmm4
-; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3],xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
-; AVX512BW-FCP-NEXT: vpshufb %xmm18, %xmm4, %xmm4
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm4, %zmm3
-; AVX512BW-FCP-NEXT: vpshufb %xmm2, %xmm17, %xmm2
-; AVX512BW-FCP-NEXT: vpshufb %xmm0, %xmm16, %xmm0
-; AVX512BW-FCP-NEXT: vpor %xmm2, %xmm0, %xmm0
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm16[0],xmm17[0],xmm16[1],xmm17[1],xmm16[2],xmm17[2],xmm16[3],xmm17[3],xmm16[4],xmm17[4],xmm16[5],xmm17[5],xmm16[6],xmm17[6],xmm16[7],xmm17[7]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm3[0,1,0,1,4,5,4,5]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm29 {%k3}
+; AVX512BW-FCP-NEXT: vpshufb %xmm13, %xmm19, %xmm0
+; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm17, %xmm6
+; AVX512BW-FCP-NEXT: vpor %xmm0, %xmm6, %xmm0
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3],xmm17[4],xmm19[4],xmm17[5],xmm19[5],xmm17[6],xmm19[6],xmm17[7],xmm19[7]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm6, %xmm6
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm6, %zmm0
+; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm22, %xmm6
+; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm21, %xmm8
+; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm8, %xmm6
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm21[0],xmm22[0],xmm21[1],xmm22[1],xmm21[2],xmm22[2],xmm21[3],xmm22[3],xmm21[4],xmm22[4],xmm21[5],xmm22[5],xmm21[6],xmm22[6],xmm21[7],xmm22[7]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm8, %xmm8
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm8, %zmm6
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm6[0,1,0,1,4,5,4,5]
; AVX512BW-FCP-NEXT: movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
-; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm0 {%k2}
-; AVX512BW-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX512BW-FCP-NEXT: vpshufb %xmm1, %xmm3, %xmm1
-; AVX512BW-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm4, %xmm2
-; AVX512BW-FCP-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm2, %xmm2
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm1
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, (%rsp), %zmm10, %zmm2 # 32-byte Folded Reload
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
-; AVX512BW-FCP-NEXT: vpermw %zmm2, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm8, %zmm0 {%k3}
+; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm16, %xmm6
+; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm15, %xmm7
+; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm15[0],xmm16[0],xmm15[1],xmm16[1],xmm15[2],xmm16[2],xmm15[3],xmm16[3],xmm15[4],xmm16[4],xmm15[5],xmm16[5],xmm15[6],xmm16[6],xmm15[7],xmm16[7]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm7, %xmm7
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm7, %zmm6
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm6[0,1,0,1,4,5,4,5]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, (%rsp), %zmm31, %zmm8 # 32-byte Folded Reload
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm7 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
+; AVX512BW-FCP-NEXT: vpermw %zmm8, %zmm7, %zmm7
; AVX512BW-FCP-NEXT: movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
-; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm1 {%k2}
+; AVX512BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm6 {%k3}
; AVX512BW-FCP-NEXT: movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
-; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm24, %zmm28, %zmm1
+; AVX512BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm0 {%k3}
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm1
+; AVX512BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm13[28],zero,ymm13[30,31,30,31],zero,ymm13[29],zero,ymm13[31,28,29]
+; AVX512BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29],zero,zero,zero
+; AVX512BW-FCP-NEXT: vpor %ymm2, %ymm6, %ymm2
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm2[2,3,2,3],zmm1[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
+; AVX512BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29],zero
+; AVX512BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm7[30],zero,ymm7[28],zero,zero,zero,zero,ymm7[31],zero,ymm7[29],zero,zero
+; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm4[2,3,2,3],zmm2[0,1,0,1]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm30[0],xmm3[1],xmm30[1],xmm3[2],xmm30[2],xmm3[3],xmm30[3],xmm3[4],xmm30[4],xmm3[5],xmm30[5],xmm3[6],xmm30[6],xmm3[7],xmm30[7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm1, %xmm1
+; AVX512BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero
+; AVX512BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29]
+; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[2,3,2,3],zmm1[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512BW-FCP-NEXT: vpermw %zmm31, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
+; AVX512BW-FCP-NEXT: kmovq %rax, %k1
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm1 {%k1}
+; AVX512BW-FCP-NEXT: movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
+; AVX512BW-FCP-NEXT: kmovq %rax, %k1
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm26, %zmm1
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[18,19,20,21],zero,zmm1[19],zero,zmm1[21,20,21,22],zero,zmm1[20],zero,zmm1[22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm1[55],zero,zero,zero,zero,zmm1[58],zero,zmm1[56],zero,zero,zero,zero,zmm1[59],zero
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 32-byte Folded Reload
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm27, %zmm3
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm3[21],zero,zmm3[19],zero,zero,zero,zero,zmm3[22],zero,zmm3[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm3[55],zero,zero,zero,zero,zmm3[58],zero,zmm3[56],zero,zero,zero,zero,zmm3[59],zero,zmm3[57]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: vporq %zmm1, %zmm3, %zmm1
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm3 # 32-byte Folded Reload
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm24, %zmm3
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm3[18],zero,zmm3[18,19,20,21],zero,zmm3[19],zero,zmm3[25,26,27,22],zero,zmm3[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm3[56,57],zero,zmm3[55],zero,zmm3[53,54,55,58],zero,zmm3[56],zero,zmm3[60,61,58,59]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm4 # 32-byte Folded Reload
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm23, %zmm4
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm4[18],zero,zero,zero,zero,zmm4[21],zero,zmm4[19],zero,zero,zero,zero,zmm4[22],zero,zmm4[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm4[57],zero,zmm4[55],zero,zero,zero,zero,zmm4[58],zero,zmm4[56],zero,zero,zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm4[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: vporq %zmm3, %zmm4, %zmm3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm3 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm20, %zmm30, %zmm1
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm3 {%k2}
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm28, %zmm1
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[20],zero,zmm1[18],zero,zmm1[20,21,20,21],zero,zmm1[19],zero,zmm1[19,20,21,22],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[56,57,56,57],zero,zmm1[55],zero,zmm1[55,56,57,58],zero,zmm1[56],zero,zmm1[62,63]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm21, %zmm27, %zmm4
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm25, %zmm4
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm4[20],zero,zmm4[18],zero,zero,zero,zero,zmm4[21],zero,zmm4[19],zero,zero,zero,zero,zmm4[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm4[57],zero,zmm4[55],zero,zero,zero,zero,zmm4[58],zero,zmm4[56],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm4[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: vporq %zmm1, %zmm4, %zmm1
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm4 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
-; AVX512BW-FCP-NEXT: vpermw %zmm2, %zmm4, %zmm2
+; AVX512BW-FCP-NEXT: vpermw %zmm8, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm1 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm1 {%k1}
; AVX512BW-FCP-NEXT: movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm3 {%k1}
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, 128(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm12, 320(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, 320(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 192(%rax)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm23, 256(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, 192(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, 64(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm22, 384(%rax)
-; AVX512BW-FCP-NEXT: addq $40, %rsp
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm29, 256(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, 64(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm20, 384(%rax)
+; AVX512BW-FCP-NEXT: addq $104, %rsp
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -11069,8 +10826,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-NEXT: vmovdqa (%rax), %ymm13
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm27 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm13, %ymm0
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm26 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512DQ-BW-NEXT: vpshufb %ymm26, %ymm13, %ymm0
; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
; AVX512DQ-BW-NEXT: # ymm1 = mem[0,1,0,1]
; AVX512DQ-BW-NEXT: vpermw %ymm13, %ymm1, %ymm1
@@ -11079,12 +10836,13 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm17 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm9, %ymm1
; AVX512DQ-BW-NEXT: vmovdqa (%r8), %ymm10
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm22 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512DQ-BW-NEXT: vpshufb %ymm22, %ymm10, %ymm2
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm21 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512DQ-BW-NEXT: vpshufb %ymm21, %ymm10, %ymm2
; AVX512DQ-BW-NEXT: vpor %ymm1, %ymm2, %ymm2
-; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm12
-; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm3
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15]
+; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm1
+; AVX512DQ-BW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm12
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm12[8],xmm1[8],xmm12[9],xmm1[9],xmm12[10],xmm1[10],xmm12[11],xmm1[11],xmm12[12],xmm1[12],xmm12[13],xmm1[13],xmm12[14],xmm1[14],xmm12[15],xmm1[15]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm8
@@ -11094,270 +10852,257 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %ymm14
; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm0 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
; AVX512DQ-BW-NEXT: vpshufb %ymm0, %ymm14, %ymm2
-; AVX512DQ-BW-NEXT: vmovdqa64 (%rcx), %ymm16
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm23 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512DQ-BW-NEXT: vpshufb %ymm23, %ymm16, %ymm4
+; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %ymm15
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512DQ-BW-NEXT: vpshufb %ymm20, %ymm15, %ymm4
; AVX512DQ-BW-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm4
; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %xmm5
; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm6, %zmm20
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm6, %zmm22
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %ymm18
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm24 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
; AVX512DQ-BW-NEXT: vpshufb %ymm24, %ymm18, %ymm2
; AVX512DQ-BW-NEXT: vmovdqa64 (%rsi), %ymm19
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm26 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512DQ-BW-NEXT: vpshufb %ymm26, %ymm19, %ymm6
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm25 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm19, %ymm6
; AVX512DQ-BW-NEXT: vpor %ymm2, %ymm6, %ymm2
; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm6
; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %xmm7
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm21 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm21 = xmm21[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm21 = ymm21[0,1,0,1]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm21, %zmm2
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm23 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm23 = xmm23[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm23 = ymm23[0,1,0,1]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm23, %zmm3
; AVX512DQ-BW-NEXT: movabsq $435749860008887046, %r10 # imm = 0x60C183060C18306
; AVX512DQ-BW-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm20, %zmm2 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm22, %zmm3 {%k1}
; AVX512DQ-BW-NEXT: movabsq $4066998693416279096, %r10 # imm = 0x3870E1C3870E1C38
; AVX512DQ-BW-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm8, %zmm2 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm20 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm8, %zmm3 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdx), %ymm29
+; AVX512DQ-BW-NEXT: vpshufb %ymm0, %ymm29, %ymm0
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%rcx), %ymm30
+; AVX512DQ-BW-NEXT: vpshufb %ymm20, %ymm30, %ymm8
+; AVX512DQ-BW-NEXT: vpor %ymm0, %ymm8, %ymm0
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm20 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
; AVX512DQ-BW-NEXT: vpshufb %ymm20, %ymm29, %ymm8
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm22 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512DQ-BW-NEXT: vpshufb %ymm22, %ymm30, %ymm23
+; AVX512DQ-BW-NEXT: vporq %ymm8, %ymm23, %ymm8
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm21 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%rcx), %ymm30
-; AVX512DQ-BW-NEXT: vpshufb %ymm21, %ymm30, %ymm25
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512DQ-BW-NEXT: vporq %ymm8, %ymm25, %ymm8
-; AVX512DQ-BW-NEXT: vpshufb %ymm0, %ymm29, %ymm0
-; AVX512DQ-BW-NEXT: vpshufb %ymm23, %ymm30, %ymm23
-; AVX512DQ-BW-NEXT: vporq %ymm0, %ymm23, %ymm0
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
; AVX512DQ-BW-NEXT: vmovdqa64 32(%rsi), %ymm28
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm15
-; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} ymm8 = ymm15[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdi), %ymm16
+; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} ymm8 = ymm16[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[0,0,1,1,4,4,5,5]
-; AVX512DQ-BW-NEXT: vpbroadcastd {{.*#+}} ymm25 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
+; AVX512DQ-BW-NEXT: vpbroadcastd {{.*#+}} ymm23 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
; AVX512DQ-BW-NEXT: movl $676341840, %r10d # imm = 0x28502850
; AVX512DQ-BW-NEXT: kmovd %r10d, %k1
-; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm28, %ymm8 {%k1}
+; AVX512DQ-BW-NEXT: vpshufb %ymm23, %ymm28, %ymm8 {%k1}
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512DQ-BW-NEXT: vpshufb %ymm24, %ymm15, %ymm23
-; AVX512DQ-BW-NEXT: vpshufb %ymm26, %ymm28, %ymm24
-; AVX512DQ-BW-NEXT: vporq %ymm23, %ymm24, %ymm23
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm23, %zmm8
+; AVX512DQ-BW-NEXT: vpshufb %ymm24, %ymm16, %ymm24
+; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm28, %ymm25
+; AVX512DQ-BW-NEXT: vporq %ymm24, %ymm25, %ymm24
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm24, %zmm8
; AVX512DQ-BW-NEXT: movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
; AVX512DQ-BW-NEXT: kmovq %r10, %k2
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm8 {%k2}
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm23 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %ymm31
-; AVX512DQ-BW-NEXT: vpshufb %ymm23, %ymm31, %ymm0
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm26 = ymm0[2,3,2,3]
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm24 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512DQ-BW-NEXT: vmovdqa 32(%r9), %ymm0
-; AVX512DQ-BW-NEXT: vpshufb %ymm24, %ymm0, %ymm11
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512DQ-BW-NEXT: vporq %ymm26, %ymm11, %ymm11
-; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm0, %ymm17
-; AVX512DQ-BW-NEXT: vpshufb %ymm22, %ymm31, %ymm22
-; AVX512DQ-BW-NEXT: vporq %ymm17, %ymm22, %ymm17
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm17, %zmm11
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%r9), %ymm31
+; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm31, %ymm17
+; AVX512DQ-BW-NEXT: vmovdqa 32(%r8), %ymm1
+; AVX512DQ-BW-NEXT: vpshufb %ymm21, %ymm1, %ymm21
+; AVX512DQ-BW-NEXT: vporq %ymm17, %ymm21, %ymm17
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm24 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512DQ-BW-NEXT: vpshufb %ymm24, %ymm1, %ymm21
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm25 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm31, %ymm27
+; AVX512DQ-BW-NEXT: vporq %ymm21, %ymm27, %ymm21
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm21 = ymm21[2,3,2,3]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm21, %zmm17, %zmm21
; AVX512DQ-BW-NEXT: vmovdqa64 32(%rax), %ymm17
-; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm17, %ymm22
+; AVX512DQ-BW-NEXT: vpshufb %ymm26, %ymm17, %ymm27
; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm26 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
-; AVX512DQ-BW-NEXT: vpermw %ymm17, %ymm26, %ymm27
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm27, %zmm22, %zmm22
+; AVX512DQ-BW-NEXT: vpermw %ymm17, %ymm26, %ymm11
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm27, %zmm11
; AVX512DQ-BW-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
; AVX512DQ-BW-NEXT: kmovq %r10, %k3
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm22, %zmm11 {%k3}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm11, %zmm21 {%k3}
; AVX512DQ-BW-NEXT: movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
; AVX512DQ-BW-NEXT: kmovq %r10, %k3
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm11, %zmm8 {%k3}
-; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm15[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm21, %zmm8 {%k3}
+; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm16[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[2,2,3,3,6,6,7,7]
; AVX512DQ-BW-NEXT: movl $338170920, %r10d # imm = 0x14281428
; AVX512DQ-BW-NEXT: kmovd %r10d, %k4
; AVX512DQ-BW-NEXT: vpbroadcastd {{.*#+}} ymm27 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm28, %ymm11 {%k4}
-; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512DQ-BW-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpshufb %ymm1, %ymm28, %ymm22
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm22 = ymm22[2,3,2,3]
+; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512DQ-BW-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpshufb %ymm2, %ymm28, %ymm21
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm28 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512DQ-BW-NEXT: vpshufb %ymm28, %ymm15, %ymm15
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX512DQ-BW-NEXT: vporq %ymm22, %ymm15, %ymm15
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm15, %zmm15
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm29[24,25],zero,ymm29[23],zero,ymm29[21,22,23,26],zero,ymm29[24],zero,ymm29[28,29,26,27]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm22 = ymm30[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm30[25],zero,ymm30[23],zero,zero,zero,zero,ymm30[26],zero,ymm30[24],zero,zero,zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm22 = ymm22[2,3,2,3]
-; AVX512DQ-BW-NEXT: vporq %ymm11, %ymm22, %ymm22
+; AVX512DQ-BW-NEXT: vpshufb %ymm28, %ymm16, %ymm16
+; AVX512DQ-BW-NEXT: vporq %ymm21, %ymm16, %ymm16
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm16, %zmm16
+; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm29[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm21 = ymm11[0,2,3,3,4,6,7,7]
; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm11 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm30, %ymm30
-; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm29 = ymm29[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm29 = ymm29[0,2,3,3,4,6,7,7]
-; AVX512DQ-BW-NEXT: vmovdqu8 %ymm29, %ymm30 {%k1}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm29 = ymm30[2,3,2,3]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm29, %zmm22, %zmm22
+; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm30, %ymm0
+; AVX512DQ-BW-NEXT: vmovdqu8 %ymm21, %ymm0 {%k1}
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm21 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm29[24,25],zero,ymm29[23],zero,ymm29[21,22,23,26],zero,ymm29[24],zero,ymm29[28,29,26,27]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm29 = ymm30[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm30[25],zero,ymm30[23],zero,zero,zero,zero,ymm30[26],zero,ymm30[24],zero,zero,zero,zero
+; AVX512DQ-BW-NEXT: vporq %ymm21, %ymm29, %ymm21
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm21, %zmm0
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
; AVX512DQ-BW-NEXT: kmovq %r10, %k3
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm22, %zmm15 {%k3}
-; AVX512DQ-BW-NEXT: vmovdqa64 (%r9), %zmm29
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm31[0,1,2,3],zmm29[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm22 = zmm22[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm22[23],zero,zmm22[23,24,25,26],zero,zmm22[24],zero,zmm22[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm22[59],zero,zero,zero,zero,zmm22[62],zero,zmm22[60],zero,zero,zero,zero,zmm22[63],zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-NEXT: vmovdqa64 (%r8), %zmm30
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm30[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm0[25],zero,zmm0[23],zero,zero,zero,zero,zmm0[26],zero,zmm0[24],zero,zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm0[59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero,zmm0[61]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-NEXT: vporq %zmm22, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm16 {%k3}
+; AVX512DQ-BW-NEXT: vmovdqa64 (%r9), %zmm0
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm0[4,5,6,7]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm1[23],zero,zmm1[23,24,25,26],zero,zmm1[24],zero,zmm1[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm1[59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero
+; AVX512DQ-BW-NEXT: vmovdqa64 (%r8), %zmm29
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm31[0,1,2,3],zmm29[4,5,6,7]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm21 = zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm21[25],zero,zmm21[23],zero,zero,zero,zero,zmm21[26],zero,zmm21[24],zero,zero,zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm21[59],zero,zero,zero,zero,zmm21[62],zero,zmm21[60],zero,zero,zero,zero,zmm21[63],zero,zmm21[61]
+; AVX512DQ-BW-NEXT: vporq %zmm1, %zmm21, %zmm1
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: movabsq $6971997760142192736, %r10 # imm = 0x60C183060C183060
; AVX512DQ-BW-NEXT: kmovq %r10, %k3
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm15 {%k3}
-; AVX512DQ-BW-NEXT: vmovdqa64 (%rax), %zmm22
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm0 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
-; AVX512DQ-BW-NEXT: vpermi2w %zmm22, %zmm17, %zmm0
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm16 {%k3}
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rax), %zmm21
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm1 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
+; AVX512DQ-BW-NEXT: vpermi2w %zmm21, %zmm17, %zmm1
; AVX512DQ-BW-NEXT: movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
; AVX512DQ-BW-NEXT: kmovq %rax, %k5
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm15 {%k5}
-; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} ymm0 = ymm18[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
-; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm19, %ymm0 {%k1}
-; AVX512DQ-BW-NEXT: vpshufb %ymm1, %ymm19, %ymm1
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-BW-NEXT: vpshufb %ymm28, %ymm18, %ymm25
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512DQ-BW-NEXT: vporq %ymm1, %ymm25, %ymm1
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512DQ-BW-NEXT: vmovdqa64 (%rdx), %zmm1
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm1, %zmm1
-; AVX512DQ-BW-NEXT: vpshufb %zmm20, %zmm1, %zmm1
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm16 {%k5}
+; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} ymm1 = ymm18[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,1,1,4,4,5,5]
+; AVX512DQ-BW-NEXT: vpshufb %ymm23, %ymm19, %ymm1 {%k1}
+; AVX512DQ-BW-NEXT: vpshufb %ymm2, %ymm19, %ymm2
+; AVX512DQ-BW-NEXT: vpshufb %ymm28, %ymm18, %ymm23
+; AVX512DQ-BW-NEXT: vporq %ymm2, %ymm23, %ymm2
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rdx), %zmm2
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: vpshufb %zmm20, %zmm2, %zmm2
; AVX512DQ-BW-NEXT: vmovdqa64 (%rcx), %zmm20
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm16, %zmm20, %zmm20
-; AVX512DQ-BW-NEXT: vpshufb %zmm21, %zmm20, %zmm20
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm15, %zmm20, %zmm20
+; AVX512DQ-BW-NEXT: vpshufb %zmm22, %zmm20, %zmm20
+; AVX512DQ-BW-NEXT: vporq %zmm2, %zmm20, %zmm2
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm20 = zmm20[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-NEXT: vporq %zmm1, %zmm20, %zmm20
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm20 {%k3}
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%r9), %xmm21
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm10, %zmm30, %zmm0
-; AVX512DQ-BW-NEXT: vpshufb %zmm23, %zmm0, %zmm0
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %xmm23
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm9, %zmm29, %zmm1
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm20 = zmm2[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm20 {%k3}
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%r9), %xmm22
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm10, %zmm29, %zmm1
; AVX512DQ-BW-NEXT: vpshufb %zmm24, %zmm1, %zmm1
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdx), %xmm24
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %xmm23
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT: vpshufb %zmm25, %zmm0, %zmm2
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rdx), %xmm0
+; AVX512DQ-BW-NEXT: vporq %zmm1, %zmm2, %zmm1
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rcx), %xmm2
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-NEXT: vporq %zmm0, %zmm1, %zmm1
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%rcx), %xmm25
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm13, %zmm22, %zmm13
-; AVX512DQ-BW-NEXT: vpermw %zmm13, %zmm26, %zmm0
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm13, %zmm21, %zmm13
+; AVX512DQ-BW-NEXT: vpermw %zmm13, %zmm26, %zmm24
; AVX512DQ-BW-NEXT: movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
; AVX512DQ-BW-NEXT: kmovq %rax, %k5
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k5}
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %xmm0
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm24, %zmm1 {%k5}
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdi), %xmm24
; AVX512DQ-BW-NEXT: movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
; AVX512DQ-BW-NEXT: kmovq %rax, %k5
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm20 {%k5}
; AVX512DQ-BW-NEXT: vmovdqa 32(%rsi), %xmm1
; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm18 = ymm18[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm26 = ymm18[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm19, %ymm26 {%k4}
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm18[2,2,3,3,6,6,7,7]
+; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm19, %ymm25 {%k4}
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm24[0],xmm1[0],xmm24[1],xmm1[1],xmm24[2],xmm1[2],xmm24[3],xmm1[3],xmm24[4],xmm1[4],xmm24[5],xmm1[5],xmm24[6],xmm1[6],xmm24[7],xmm1[7]
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm18 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
; AVX512DQ-BW-NEXT: vpshufb %xmm18, %xmm19, %xmm19
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm26[2,3,2,3],zmm19[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm26 = xmm24[0],xmm25[0],xmm24[1],xmm25[1],xmm24[2],xmm25[2],xmm24[3],xmm25[3],xmm24[4],xmm25[4],xmm24[5],xmm25[5],xmm24[6],xmm25[6],xmm24[7],xmm25[7]
-; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm16, %ymm11
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm16 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512DQ-BW-NEXT: vpshufb %xmm16, %xmm26, %xmm26
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm25 = zmm25[2,3,2,3],zmm19[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm26 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm15, %ymm11
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm19 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512DQ-BW-NEXT: vpshufb %xmm19, %xmm26, %xmm15
; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm14 = ymm14[0,2,3,3,4,6,7,7]
; AVX512DQ-BW-NEXT: vmovdqu8 %ymm14, %ymm11 {%k1}
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm14 = zmm11[2,3,2,3],zmm26[0,1,0,1]
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm19, %zmm14 {%k2}
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm14 = zmm11[2,3,2,3],zmm15[0,1,0,1]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm25, %zmm14 {%k2}
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm23[0],xmm22[0],xmm23[1],xmm22[1],xmm23[2],xmm22[2],xmm23[3],xmm22[3],xmm23[4],xmm22[4],xmm23[5],xmm22[5],xmm23[6],xmm22[6],xmm23[7],xmm22[7]
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm15 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-BW-NEXT: vpshufb %xmm15, %xmm11, %xmm11
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm9[27],zero,zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm10[27],zero,zero,zero,zero,ymm10[30],zero,ymm10[28],zero,zero,zero,zero,ymm10[31],zero,ymm10[29]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512DQ-BW-NEXT: vpor %ymm9, %ymm10, %ymm10
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm23[0],xmm21[0],xmm23[1],xmm21[1],xmm23[2],xmm21[2],xmm23[3],xmm21[3],xmm23[4],xmm21[4],xmm23[5],xmm21[5],xmm23[6],xmm21[6],xmm23[7],xmm21[7]
-; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm9 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm11, %xmm11
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm11 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512DQ-BW-NEXT: vpermw %zmm22, %zmm11, %zmm11
+; AVX512DQ-BW-NEXT: vpor %ymm9, %ymm10, %ymm9
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[2,3,2,3],zmm11[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm10 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512DQ-BW-NEXT: vpermw %zmm21, %zmm10, %zmm10
; AVX512DQ-BW-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm11, %zmm10 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm10, %zmm9 {%k1}
; AVX512DQ-BW-NEXT: movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm10, %zmm14 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512DQ-BW-NEXT: vpshufb %xmm11, %xmm25, %xmm10
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm19 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512DQ-BW-NEXT: vpshufb %xmm19, %xmm24, %xmm26
-; AVX512DQ-BW-NEXT: vporq %xmm10, %xmm26, %xmm10
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm24 = xmm25[8],xmm24[8],xmm25[9],xmm24[9],xmm25[10],xmm24[10],xmm25[11],xmm24[11],xmm25[12],xmm24[12],xmm25[13],xmm24[13],xmm25[14],xmm24[14],xmm25[15],xmm24[15]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm24 = xmm24[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm24, %zmm10, %zmm10
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm24 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512DQ-BW-NEXT: vpshufb %xmm24, %xmm1, %xmm25
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm26 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512DQ-BW-NEXT: vpshufb %xmm26, %xmm0, %xmm27
-; AVX512DQ-BW-NEXT: vporq %xmm25, %xmm27, %xmm25
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm10[0,1,0,1,4,5,4,5]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm0, %zmm25, %zmm0
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm10 = zmm0[0,1,0,1,4,5,4,5]
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm10 {%k3}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm9, %zmm14 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm10 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512DQ-BW-NEXT: vpshufb %xmm10, %xmm2, %xmm9
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512DQ-BW-NEXT: vpshufb %xmm11, %xmm0, %xmm25
+; AVX512DQ-BW-NEXT: vporq %xmm9, %xmm25, %xmm9
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm0, %zmm9, %zmm0
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm2 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512DQ-BW-NEXT: vpshufb %xmm2, %xmm1, %xmm9
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm25 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512DQ-BW-NEXT: vpshufb %xmm25, %xmm24, %xmm26
+; AVX512DQ-BW-NEXT: vporq %xmm9, %xmm26, %xmm9
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm24[8],xmm1[9],xmm24[9],xmm1[10],xmm24[10],xmm1[11],xmm24[11],xmm1[12],xmm24[12],xmm1[13],xmm24[13],xmm1[14],xmm24[14],xmm1[15],xmm24[15]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm1, %zmm9, %zmm1
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm9 = zmm1[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm9 {%k3}
; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm0 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512DQ-BW-NEXT: vpshufb %xmm0, %xmm21, %xmm1
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm25 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
-; AVX512DQ-BW-NEXT: vpermi2w %zmm22, %zmm17, %zmm25
+; AVX512DQ-BW-NEXT: vpshufb %xmm0, %xmm22, %xmm1
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm24 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
+; AVX512DQ-BW-NEXT: vpermi2w %zmm21, %zmm17, %zmm24
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm23, %xmm22
-; AVX512DQ-BW-NEXT: vporq %xmm1, %xmm22, %xmm1
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm21 = xmm23[8],xmm21[8],xmm23[9],xmm21[9],xmm23[10],xmm21[10],xmm23[11],xmm21[11],xmm23[12],xmm21[12],xmm23[13],xmm21[13],xmm23[14],xmm21[14],xmm23[15],xmm21[15]
+; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm23, %xmm21
+; AVX512DQ-BW-NEXT: vporq %xmm1, %xmm21, %xmm1
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm21 = xmm23[8],xmm22[8],xmm23[9],xmm22[9],xmm23[10],xmm22[10],xmm23[11],xmm22[11],xmm23[12],xmm22[12],xmm23[13],xmm22[13],xmm23[14],xmm22[14],xmm23[15],xmm22[15]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm21 = xmm21[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm21, %zmm1, %zmm1
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
; AVX512DQ-BW-NEXT: movabsq $290499906672591364, %rax # imm = 0x408102040810204
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm25, %zmm1 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm24, %zmm1 {%k1}
; AVX512DQ-BW-NEXT: movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm10 {%k1}
-; AVX512DQ-BW-NEXT: vpshufb %xmm11, %xmm5, %xmm1
-; AVX512DQ-BW-NEXT: vpshufb %xmm19, %xmm4, %xmm11
-; AVX512DQ-BW-NEXT: vpor %xmm1, %xmm11, %xmm1
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm9 {%k1}
+; AVX512DQ-BW-NEXT: vpshufb %xmm10, %xmm5, %xmm1
+; AVX512DQ-BW-NEXT: vpshufb %xmm11, %xmm4, %xmm10
+; AVX512DQ-BW-NEXT: vpor %xmm1, %xmm10, %xmm1
; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX512DQ-BW-NEXT: vpshufb %xmm16, %xmm4, %xmm4
+; AVX512DQ-BW-NEXT: vpshufb %xmm19, %xmm4, %xmm4
; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm1, %zmm4, %zmm1
-; AVX512DQ-BW-NEXT: vpshufb %xmm24, %xmm7, %xmm4
-; AVX512DQ-BW-NEXT: vpshufb %xmm26, %xmm6, %xmm5
-; AVX512DQ-BW-NEXT: vpor %xmm4, %xmm5, %xmm4
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512DQ-BW-NEXT: vpshufb %xmm18, %xmm5, %xmm5
-; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm4, %zmm5, %zmm4
+; AVX512DQ-BW-NEXT: vpshufb %xmm2, %xmm7, %xmm2
+; AVX512DQ-BW-NEXT: vpshufb %xmm25, %xmm6, %xmm4
+; AVX512DQ-BW-NEXT: vpor %xmm2, %xmm4, %xmm2
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512DQ-BW-NEXT: vpshufb %xmm18, %xmm4, %xmm4
+; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm2, %zmm4, %zmm2
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm4 = zmm4[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm2 = zmm2[0,1,0,1,4,5,4,5]
; AVX512DQ-BW-NEXT: movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm4 {%k1}
-; AVX512DQ-BW-NEXT: vpshufb %xmm0, %xmm12, %xmm0
-; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm3, %xmm1
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512DQ-BW-NEXT: vpshufb %xmm0, %xmm4, %xmm0
+; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm12, %xmm1
; AVX512DQ-BW-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm12[0],xmm3[1],xmm12[1],xmm3[2],xmm12[2],xmm3[3],xmm12[3],xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7]
-; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm1, %xmm1
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3],xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; AVX512DQ-BW-NEXT: vpshufb %xmm15, %xmm1, %xmm1
; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm1 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
; AVX512DQ-BW-NEXT: vpermw %zmm13, %zmm1, %zmm1
@@ -11367,334 +11112,319 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
; AVX512DQ-BW-NEXT: movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm4 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm2 {%k1}
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, (%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, (%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, 320(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, 256(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, 256(%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, 192(%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm20, 128(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, 64(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm15, 384(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, 64(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm16, 384(%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: store_i8_stride7_vf64:
; AVX512DQ-BW-FCP: # %bb.0:
-; AVX512DQ-BW-FCP-NEXT: subq $40, %rsp
+; AVX512DQ-BW-FCP-NEXT: subq $104, %rsp
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rax), %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm28 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm28, %ymm0, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512DQ-BW-FCP-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm0, %ymm2, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %ymm0, %ymm19
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %ymm20
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm22 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm22, %ymm20, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rax), %ymm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm20 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm20, %ymm2, %ymm0
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512DQ-BW-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm2, %ymm1, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm1, %ymm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r8), %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm4, %ymm4
; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %xmm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r8), %xmm4
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r9), %xmm16
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r8), %xmm15
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm15[8],xmm16[8],xmm15[9],xmm16[9],xmm15[10],xmm16[10],xmm15[11],xmm16[11],xmm15[12],xmm16[12],xmm15[13],xmm16[13],xmm15[14],xmm16[14],xmm15[15],xmm16[15]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm3
; AVX512DQ-BW-FCP-NEXT: movabsq $2323999253380730912, %r10 # imm = 0x2040810204081020
; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm0, %ymm9
-; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %ymm0, %ymm18
-; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm2, %ymm5, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm14
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm15
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm15[8],xmm14[8],xmm15[9],xmm14[9],xmm15[10],xmm14[10],xmm15[11],xmm14[11],xmm15[12],xmm14[12],xmm15[13],xmm14[13],xmm15[14],xmm14[14],xmm15[15],xmm14[15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm5, %zmm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm0, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %ymm24
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm21 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm21, %ymm24, %ymm16
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm5, %ymm16, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %xmm16
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm17
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm25 = xmm17[8],xmm16[8],xmm17[9],xmm16[9],xmm17[10],xmm16[10],xmm17[11],xmm16[11],xmm17[12],xmm16[12],xmm17[13],xmm16[13],xmm17[14],xmm16[14],xmm17[15],xmm16[15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm25 = xmm25[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm25[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm25, %zmm13
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm2, %ymm4, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm5, %ymm6, %ymm6
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm6, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %xmm17
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %xmm19
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm19[8],xmm17[8],xmm19[9],xmm17[9],xmm19[10],xmm17[10],xmm19[11],xmm17[11],xmm19[12],xmm17[12],xmm19[13],xmm17[13],xmm19[14],xmm17[14],xmm19[15],xmm17[15]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm29 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm29, %xmm6, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm6, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm9, %ymm6, %ymm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm7
+; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm23 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm23, %ymm7, %ymm21
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm6, %ymm21, %ymm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %xmm21
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm22
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm24 = xmm22[8],xmm21[8],xmm22[9],xmm21[9],xmm22[10],xmm21[10],xmm22[11],xmm21[11],xmm22[12],xmm21[12],xmm22[13],xmm21[13],xmm22[14],xmm21[14],xmm22[15],xmm21[15]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm24, %xmm24
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm24 = ymm24[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm24, %zmm14
; AVX512DQ-BW-FCP-NEXT: movabsq $435749860008887046, %r10 # imm = 0x60C183060C18306
; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm13 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm14 {%k1}
; AVX512DQ-BW-FCP-NEXT: movabsq $4066998693416279096, %r10 # imm = 0x3870E1C3870E1C38
; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm13 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm3[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm3, %ymm26
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm11
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm11, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm25, %zmm4, %zmm4
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm11
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm11, %ymm25
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm27
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm27, %ymm26
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm12
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm21, %ymm27, %ymm21
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm12, %ymm21, %ymm12
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm25, %zmm12, %zmm12
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm14 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm3
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm5
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm5, %ymm3, %ymm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm24
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm5, %ymm24, %ymm5
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm25
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm23, %ymm25, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm9, %ymm23, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm25, %ymm24
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm23, %ymm24, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm23 = ymm23[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm9, %zmm9
; AVX512DQ-BW-FCP-NEXT: movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
-; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm12 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%r8), %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm21
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm21[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%r9), %ymm0
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm26
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm22, %ymm4, %ymm22
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm1, %ymm22, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm25, %zmm1, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rax), %ymm31
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm22 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm31, %ymm22, %ymm22
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm28, %ymm31, %ymm25
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm25, %zmm22
-; AVX512DQ-BW-FCP-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm1 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm9 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%r9), %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm28
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm28, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm28, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm4, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm1, %ymm23, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rax), %ymm6
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm23 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm6, %ymm23, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm20, %ymm6, %ymm20
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm20, %zmm20
+; AVX512DQ-BW-FCP-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
+; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm20, %zmm1 {%k2}
; AVX512DQ-BW-FCP-NEXT: movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
-; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm12 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm25
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,2,3],zmm25[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm9 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm23
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,2,3],zmm23[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,zmm1[23],zero,zmm1[21,22,23,26],zero,zmm1[24],zero,zmm1[28,29,26,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero,zmm1[61],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm26
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm3[0,1,2,3],zmm26[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm24
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm24[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm2[25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zero,zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero,zmm2[61],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm2[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vporq %zmm1, %zmm2, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm28
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm27[0,1,2,3],zmm28[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm26
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm25[0,1,2,3],zmm26[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zero,zero,zmm2[27],zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,60,61,62],zero,zmm2[60],zero,zmm2[62,63,62,63],zero,zmm2[61],zero,zmm2[63,60,61]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm2[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm29
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm11[0,1,2,3],zmm29[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm27
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm5[0,1,2,3],zmm27[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm3[23],zero,zero,zero,zero,zmm3[26],zero,zmm3[24],zero,zero,zero,zero,zmm3[27],zero,zmm3[25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zmm3[62],zero,zmm3[60],zero,zero,zero,zero,zmm3[63],zero,zmm3[61],zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vporq %zmm2, %zmm3, %zmm22
+; AVX512DQ-BW-FCP-NEXT: vporq %zmm2, %zmm3, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm20 = zmm2[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
-; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm22 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r9), %zmm27
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm4[0,1,2,3],zmm27[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm20 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r9), %zmm25
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm28[0,1,2,3],zmm25[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm1[23],zero,zmm1[23,24,25,26],zero,zmm1[24],zero,zmm1[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm1[59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %zmm28
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm4[0,1,2,3],zmm28[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm2[59],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero,zmm2[61]
+; AVX512DQ-BW-FCP-NEXT: vporq %zmm1, %zmm2, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %zmm30
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm30[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm0[25],zero,zmm0[23],zero,zero,zero,zero,zmm0[26],zero,zmm0[24],zero,zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm0[59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero,zmm0[61]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512DQ-BW-FCP-NEXT: movabsq $6971997760142192736, %r10 # imm = 0x60C183060C183060
-; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm22 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rax), %zmm10
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm0 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm10, %zmm31, %zmm0
+; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm20 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rax), %zmm31
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm1 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
+; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm31, %zmm6, %zmm1
; AVX512DQ-BW-FCP-NEXT: movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm22 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm6[28],zero,ymm6[30,31,30,31],zero,ymm6[29],zero,ymm6[31,28,29]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm24[30],zero,ymm24[28],zero,zero,zero,zero,ymm24[31],zero,ymm24[29],zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm0, %ymm1, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm8
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm7
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm6
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm18[30],zero,ymm18[28],zero,zero,zero,zero,ymm18[31],zero,ymm18[29],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm1, %ymm4, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %xmm1
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm18 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm18, %xmm0, %xmm0
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm9
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm19[27],zero,zero,zero,zero,ymm19[30],zero,ymm19[28],zero,zero,zero,zero,ymm19[31],zero
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %ymm19, %ymm21
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm20[27],zero,zero,zero,zero,ymm20[30],zero,ymm20[28],zero,zero,zero,zero,ymm20[31],zero,ymm20[29]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm0, %ymm6, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%r9), %xmm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%r8), %xmm4
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm0, %xmm0
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm10, %zmm3, %zmm3
-; AVX512DQ-BW-FCP-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm0 {%k2}
-; AVX512DQ-BW-FCP-NEXT: movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm9 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm2, %xmm23
-; AVX512DQ-BW-FCP-NEXT: vporq %xmm3, %xmm23, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm2, %xmm7, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm0, %xmm8, %xmm23
-; AVX512DQ-BW-FCP-NEXT: vporq %xmm3, %xmm23, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm3, %zmm3
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm23 = zmm3[0,1,0,1,4,5,4,5]
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm23 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm4, %xmm8
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm8, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm4, %zmm3, %zmm3
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm4 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm10, %zmm31, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm20 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %xmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm13, %xmm4, %xmm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm5, %xmm2
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm1, %xmm2, %xmm1
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm29, %xmm2, %xmm2
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm1, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm29
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm2, %xmm30
+; AVX512DQ-BW-FCP-NEXT: vporq %xmm29, %xmm30, %xmm29
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm30 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm30, %xmm30
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm30, %zmm29, %zmm29
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm29 = zmm29[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm29 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r9), %xmm30
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%r8), %xmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm30, %xmm0
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm3, %xmm18
+; AVX512DQ-BW-FCP-NEXT: vporq %xmm0, %xmm18, %xmm0
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm18 = xmm3[8],xmm30[8],xmm3[9],xmm30[9],xmm3[10],xmm30[10],xmm3[11],xmm30[11],xmm3[12],xmm30[12],xmm3[13],xmm30[13],xmm3[14],xmm30[14],xmm3[15],xmm30[15]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm18 = xmm18[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm18, %zmm0, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm18 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
+; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm31, %zmm6, %zmm18
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
; AVX512DQ-BW-FCP-NEXT: movabsq $290499906672591364, %rax # imm = 0x408102040810204
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm3 {%k2}
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm0 {%k3}
; AVX512DQ-BW-FCP-NEXT: movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm23 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm15, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm14, %xmm4
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3],xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm18, %xmm4, %xmm4
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm4, %zmm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm2, %xmm17, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm0, %xmm16, %xmm0
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm2, %xmm0, %xmm0
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm16[0],xmm17[0],xmm16[1],xmm17[1],xmm16[2],xmm17[2],xmm16[3],xmm17[3],xmm16[4],xmm17[4],xmm16[5],xmm17[5],xmm16[6],xmm17[6],xmm16[7],xmm17[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm3[0,1,0,1,4,5,4,5]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm29 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm13, %xmm19, %xmm0
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm17, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm0, %xmm6, %xmm0
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3],xmm17[4],xmm19[4],xmm17[5],xmm19[5],xmm17[6],xmm19[6],xmm17[7],xmm19[7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm6, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm6, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm22, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm21, %xmm8
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm8, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm21[0],xmm22[0],xmm21[1],xmm22[1],xmm21[2],xmm22[2],xmm21[3],xmm22[3],xmm21[4],xmm22[4],xmm21[5],xmm22[5],xmm21[6],xmm22[6],xmm21[7],xmm22[7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm8, %xmm8
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm8, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm6[0,1,0,1,4,5,4,5]
; AVX512DQ-BW-FCP-NEXT: movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm0 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm1, %xmm3, %xmm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm4, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm2, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, (%rsp), %zmm10, %zmm2 # 32-byte Folded Reload
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm2, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm8, %zmm0 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm16, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm15, %xmm7
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm15[0],xmm16[0],xmm15[1],xmm16[1],xmm15[2],xmm16[2],xmm15[3],xmm16[3],xmm15[4],xmm16[4],xmm15[5],xmm16[5],xmm15[6],xmm16[6],xmm15[7],xmm16[7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm7, %xmm7
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm7, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm6[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, (%rsp), %zmm31, %zmm8 # 32-byte Folded Reload
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm7 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm8, %zmm7, %zmm7
; AVX512DQ-BW-FCP-NEXT: movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm1 {%k2}
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm6 {%k3}
; AVX512DQ-BW-FCP-NEXT: movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm24, %zmm28, %zmm1
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm0 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm13[28],zero,ymm13[30,31,30,31],zero,ymm13[29],zero,ymm13[31,28,29]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm2, %ymm6, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm2[2,3,2,3],zmm1[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29],zero
+; AVX512DQ-BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm7[30],zero,ymm7[28],zero,zero,zero,zero,ymm7[31],zero,ymm7[29],zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm4[2,3,2,3],zmm2[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm30[0],xmm3[1],xmm30[1],xmm3[2],xmm30[2],xmm3[3],xmm30[3],xmm3[4],xmm30[4],xmm3[5],xmm30[5],xmm3[6],xmm30[6],xmm3[7],xmm30[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm1, %xmm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero
+; AVX512DQ-BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29]
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[2,3,2,3],zmm1[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm31, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm1 {%k1}
+; AVX512DQ-BW-FCP-NEXT: movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm26, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[18,19,20,21],zero,zmm1[19],zero,zmm1[21,20,21,22],zero,zmm1[20],zero,zmm1[22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm1[55],zero,zero,zero,zero,zmm1[58],zero,zmm1[56],zero,zero,zero,zero,zmm1[59],zero
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 32-byte Folded Reload
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm27, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm3[21],zero,zmm3[19],zero,zero,zero,zero,zmm3[22],zero,zmm3[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm3[55],zero,zero,zero,zero,zmm3[58],zero,zmm3[56],zero,zero,zero,zero,zmm3[59],zero,zmm3[57]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vporq %zmm1, %zmm3, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm3 # 32-byte Folded Reload
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm24, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm3[18],zero,zmm3[18,19,20,21],zero,zmm3[19],zero,zmm3[25,26,27,22],zero,zmm3[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm3[56,57],zero,zmm3[55],zero,zmm3[53,54,55,58],zero,zmm3[56],zero,zmm3[60,61,58,59]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm4 # 32-byte Folded Reload
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm23, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm4[18],zero,zero,zero,zero,zmm4[21],zero,zmm4[19],zero,zero,zero,zero,zmm4[22],zero,zmm4[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm4[57],zero,zmm4[55],zero,zero,zero,zero,zmm4[58],zero,zmm4[56],zero,zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm4[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vporq %zmm3, %zmm4, %zmm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm3 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm20, %zmm30, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm3 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm28, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[20],zero,zmm1[18],zero,zmm1[20,21,20,21],zero,zmm1[19],zero,zmm1[19,20,21,22],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[56,57,56,57],zero,zmm1[55],zero,zmm1[55,56,57,58],zero,zmm1[56],zero,zmm1[62,63]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm21, %zmm27, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm25, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm4[20],zero,zmm4[18],zero,zero,zero,zero,zmm4[21],zero,zmm4[19],zero,zero,zero,zero,zmm4[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm4[57],zero,zmm4[55],zero,zero,zero,zero,zmm4[58],zero,zmm4[56],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm4[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vporq %zmm1, %zmm4, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm4 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm2, %zmm4, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm8, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm1 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm1 {%k1}
; AVX512DQ-BW-FCP-NEXT: movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm3 {%k1}
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, 128(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm12, 320(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, 320(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 192(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm23, 256(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, 192(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, 64(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm22, 384(%rax)
-; AVX512DQ-BW-FCP-NEXT: addq $40, %rsp
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm29, 256(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, 64(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm20, 384(%rax)
+; AVX512DQ-BW-FCP-NEXT: addq $104, %rsp
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <64 x i8>, ptr %in.vecptr0, align 64
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
index dfa7f2d..c981d97 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -177,6 +177,38 @@ define <16 x float> @shuffle_v16f32_02_03_16_17_06_07_20_21_10_11_24_25_14_15_28
ret <16 x float> %shuffle
}
+; PR86076
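+; Interleaving two splatted scalars should need only two broadcasts and one unpcklps.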
+define <16 x float> @shuffle_f32_v16f32_00_08_01_09_02_10_03_11_04_12_05_13_06_14_07_15(float %a0, float %a1) {
+; ALL-LABEL: shuffle_f32_v16f32_00_08_01_09_02_10_03_11_04_12_05_13_06_14_07_15:
+; ALL: # %bb.0:
+; ALL-NEXT: vbroadcastss %xmm0, %ymm0
+; ALL-NEXT: vbroadcastss %xmm1, %ymm1
+; ALL-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; ALL-NEXT: retq
+ %v0 = insertelement <8 x float> poison, float %a0, i64 0
+ %v1 = insertelement <8 x float> poison, float %a1, i64 0
+ %b0 = shufflevector <8 x float> %v0, <8 x float> poison, <8 x i32> zeroinitializer
+ %b1 = shufflevector <8 x float> %v1, <8 x float> poison, <8 x i32> zeroinitializer
+ %r = shufflevector <8 x float> %b0, <8 x float> %b1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <16 x float> %r
+}
+
+; PR86076
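+; A repeating {a0,a1} pair should pack the two scalars once and then broadcast the pair.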
+define <16 x float> @shuffle_f32_v16f32_00_08_00_08_00_08_00_08_00_08_00_08_00_08_00_08(float %a0, float %a1) {
+; ALL-LABEL: shuffle_f32_v16f32_00_08_00_08_00_08_00_08_00_08_00_08_00_08_00_08:
+; ALL: # %bb.0:
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
+; ALL-NEXT: vbroadcastsd %xmm0, %zmm0
+; ALL-NEXT: retq
+ %v0 = insertelement <8 x float> poison, float %a0, i64 0
+ %v1 = insertelement <8 x float> poison, float %a1, i64 0
+ %sv = shufflevector <8 x float> %v0, <8 x float> %v1, <16 x i32> <i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8>
+ ret <16 x float> %sv
+}
+
define <16 x i32> @shuffle_v16i32_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; ALL: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/vpdpwssd.ll b/llvm/test/CodeGen/X86/vpdpwssd.ll
new file mode 100644
index 0000000..e6a07b4
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vpdpwssd.ll
@@ -0,0 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver4 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+fast-dpwssd | FileCheck %s
+
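+; Both targets should select a single vpdpwssd for the AVX512VNNI intrinsic.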
+define <16 x i32> @vpdpwssd_test(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2) {
+; CHECK-LABEL: vpdpwssd_test:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpdpwssd %zmm2, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %4 = tail call <16 x i32> @llvm.x86.avx512.vpdpwssd.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2)
+ ret <16 x i32> %4
+}
diff --git a/llvm/test/CodeGen/X86/widen_fadd.ll b/llvm/test/CodeGen/X86/widen_fadd.ll
index 68f2ed4..be249dd 100644
--- a/llvm/test/CodeGen/X86/widen_fadd.ll
+++ b/llvm/test/CodeGen/X86/widen_fadd.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VL
define void @widen_fadd_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
; SSE-LABEL: widen_fadd_v2f32_v4f32:
@@ -364,3 +364,88 @@ define void @widen_fadd_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
store <2 x float> %vc14, ptr %c14, align 4
ret void
}
+
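+; Concatenating two fadd-by-splat-constant halves should widen to a single 256-bit vaddps on AVX targets.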
+define <8 x float> @widen_fadd_v4f32_v8f32_const(<4 x float> %x, <4 x float> %y) {
+; SSE-LABEL: widen_fadd_v4f32_v8f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT: addps %xmm2, %xmm0
+; SSE-NEXT: addps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: widen_fadd_v4f32_v8f32_const:
+; AVX1: # %bb.0:
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: widen_fadd_v4f32_v8f32_const:
+; AVX2: # %bb.0:
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fadd_v4f32_v8f32_const:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX512F-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fadd_v4f32_v8f32_const:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+ %x2 = fadd <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %y2 = fadd <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %r = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %r
+}
+
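+; With four quarters, AVX512 should widen to a single 512-bit vaddps with a broadcast constant.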
+define <16 x float> @widen_fadd_v4f32_v16f32_const(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x float> %w) {
+; SSE-LABEL: widen_fadd_v4f32_v16f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm4 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT: addps %xmm4, %xmm0
+; SSE-NEXT: addps %xmm4, %xmm1
+; SSE-NEXT: addps %xmm4, %xmm2
+; SSE-NEXT: addps %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fadd_v4f32_v16f32_const:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX1OR2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX1OR2-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1OR2-NEXT: vaddps %ymm1, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: widen_fadd_v4f32_v16f32_const:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x2 = fadd <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %y2 = fadd <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %z2 = fadd <4 x float> %z, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %w2 = fadd <4 x float> %w, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %r0 = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r1 = shufflevector <4 x float> %z2, <4 x float> %w2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r = shufflevector <8 x float> %r0, <8 x float> %r1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %r
+}
diff --git a/llvm/test/CodeGen/X86/widen_fmul.ll b/llvm/test/CodeGen/X86/widen_fmul.ll
index ac208da..9aa9d63 100644
--- a/llvm/test/CodeGen/X86/widen_fmul.ll
+++ b/llvm/test/CodeGen/X86/widen_fmul.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VL
define void @widen_fmul_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
; SSE-LABEL: widen_fmul_v2f32_v4f32:
@@ -364,3 +364,88 @@ define void @widen_fmul_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
store <2 x float> %vc14, ptr %c14, align 4
ret void
}
+
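+; Concatenating two fmul-by-splat-constant halves should widen to a single 256-bit vmulps on AVX targets.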
+define <8 x float> @widen_fmul_v4f32_v8f32_const(<4 x float> %x, <4 x float> %y) {
+; SSE-LABEL: widen_fmul_v4f32_v8f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; SSE-NEXT: mulps %xmm2, %xmm0
+; SSE-NEXT: mulps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: widen_fmul_v4f32_v8f32_const:
+; AVX1: # %bb.0:
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: widen_fmul_v4f32_v8f32_const:
+; AVX2: # %bb.0:
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; AVX2-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fmul_v4f32_v8f32_const:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: vbroadcastss {{.*#+}} ymm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; AVX512F-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fmul_v4f32_v8f32_const:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+ %x2 = fmul <4 x float> %x, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %y2 = fmul <4 x float> %y, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %r = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %r
+}
+
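+; With four quarters, AVX512 should widen to a single 512-bit vmulps with a broadcast constant.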
+define <16 x float> @widen_fmul_v4f32_v16f32_const(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x float> %w) {
+; SSE-LABEL: widen_fmul_v4f32_v16f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm4 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; SSE-NEXT: mulps %xmm4, %xmm0
+; SSE-NEXT: mulps %xmm4, %xmm1
+; SSE-NEXT: mulps %xmm4, %xmm2
+; SSE-NEXT: mulps %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fmul_v4f32_v16f32_const:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX1OR2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vbroadcastss {{.*#+}} ymm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; AVX1OR2-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1OR2-NEXT: vmulps %ymm1, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: widen_fmul_v4f32_v16f32_const:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x2 = fmul <4 x float> %x, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %y2 = fmul <4 x float> %y, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %z2 = fmul <4 x float> %z, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %w2 = fmul <4 x float> %w, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %r0 = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r1 = shufflevector <4 x float> %z2, <4 x float> %w2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r = shufflevector <8 x float> %r0, <8 x float> %r1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %r
+}
diff --git a/llvm/test/CodeGen/X86/widen_fsub.ll b/llvm/test/CodeGen/X86/widen_fsub.ll
index 90cf455..60e54ab 100644
--- a/llvm/test/CodeGen/X86/widen_fsub.ll
+++ b/llvm/test/CodeGen/X86/widen_fsub.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VL
define void @widen_fsub_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
; SSE-LABEL: widen_fsub_v2f32_v4f32:
@@ -364,3 +364,88 @@ define void @widen_fsub_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
store <2 x float> %vc14, ptr %c14, align 4
ret void
}
+
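+; Concatenating two fsub-by-constant halves should widen to one 256-bit op; the subtraction folds to an add of -2.0 on AVX.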
+define <8 x float> @widen_fsub_v4f32_v8f32_const(<4 x float> %x, <4 x float> %y) {
+; SSE-LABEL: widen_fsub_v4f32_v8f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT: subps %xmm2, %xmm0
+; SSE-NEXT: subps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: widen_fsub_v4f32_v8f32_const:
+; AVX1: # %bb.0:
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: widen_fsub_v4f32_v8f32_const:
+; AVX2: # %bb.0:
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0]
+; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fsub_v4f32_v8f32_const:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: vbroadcastss {{.*#+}} ymm1 = [-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0]
+; AVX512F-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fsub_v4f32_v8f32_const:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+ %x2 = fsub <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %y2 = fsub <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %r = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %r
+}
+
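+; With four quarters, AVX512 should widen to a single 512-bit vaddps of the negated broadcast constant.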
+define <16 x float> @widen_fsub_v4f32_v16f32_const(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x float> %w) {
+; SSE-LABEL: widen_fsub_v4f32_v16f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm4 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT: subps %xmm4, %xmm0
+; SSE-NEXT: subps %xmm4, %xmm1
+; SSE-NEXT: subps %xmm4, %xmm2
+; SSE-NEXT: subps %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fsub_v4f32_v16f32_const:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX1OR2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vbroadcastss {{.*#+}} ymm1 = [-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0]
+; AVX1OR2-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1OR2-NEXT: vaddps %ymm1, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: widen_fsub_v4f32_v16f32_const:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x2 = fsub <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %y2 = fsub <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %z2 = fsub <4 x float> %z, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %w2 = fsub <4 x float> %w, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %r0 = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r1 = shufflevector <4 x float> %z2, <4 x float> %w2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r = shufflevector <8 x float> %r0, <8 x float> %r1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %r
+}