Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/split-wide-shifts-multiway.ll | 888
-rw-r--r--  llvm/test/CodeGen/AArch64/adc.ll | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/fsh.ll | 473
-rw-r--r--  llvm/test/CodeGen/AArch64/funnel-shift.ll | 55
-rw-r--r--  llvm/test/CodeGen/AArch64/rem-by-const.ll | 173
-rw-r--r--  llvm/test/CodeGen/AArch64/sme-zt0-state.ll | 18
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.ll | 24
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.mir | 36
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll | 16
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.dim.gfx90a.ll | 22
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.noret.ll | 581
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.pk.add.ll | 30
-rw-r--r--  llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll | 14
-rw-r--r--  llvm/test/CodeGen/DirectX/WaveActiveMin.ll | 143
-rw-r--r--  llvm/test/CodeGen/Hexagon/inst_masked_store_bug1.ll | 94
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/fp-max-min.ll | 160
-rw-r--r--  llvm/test/CodeGen/LoongArch/lsx/fp-max-min.ll | 112
-rw-r--r--  llvm/test/CodeGen/PowerPC/bittest.ll | 193
-rw-r--r--  llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll | 642
-rw-r--r--  llvm/test/CodeGen/RISCV/features-info.ll | 1
-rw-r--r--  llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveActiveMin.ll | 57
-rw-r--r--  llvm/test/CodeGen/X86/bfloat-calling-conv.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/trunc-srl-load.ll | 1652
-rw-r--r--  llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll | 50
-rw-r--r--  llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll | 53
-rw-r--r--  llvm/test/MC/AMDGPU/buffer-op-swz-operand.s | 8
-rw-r--r--  llvm/test/MC/Disassembler/PowerPC/ppc-encoding-ISAFuture.txt | 3
-rw-r--r--  llvm/test/MC/Disassembler/PowerPC/ppc64le-encoding-ISAFuture.txt | 3
-rw-r--r--  llvm/test/MC/PowerPC/ppc-encoding-ISAFuture.s | 4
-rw-r--r--  llvm/test/MC/RISCV/xqcili-linker-relaxation.s | 37
-rw-r--r--  llvm/test/Transforms/InstCombine/assume.ll | 28
-rw-r--r--  llvm/test/Transforms/InstCombine/ptrtoaddr.ll | 72
-rw-r--r--  llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll | 418
-rw-r--r--  llvm/test/Transforms/LoopVectorize/reduction-inloop.ll | 82
-rw-r--r--  llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll | 542
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/parent-node-schedulable-with-multi-copyables.ll | 170
-rw-r--r--  llvm/test/Transforms/SimplifyCFG/pr165088.ll | 186
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/switch_case.ll | 54
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/switch_case.ll.expected | 106
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_test_checks/switch_case.test | 3
-rw-r--r--  llvm/test/tools/llvm-dwarfdump/X86/type_units_split_dwp_v4.s | 6
42 files changed, 4575 insertions, 2650 deletions
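The recurring change in the AArch64 hunks below is that GlobalISel now lowers each 64-bit piece of a wide constant shift, rotate, or funnel shift to a single EXTR (extract register) instruction instead of a shifted-operand pair (lsr/lsl feeding an orr). EXTR takes 64 contiguous bits out of the concatenation of two registers, which is exactly the per-limb operation these tests exercise. A minimal illustrative sketch of the equivalence, not taken from the patch itself, using a plain i64 funnel shift:

    ; fshr(hi, lo, 3) = low 64 bits of ((hi:lo) >> 3)
    declare i64 @llvm.fshr.i64(i64, i64, i64)

    define i64 @extr_example(i64 %hi, i64 %lo) {
      %r = call i64 @llvm.fshr.i64(i64 %hi, i64 %lo, i64 3)
      ret i64 %r
    }

    ; Old two-instruction lowering (roughly):   New single-instruction lowering:
    ;   lsl x8, x0, #61                           extr x0, x0, x1, #3
    ;   orr x0, x8, x1, lsr #3

The i512 tests apply this limb by limb: eight loads, seven EXTRs chaining adjacent limbs, and one plain shift for the end limb, which is why each hunk shrinks by roughly one instruction per limb.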
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/split-wide-shifts-multiway.ll b/llvm/test/CodeGen/AArch64/GlobalISel/split-wide-shifts-multiway.ll
index 41f7ab8..480fcbd 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/split-wide-shifts-multiway.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/split-wide-shifts-multiway.ll
@@ -4992,28 +4992,21 @@ define void @test_shl_i512_const_32(ptr %result, ptr %input) {
; GISEL-LABEL: test_shl_i512_const_32:
; GISEL: ; %bb.0: ; %entry
; GISEL-NEXT: ldp x8, x9, [x1]
-; GISEL-NEXT: ldp x11, x12, [x1, #16]
-; GISEL-NEXT: ldp x14, x15, [x1, #32]
-; GISEL-NEXT: lsr x10, x8, #32
-; GISEL-NEXT: lsr x13, x9, #32
-; GISEL-NEXT: lsl x8, x8, #32
-; GISEL-NEXT: orr x9, x10, x9, lsl #32
-; GISEL-NEXT: lsr x10, x11, #32
-; GISEL-NEXT: orr x11, x13, x11, lsl #32
-; GISEL-NEXT: ldp x13, x16, [x1, #48]
-; GISEL-NEXT: stp x8, x9, [x0]
-; GISEL-NEXT: lsr x8, x12, #32
-; GISEL-NEXT: orr x10, x10, x12, lsl #32
-; GISEL-NEXT: lsr x12, x14, #32
-; GISEL-NEXT: lsr x9, x15, #32
-; GISEL-NEXT: orr x8, x8, x14, lsl #32
-; GISEL-NEXT: stp x11, x10, [x0, #16]
-; GISEL-NEXT: orr x11, x12, x15, lsl #32
-; GISEL-NEXT: lsr x12, x13, #32
-; GISEL-NEXT: orr x9, x9, x13, lsl #32
-; GISEL-NEXT: stp x8, x11, [x0, #32]
-; GISEL-NEXT: orr x8, x12, x16, lsl #32
-; GISEL-NEXT: stp x9, x8, [x0, #48]
+; GISEL-NEXT: ldp x10, x11, [x1, #16]
+; GISEL-NEXT: ldp x13, x14, [x1, #32]
+; GISEL-NEXT: lsl x12, x8, #32
+; GISEL-NEXT: extr x8, x9, x8, #32
+; GISEL-NEXT: extr x9, x10, x9, #32
+; GISEL-NEXT: extr x10, x11, x10, #32
+; GISEL-NEXT: ldp x15, x16, [x1, #48]
+; GISEL-NEXT: stp x12, x8, [x0]
+; GISEL-NEXT: extr x8, x13, x11, #32
+; GISEL-NEXT: stp x9, x10, [x0, #16]
+; GISEL-NEXT: extr x9, x14, x13, #32
+; GISEL-NEXT: extr x10, x15, x14, #32
+; GISEL-NEXT: stp x8, x9, [x0, #32]
+; GISEL-NEXT: extr x8, x16, x15, #32
+; GISEL-NEXT: stp x10, x8, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5044,30 +5037,22 @@ define void @test_lshr_i512_const_32(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_lshr_i512_const_32:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #8]
-; GISEL-NEXT: ldr x11, [x1]
-; GISEL-NEXT: ldp x10, x14, [x1, #24]
-; GISEL-NEXT: ldr x16, [x1, #56]
-; GISEL-NEXT: lsl x12, x8, #32
-; GISEL-NEXT: lsl x13, x9, #32
-; GISEL-NEXT: lsl x15, x10, #32
-; GISEL-NEXT: orr x11, x12, x11, lsr #32
-; GISEL-NEXT: orr x8, x13, x8, lsr #32
-; GISEL-NEXT: lsl x13, x14, #32
-; GISEL-NEXT: orr x9, x15, x9, lsr #32
-; GISEL-NEXT: ldp x12, x15, [x1, #40]
-; GISEL-NEXT: stp x11, x8, [x0]
-; GISEL-NEXT: orr x10, x13, x10, lsr #32
-; GISEL-NEXT: lsl x8, x16, #32
-; GISEL-NEXT: lsl x11, x12, #32
-; GISEL-NEXT: lsl x13, x15, #32
-; GISEL-NEXT: stp x9, x10, [x0, #16]
-; GISEL-NEXT: orr x8, x8, x15, lsr #32
-; GISEL-NEXT: lsr x10, x16, #32
-; GISEL-NEXT: orr x11, x11, x14, lsr #32
-; GISEL-NEXT: orr x9, x13, x12, lsr #32
-; GISEL-NEXT: stp x8, x10, [x0, #48]
-; GISEL-NEXT: stp x11, x9, [x0, #32]
+; GISEL-NEXT: ldp x8, x9, [x1]
+; GISEL-NEXT: ldp x10, x11, [x1, #16]
+; GISEL-NEXT: ldp x12, x13, [x1, #32]
+; GISEL-NEXT: extr x8, x9, x8, #32
+; GISEL-NEXT: ldp x14, x15, [x1, #48]
+; GISEL-NEXT: extr x9, x10, x9, #32
+; GISEL-NEXT: extr x10, x11, x10, #32
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: extr x8, x12, x11, #32
+; GISEL-NEXT: extr x9, x13, x12, #32
+; GISEL-NEXT: stp x10, x8, [x0, #16]
+; GISEL-NEXT: extr x10, x14, x13, #32
+; GISEL-NEXT: extr x8, x15, x14, #32
+; GISEL-NEXT: stp x9, x10, [x0, #32]
+; GISEL-NEXT: lsr x9, x15, #32
+; GISEL-NEXT: stp x8, x9, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5098,32 +5083,24 @@ define void @test_ashr_i512_const_32(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_ashr_i512_const_32:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #8]
-; GISEL-NEXT: ldr x11, [x1]
-; GISEL-NEXT: ldp x10, x13, [x1, #24]
-; GISEL-NEXT: ldr x17, [x1, #56]
-; GISEL-NEXT: lsl x12, x8, #32
-; GISEL-NEXT: lsl x15, x9, #32
-; GISEL-NEXT: lsl x16, x10, #32
-; GISEL-NEXT: orr x11, x12, x11, lsr #32
-; GISEL-NEXT: ldp x14, x12, [x1, #40]
-; GISEL-NEXT: orr x8, x15, x8, lsr #32
-; GISEL-NEXT: lsl x15, x13, #32
-; GISEL-NEXT: orr x9, x16, x9, lsr #32
-; GISEL-NEXT: asr x16, x17, #63
-; GISEL-NEXT: stp x11, x8, [x0]
-; GISEL-NEXT: lsl x11, x14, #32
-; GISEL-NEXT: orr x10, x15, x10, lsr #32
-; GISEL-NEXT: lsl x15, x12, #32
-; GISEL-NEXT: orr x8, x11, x13, lsr #32
-; GISEL-NEXT: lsl x11, x17, #32
-; GISEL-NEXT: stp x9, x10, [x0, #16]
-; GISEL-NEXT: orr x9, x15, x14, lsr #32
-; GISEL-NEXT: lsl x13, x16, #32
-; GISEL-NEXT: orr x10, x11, x12, lsr #32
-; GISEL-NEXT: stp x8, x9, [x0, #32]
-; GISEL-NEXT: orr x8, x13, x17, asr #32
-; GISEL-NEXT: stp x10, x8, [x0, #48]
+; GISEL-NEXT: ldp x8, x9, [x1]
+; GISEL-NEXT: ldp x10, x11, [x1, #16]
+; GISEL-NEXT: ldp x12, x13, [x1, #48]
+; GISEL-NEXT: extr x8, x9, x8, #32
+; GISEL-NEXT: ldp x14, x15, [x1, #32]
+; GISEL-NEXT: extr x9, x10, x9, #32
+; GISEL-NEXT: extr x10, x11, x10, #32
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: asr x8, x13, #63
+; GISEL-NEXT: extr x11, x14, x11, #32
+; GISEL-NEXT: extr x9, x15, x14, #32
+; GISEL-NEXT: lsl x8, x8, #32
+; GISEL-NEXT: stp x10, x11, [x0, #16]
+; GISEL-NEXT: extr x10, x12, x15, #32
+; GISEL-NEXT: extr x11, x13, x12, #32
+; GISEL-NEXT: orr x8, x8, x13, asr #32
+; GISEL-NEXT: stp x9, x10, [x0, #32]
+; GISEL-NEXT: stp x11, x8, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5252,23 +5229,17 @@ define void @test_shl_i512_const_96(ptr %result, ptr %input) {
; GISEL-NEXT: ldr x15, [x1, #48]
; GISEL-NEXT: ldp x10, x11, [x1, #16]
; GISEL-NEXT: ldp x12, x13, [x1, #32]
-; GISEL-NEXT: lsr x14, x8, #32
-; GISEL-NEXT: lsr x16, x9, #32
-; GISEL-NEXT: lsl x8, x8, #32
-; GISEL-NEXT: orr x9, x14, x9, lsl #32
-; GISEL-NEXT: lsr x14, x10, #32
-; GISEL-NEXT: orr x10, x16, x10, lsl #32
-; GISEL-NEXT: stp xzr, x8, [x0]
-; GISEL-NEXT: lsr x8, x11, #32
-; GISEL-NEXT: orr x11, x14, x11, lsl #32
-; GISEL-NEXT: lsr x14, x12, #32
-; GISEL-NEXT: stp x9, x10, [x0, #16]
-; GISEL-NEXT: lsr x9, x13, #32
-; GISEL-NEXT: orr x8, x8, x12, lsl #32
-; GISEL-NEXT: orr x10, x14, x13, lsl #32
-; GISEL-NEXT: orr x9, x9, x15, lsl #32
-; GISEL-NEXT: stp x11, x8, [x0, #32]
-; GISEL-NEXT: stp x10, x9, [x0, #48]
+; GISEL-NEXT: lsl x14, x8, #32
+; GISEL-NEXT: extr x8, x9, x8, #32
+; GISEL-NEXT: extr x9, x10, x9, #32
+; GISEL-NEXT: extr x10, x11, x10, #32
+; GISEL-NEXT: stp xzr, x14, [x0]
+; GISEL-NEXT: stp x8, x9, [x0, #16]
+; GISEL-NEXT: extr x8, x12, x11, #32
+; GISEL-NEXT: extr x9, x13, x12, #32
+; GISEL-NEXT: stp x10, x8, [x0, #32]
+; GISEL-NEXT: extr x10, x15, x13, #32
+; GISEL-NEXT: stp x9, x10, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5297,27 +5268,21 @@ define void @test_lshr_i512_const_96(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_lshr_i512_const_96:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #16]
-; GISEL-NEXT: ldr x10, [x1, #8]
-; GISEL-NEXT: ldp x11, x14, [x1, #32]
-; GISEL-NEXT: ldp x15, x16, [x1, #48]
-; GISEL-NEXT: lsl x12, x8, #32
-; GISEL-NEXT: lsl x13, x9, #32
-; GISEL-NEXT: orr x10, x12, x10, lsr #32
-; GISEL-NEXT: lsl x12, x11, #32
-; GISEL-NEXT: orr x8, x13, x8, lsr #32
-; GISEL-NEXT: lsl x13, x14, #32
-; GISEL-NEXT: orr x9, x12, x9, lsr #32
-; GISEL-NEXT: stp x10, x8, [x0]
-; GISEL-NEXT: lsl x10, x15, #32
-; GISEL-NEXT: orr x11, x13, x11, lsr #32
-; GISEL-NEXT: lsl x12, x16, #32
-; GISEL-NEXT: orr x8, x10, x14, lsr #32
-; GISEL-NEXT: lsr x10, x16, #32
-; GISEL-NEXT: stp x9, x11, [x0, #16]
-; GISEL-NEXT: orr x9, x12, x15, lsr #32
-; GISEL-NEXT: stp x10, xzr, [x0, #48]
-; GISEL-NEXT: stp x8, x9, [x0, #32]
+; GISEL-NEXT: ldp x8, x9, [x1, #8]
+; GISEL-NEXT: ldr x14, [x1, #56]
+; GISEL-NEXT: ldp x10, x11, [x1, #24]
+; GISEL-NEXT: ldp x12, x13, [x1, #40]
+; GISEL-NEXT: extr x8, x9, x8, #32
+; GISEL-NEXT: extr x9, x10, x9, #32
+; GISEL-NEXT: extr x10, x11, x10, #32
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: extr x8, x12, x11, #32
+; GISEL-NEXT: extr x9, x13, x12, #32
+; GISEL-NEXT: stp x10, x8, [x0, #16]
+; GISEL-NEXT: extr x10, x14, x13, #32
+; GISEL-NEXT: lsr x8, x14, #32
+; GISEL-NEXT: stp x9, x10, [x0, #32]
+; GISEL-NEXT: stp x8, xzr, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5347,29 +5312,23 @@ define void @test_ashr_i512_const_96(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_ashr_i512_const_96:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #16]
-; GISEL-NEXT: ldr x11, [x1, #8]
-; GISEL-NEXT: ldp x10, x13, [x1, #32]
-; GISEL-NEXT: lsl x12, x8, #32
-; GISEL-NEXT: lsl x14, x9, #32
-; GISEL-NEXT: lsl x15, x10, #32
-; GISEL-NEXT: orr x11, x12, x11, lsr #32
-; GISEL-NEXT: ldp x12, x16, [x1, #48]
-; GISEL-NEXT: orr x8, x14, x8, lsr #32
-; GISEL-NEXT: lsl x14, x13, #32
-; GISEL-NEXT: orr x9, x15, x9, lsr #32
-; GISEL-NEXT: asr x15, x16, #63
-; GISEL-NEXT: stp x11, x8, [x0]
-; GISEL-NEXT: lsl x11, x12, #32
-; GISEL-NEXT: orr x10, x14, x10, lsr #32
-; GISEL-NEXT: lsl x14, x16, #32
-; GISEL-NEXT: orr x8, x11, x13, lsr #32
+; GISEL-NEXT: ldp x8, x9, [x1, #8]
+; GISEL-NEXT: ldr x13, [x1, #40]
+; GISEL-NEXT: ldp x10, x11, [x1, #24]
+; GISEL-NEXT: ldp x14, x12, [x1, #48]
+; GISEL-NEXT: extr x8, x9, x8, #32
+; GISEL-NEXT: extr x9, x10, x9, #32
+; GISEL-NEXT: extr x10, x11, x10, #32
+; GISEL-NEXT: asr x15, x12, #63
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: extr x8, x13, x11, #32
+; GISEL-NEXT: extr x9, x14, x13, #32
; GISEL-NEXT: lsl x11, x15, #32
-; GISEL-NEXT: stp x9, x10, [x0, #16]
-; GISEL-NEXT: orr x9, x14, x12, lsr #32
-; GISEL-NEXT: orr x10, x11, x16, asr #32
-; GISEL-NEXT: stp x8, x9, [x0, #32]
-; GISEL-NEXT: stp x10, x15, [x0, #48]
+; GISEL-NEXT: stp x10, x8, [x0, #16]
+; GISEL-NEXT: extr x10, x12, x14, #32
+; GISEL-NEXT: orr x8, x11, x12, asr #32
+; GISEL-NEXT: stp x9, x10, [x0, #32]
+; GISEL-NEXT: stp x8, x15, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5404,28 +5363,21 @@ define void @test_shl_i512_const_1(ptr %result, ptr %input) {
; GISEL-LABEL: test_shl_i512_const_1:
; GISEL: ; %bb.0: ; %entry
; GISEL-NEXT: ldp x8, x9, [x1]
-; GISEL-NEXT: ldp x11, x12, [x1, #16]
-; GISEL-NEXT: ldp x14, x15, [x1, #32]
-; GISEL-NEXT: lsr x10, x8, #63
-; GISEL-NEXT: lsr x13, x9, #63
-; GISEL-NEXT: lsl x8, x8, #1
-; GISEL-NEXT: orr x9, x10, x9, lsl #1
-; GISEL-NEXT: lsr x10, x11, #63
-; GISEL-NEXT: orr x11, x13, x11, lsl #1
-; GISEL-NEXT: ldp x13, x16, [x1, #48]
-; GISEL-NEXT: stp x8, x9, [x0]
-; GISEL-NEXT: lsr x8, x12, #63
-; GISEL-NEXT: orr x10, x10, x12, lsl #1
-; GISEL-NEXT: lsr x12, x14, #63
-; GISEL-NEXT: lsr x9, x15, #63
-; GISEL-NEXT: orr x8, x8, x14, lsl #1
-; GISEL-NEXT: stp x11, x10, [x0, #16]
-; GISEL-NEXT: orr x11, x12, x15, lsl #1
-; GISEL-NEXT: lsr x12, x13, #63
-; GISEL-NEXT: orr x9, x9, x13, lsl #1
-; GISEL-NEXT: stp x8, x11, [x0, #32]
-; GISEL-NEXT: orr x8, x12, x16, lsl #1
-; GISEL-NEXT: stp x9, x8, [x0, #48]
+; GISEL-NEXT: ldp x10, x11, [x1, #16]
+; GISEL-NEXT: ldp x13, x14, [x1, #32]
+; GISEL-NEXT: lsl x12, x8, #1
+; GISEL-NEXT: extr x8, x9, x8, #63
+; GISEL-NEXT: extr x9, x10, x9, #63
+; GISEL-NEXT: extr x10, x11, x10, #63
+; GISEL-NEXT: ldp x15, x16, [x1, #48]
+; GISEL-NEXT: stp x12, x8, [x0]
+; GISEL-NEXT: extr x8, x13, x11, #63
+; GISEL-NEXT: stp x9, x10, [x0, #16]
+; GISEL-NEXT: extr x9, x14, x13, #63
+; GISEL-NEXT: extr x10, x15, x14, #63
+; GISEL-NEXT: stp x8, x9, [x0, #32]
+; GISEL-NEXT: extr x8, x16, x15, #63
+; GISEL-NEXT: stp x10, x8, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5457,30 +5409,22 @@ define void @test_lshr_i512_const_1(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_lshr_i512_const_1:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #8]
-; GISEL-NEXT: ldr x11, [x1]
-; GISEL-NEXT: ldp x10, x14, [x1, #24]
-; GISEL-NEXT: ldr x16, [x1, #56]
-; GISEL-NEXT: lsl x12, x8, #63
-; GISEL-NEXT: lsl x13, x9, #63
-; GISEL-NEXT: lsl x15, x10, #63
-; GISEL-NEXT: orr x11, x12, x11, lsr #1
-; GISEL-NEXT: orr x8, x13, x8, lsr #1
-; GISEL-NEXT: lsl x13, x14, #63
-; GISEL-NEXT: orr x9, x15, x9, lsr #1
-; GISEL-NEXT: ldp x12, x15, [x1, #40]
-; GISEL-NEXT: stp x11, x8, [x0]
-; GISEL-NEXT: orr x10, x13, x10, lsr #1
-; GISEL-NEXT: lsl x8, x16, #63
-; GISEL-NEXT: lsl x11, x12, #63
-; GISEL-NEXT: lsl x13, x15, #63
-; GISEL-NEXT: stp x9, x10, [x0, #16]
-; GISEL-NEXT: orr x8, x8, x15, lsr #1
-; GISEL-NEXT: lsr x10, x16, #1
-; GISEL-NEXT: orr x11, x11, x14, lsr #1
-; GISEL-NEXT: orr x9, x13, x12, lsr #1
-; GISEL-NEXT: stp x8, x10, [x0, #48]
-; GISEL-NEXT: stp x11, x9, [x0, #32]
+; GISEL-NEXT: ldp x8, x9, [x1]
+; GISEL-NEXT: ldp x10, x11, [x1, #16]
+; GISEL-NEXT: ldp x12, x13, [x1, #32]
+; GISEL-NEXT: extr x8, x9, x8, #1
+; GISEL-NEXT: ldp x14, x15, [x1, #48]
+; GISEL-NEXT: extr x9, x10, x9, #1
+; GISEL-NEXT: extr x10, x11, x10, #1
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: extr x8, x12, x11, #1
+; GISEL-NEXT: extr x9, x13, x12, #1
+; GISEL-NEXT: stp x10, x8, [x0, #16]
+; GISEL-NEXT: extr x10, x14, x13, #1
+; GISEL-NEXT: extr x8, x15, x14, #1
+; GISEL-NEXT: stp x9, x10, [x0, #32]
+; GISEL-NEXT: lsr x9, x15, #1
+; GISEL-NEXT: stp x8, x9, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5512,32 +5456,24 @@ define void @test_ashr_i512_const_1(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_ashr_i512_const_1:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #8]
-; GISEL-NEXT: ldr x11, [x1]
-; GISEL-NEXT: ldp x10, x13, [x1, #24]
-; GISEL-NEXT: ldr x17, [x1, #56]
-; GISEL-NEXT: lsl x12, x8, #63
-; GISEL-NEXT: lsl x15, x9, #63
-; GISEL-NEXT: lsl x16, x10, #63
-; GISEL-NEXT: orr x11, x12, x11, lsr #1
-; GISEL-NEXT: ldp x14, x12, [x1, #40]
-; GISEL-NEXT: orr x8, x15, x8, lsr #1
-; GISEL-NEXT: lsl x15, x13, #63
-; GISEL-NEXT: orr x9, x16, x9, lsr #1
-; GISEL-NEXT: asr x16, x17, #63
-; GISEL-NEXT: stp x11, x8, [x0]
-; GISEL-NEXT: lsl x11, x14, #63
-; GISEL-NEXT: orr x10, x15, x10, lsr #1
-; GISEL-NEXT: lsl x15, x12, #63
-; GISEL-NEXT: orr x8, x11, x13, lsr #1
-; GISEL-NEXT: lsl x11, x17, #63
-; GISEL-NEXT: stp x9, x10, [x0, #16]
-; GISEL-NEXT: orr x9, x15, x14, lsr #1
-; GISEL-NEXT: lsl x13, x16, #63
-; GISEL-NEXT: orr x10, x11, x12, lsr #1
-; GISEL-NEXT: stp x8, x9, [x0, #32]
-; GISEL-NEXT: orr x8, x13, x17, asr #1
-; GISEL-NEXT: stp x10, x8, [x0, #48]
+; GISEL-NEXT: ldp x8, x9, [x1]
+; GISEL-NEXT: ldp x10, x11, [x1, #16]
+; GISEL-NEXT: ldp x12, x13, [x1, #48]
+; GISEL-NEXT: extr x8, x9, x8, #1
+; GISEL-NEXT: ldp x14, x15, [x1, #32]
+; GISEL-NEXT: extr x9, x10, x9, #1
+; GISEL-NEXT: extr x10, x11, x10, #1
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: asr x8, x13, #63
+; GISEL-NEXT: extr x11, x14, x11, #1
+; GISEL-NEXT: extr x9, x15, x14, #1
+; GISEL-NEXT: lsl x8, x8, #63
+; GISEL-NEXT: stp x10, x11, [x0, #16]
+; GISEL-NEXT: extr x10, x12, x15, #1
+; GISEL-NEXT: extr x11, x13, x12, #1
+; GISEL-NEXT: orr x8, x8, x13, asr #1
+; GISEL-NEXT: stp x9, x10, [x0, #32]
+; GISEL-NEXT: stp x11, x8, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5571,28 +5507,21 @@ define void @test_shl_i512_const_15(ptr %result, ptr %input) {
; GISEL-LABEL: test_shl_i512_const_15:
; GISEL: ; %bb.0: ; %entry
; GISEL-NEXT: ldp x8, x9, [x1]
-; GISEL-NEXT: ldp x11, x12, [x1, #16]
-; GISEL-NEXT: ldp x14, x15, [x1, #32]
-; GISEL-NEXT: lsr x10, x8, #49
-; GISEL-NEXT: lsr x13, x9, #49
-; GISEL-NEXT: lsl x8, x8, #15
-; GISEL-NEXT: orr x9, x10, x9, lsl #15
-; GISEL-NEXT: lsr x10, x11, #49
-; GISEL-NEXT: orr x11, x13, x11, lsl #15
-; GISEL-NEXT: ldp x13, x16, [x1, #48]
-; GISEL-NEXT: stp x8, x9, [x0]
-; GISEL-NEXT: lsr x8, x12, #49
-; GISEL-NEXT: orr x10, x10, x12, lsl #15
-; GISEL-NEXT: lsr x12, x14, #49
-; GISEL-NEXT: lsr x9, x15, #49
-; GISEL-NEXT: orr x8, x8, x14, lsl #15
-; GISEL-NEXT: stp x11, x10, [x0, #16]
-; GISEL-NEXT: orr x11, x12, x15, lsl #15
-; GISEL-NEXT: lsr x12, x13, #49
-; GISEL-NEXT: orr x9, x9, x13, lsl #15
-; GISEL-NEXT: stp x8, x11, [x0, #32]
-; GISEL-NEXT: orr x8, x12, x16, lsl #15
-; GISEL-NEXT: stp x9, x8, [x0, #48]
+; GISEL-NEXT: ldp x10, x11, [x1, #16]
+; GISEL-NEXT: ldp x13, x14, [x1, #32]
+; GISEL-NEXT: lsl x12, x8, #15
+; GISEL-NEXT: extr x8, x9, x8, #49
+; GISEL-NEXT: extr x9, x10, x9, #49
+; GISEL-NEXT: extr x10, x11, x10, #49
+; GISEL-NEXT: ldp x15, x16, [x1, #48]
+; GISEL-NEXT: stp x12, x8, [x0]
+; GISEL-NEXT: extr x8, x13, x11, #49
+; GISEL-NEXT: stp x9, x10, [x0, #16]
+; GISEL-NEXT: extr x9, x14, x13, #49
+; GISEL-NEXT: extr x10, x15, x14, #49
+; GISEL-NEXT: stp x8, x9, [x0, #32]
+; GISEL-NEXT: extr x8, x16, x15, #49
+; GISEL-NEXT: stp x10, x8, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5624,30 +5553,22 @@ define void @test_lshr_i512_const_15(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_lshr_i512_const_15:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #8]
-; GISEL-NEXT: ldr x11, [x1]
-; GISEL-NEXT: ldp x10, x14, [x1, #24]
-; GISEL-NEXT: ldr x16, [x1, #56]
-; GISEL-NEXT: lsl x12, x8, #49
-; GISEL-NEXT: lsl x13, x9, #49
-; GISEL-NEXT: lsl x15, x10, #49
-; GISEL-NEXT: orr x11, x12, x11, lsr #15
-; GISEL-NEXT: orr x8, x13, x8, lsr #15
-; GISEL-NEXT: lsl x13, x14, #49
-; GISEL-NEXT: orr x9, x15, x9, lsr #15
-; GISEL-NEXT: ldp x12, x15, [x1, #40]
-; GISEL-NEXT: stp x11, x8, [x0]
-; GISEL-NEXT: orr x10, x13, x10, lsr #15
-; GISEL-NEXT: lsl x8, x16, #49
-; GISEL-NEXT: lsl x11, x12, #49
-; GISEL-NEXT: lsl x13, x15, #49
-; GISEL-NEXT: stp x9, x10, [x0, #16]
-; GISEL-NEXT: orr x8, x8, x15, lsr #15
-; GISEL-NEXT: lsr x10, x16, #15
-; GISEL-NEXT: orr x11, x11, x14, lsr #15
-; GISEL-NEXT: orr x9, x13, x12, lsr #15
-; GISEL-NEXT: stp x8, x10, [x0, #48]
-; GISEL-NEXT: stp x11, x9, [x0, #32]
+; GISEL-NEXT: ldp x8, x9, [x1]
+; GISEL-NEXT: ldp x10, x11, [x1, #16]
+; GISEL-NEXT: ldp x12, x13, [x1, #32]
+; GISEL-NEXT: extr x8, x9, x8, #15
+; GISEL-NEXT: ldp x14, x15, [x1, #48]
+; GISEL-NEXT: extr x9, x10, x9, #15
+; GISEL-NEXT: extr x10, x11, x10, #15
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: extr x8, x12, x11, #15
+; GISEL-NEXT: extr x9, x13, x12, #15
+; GISEL-NEXT: stp x10, x8, [x0, #16]
+; GISEL-NEXT: extr x10, x14, x13, #15
+; GISEL-NEXT: extr x8, x15, x14, #15
+; GISEL-NEXT: stp x9, x10, [x0, #32]
+; GISEL-NEXT: lsr x9, x15, #15
+; GISEL-NEXT: stp x8, x9, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5679,32 +5600,24 @@ define void @test_ashr_i512_const_15(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_ashr_i512_const_15:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #8]
-; GISEL-NEXT: ldr x11, [x1]
-; GISEL-NEXT: ldp x10, x13, [x1, #24]
-; GISEL-NEXT: ldr x17, [x1, #56]
-; GISEL-NEXT: lsl x12, x8, #49
-; GISEL-NEXT: lsl x15, x9, #49
-; GISEL-NEXT: lsl x16, x10, #49
-; GISEL-NEXT: orr x11, x12, x11, lsr #15
-; GISEL-NEXT: ldp x14, x12, [x1, #40]
-; GISEL-NEXT: orr x8, x15, x8, lsr #15
-; GISEL-NEXT: lsl x15, x13, #49
-; GISEL-NEXT: orr x9, x16, x9, lsr #15
-; GISEL-NEXT: asr x16, x17, #63
-; GISEL-NEXT: stp x11, x8, [x0]
-; GISEL-NEXT: lsl x11, x14, #49
-; GISEL-NEXT: orr x10, x15, x10, lsr #15
-; GISEL-NEXT: lsl x15, x12, #49
-; GISEL-NEXT: orr x8, x11, x13, lsr #15
-; GISEL-NEXT: lsl x11, x17, #49
-; GISEL-NEXT: stp x9, x10, [x0, #16]
-; GISEL-NEXT: orr x9, x15, x14, lsr #15
-; GISEL-NEXT: lsl x13, x16, #49
-; GISEL-NEXT: orr x10, x11, x12, lsr #15
-; GISEL-NEXT: stp x8, x9, [x0, #32]
-; GISEL-NEXT: orr x8, x13, x17, asr #15
-; GISEL-NEXT: stp x10, x8, [x0, #48]
+; GISEL-NEXT: ldp x8, x9, [x1]
+; GISEL-NEXT: ldp x10, x11, [x1, #16]
+; GISEL-NEXT: ldp x12, x13, [x1, #48]
+; GISEL-NEXT: extr x8, x9, x8, #15
+; GISEL-NEXT: ldp x14, x15, [x1, #32]
+; GISEL-NEXT: extr x9, x10, x9, #15
+; GISEL-NEXT: extr x10, x11, x10, #15
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: asr x8, x13, #63
+; GISEL-NEXT: extr x11, x14, x11, #15
+; GISEL-NEXT: extr x9, x15, x14, #15
+; GISEL-NEXT: lsl x8, x8, #49
+; GISEL-NEXT: stp x10, x11, [x0, #16]
+; GISEL-NEXT: extr x10, x12, x15, #15
+; GISEL-NEXT: extr x11, x13, x12, #15
+; GISEL-NEXT: orr x8, x8, x13, asr #15
+; GISEL-NEXT: stp x9, x10, [x0, #32]
+; GISEL-NEXT: stp x11, x8, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5738,28 +5651,21 @@ define void @test_shl_i512_const_63(ptr %result, ptr %input) {
; GISEL-LABEL: test_shl_i512_const_63:
; GISEL: ; %bb.0: ; %entry
; GISEL-NEXT: ldp x8, x9, [x1]
-; GISEL-NEXT: ldp x11, x12, [x1, #16]
-; GISEL-NEXT: ldp x14, x15, [x1, #32]
-; GISEL-NEXT: lsr x10, x8, #1
-; GISEL-NEXT: lsr x13, x9, #1
-; GISEL-NEXT: lsl x8, x8, #63
-; GISEL-NEXT: orr x9, x10, x9, lsl #63
-; GISEL-NEXT: lsr x10, x11, #1
-; GISEL-NEXT: orr x11, x13, x11, lsl #63
-; GISEL-NEXT: ldp x13, x16, [x1, #48]
-; GISEL-NEXT: stp x8, x9, [x0]
-; GISEL-NEXT: lsr x8, x12, #1
-; GISEL-NEXT: orr x10, x10, x12, lsl #63
-; GISEL-NEXT: lsr x12, x14, #1
-; GISEL-NEXT: lsr x9, x15, #1
-; GISEL-NEXT: orr x8, x8, x14, lsl #63
-; GISEL-NEXT: stp x11, x10, [x0, #16]
-; GISEL-NEXT: orr x11, x12, x15, lsl #63
-; GISEL-NEXT: lsr x12, x13, #1
-; GISEL-NEXT: orr x9, x9, x13, lsl #63
-; GISEL-NEXT: stp x8, x11, [x0, #32]
-; GISEL-NEXT: orr x8, x12, x16, lsl #63
-; GISEL-NEXT: stp x9, x8, [x0, #48]
+; GISEL-NEXT: ldp x10, x11, [x1, #16]
+; GISEL-NEXT: ldp x13, x14, [x1, #32]
+; GISEL-NEXT: lsl x12, x8, #63
+; GISEL-NEXT: extr x8, x9, x8, #1
+; GISEL-NEXT: extr x9, x10, x9, #1
+; GISEL-NEXT: extr x10, x11, x10, #1
+; GISEL-NEXT: ldp x15, x16, [x1, #48]
+; GISEL-NEXT: stp x12, x8, [x0]
+; GISEL-NEXT: extr x8, x13, x11, #1
+; GISEL-NEXT: stp x9, x10, [x0, #16]
+; GISEL-NEXT: extr x9, x14, x13, #1
+; GISEL-NEXT: extr x10, x15, x14, #1
+; GISEL-NEXT: stp x8, x9, [x0, #32]
+; GISEL-NEXT: extr x8, x16, x15, #1
+; GISEL-NEXT: stp x10, x8, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5791,30 +5697,22 @@ define void @test_lshr_i512_const_63(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_lshr_i512_const_63:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #8]
-; GISEL-NEXT: ldr x11, [x1]
-; GISEL-NEXT: ldp x10, x14, [x1, #24]
-; GISEL-NEXT: ldr x16, [x1, #56]
-; GISEL-NEXT: lsl x12, x8, #1
-; GISEL-NEXT: lsl x13, x9, #1
-; GISEL-NEXT: lsl x15, x10, #1
-; GISEL-NEXT: orr x11, x12, x11, lsr #63
-; GISEL-NEXT: orr x8, x13, x8, lsr #63
-; GISEL-NEXT: lsl x13, x14, #1
-; GISEL-NEXT: orr x9, x15, x9, lsr #63
-; GISEL-NEXT: ldp x12, x15, [x1, #40]
-; GISEL-NEXT: stp x11, x8, [x0]
-; GISEL-NEXT: orr x10, x13, x10, lsr #63
-; GISEL-NEXT: lsl x8, x16, #1
-; GISEL-NEXT: lsl x11, x12, #1
-; GISEL-NEXT: lsl x13, x15, #1
-; GISEL-NEXT: stp x9, x10, [x0, #16]
-; GISEL-NEXT: orr x8, x8, x15, lsr #63
-; GISEL-NEXT: lsr x10, x16, #63
-; GISEL-NEXT: orr x11, x11, x14, lsr #63
-; GISEL-NEXT: orr x9, x13, x12, lsr #63
-; GISEL-NEXT: stp x8, x10, [x0, #48]
-; GISEL-NEXT: stp x11, x9, [x0, #32]
+; GISEL-NEXT: ldp x8, x9, [x1]
+; GISEL-NEXT: ldp x10, x11, [x1, #16]
+; GISEL-NEXT: ldp x12, x13, [x1, #32]
+; GISEL-NEXT: extr x8, x9, x8, #63
+; GISEL-NEXT: ldp x14, x15, [x1, #48]
+; GISEL-NEXT: extr x9, x10, x9, #63
+; GISEL-NEXT: extr x10, x11, x10, #63
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: extr x8, x12, x11, #63
+; GISEL-NEXT: extr x9, x13, x12, #63
+; GISEL-NEXT: stp x10, x8, [x0, #16]
+; GISEL-NEXT: extr x10, x14, x13, #63
+; GISEL-NEXT: extr x8, x15, x14, #63
+; GISEL-NEXT: stp x9, x10, [x0, #32]
+; GISEL-NEXT: lsr x9, x15, #63
+; GISEL-NEXT: stp x8, x9, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5846,30 +5744,22 @@ define void @test_ashr_i512_const_63(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_ashr_i512_const_63:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #8]
-; GISEL-NEXT: ldr x10, [x1]
-; GISEL-NEXT: ldp x11, x13, [x1, #24]
-; GISEL-NEXT: ldr x17, [x1, #56]
-; GISEL-NEXT: lsl x15, x9, #1
-; GISEL-NEXT: lsl x12, x8, #1
-; GISEL-NEXT: lsl x16, x11, #1
-; GISEL-NEXT: orr x8, x15, x8, lsr #63
-; GISEL-NEXT: lsl x15, x13, #1
-; GISEL-NEXT: orr x10, x12, x10, lsr #63
-; GISEL-NEXT: ldp x14, x12, [x1, #40]
-; GISEL-NEXT: orr x9, x16, x9, lsr #63
-; GISEL-NEXT: orr x11, x15, x11, lsr #63
-; GISEL-NEXT: stp x10, x8, [x0]
-; GISEL-NEXT: lsl x8, x17, #1
-; GISEL-NEXT: lsl x16, x14, #1
-; GISEL-NEXT: lsl x10, x12, #1
-; GISEL-NEXT: stp x9, x11, [x0, #16]
-; GISEL-NEXT: asr x9, x17, #63
-; GISEL-NEXT: orr x8, x8, x12, lsr #63
-; GISEL-NEXT: orr x13, x16, x13, lsr #63
-; GISEL-NEXT: orr x10, x10, x14, lsr #63
-; GISEL-NEXT: orr x9, x9, x9, lsl #1
-; GISEL-NEXT: stp x13, x10, [x0, #32]
+; GISEL-NEXT: ldp x8, x9, [x1]
+; GISEL-NEXT: ldp x10, x11, [x1, #16]
+; GISEL-NEXT: ldp x12, x13, [x1, #32]
+; GISEL-NEXT: extr x8, x9, x8, #63
+; GISEL-NEXT: ldp x14, x15, [x1, #48]
+; GISEL-NEXT: extr x9, x10, x9, #63
+; GISEL-NEXT: extr x10, x11, x10, #63
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: extr x8, x12, x11, #63
+; GISEL-NEXT: extr x9, x13, x12, #63
+; GISEL-NEXT: extr x11, x14, x13, #63
+; GISEL-NEXT: stp x10, x8, [x0, #16]
+; GISEL-NEXT: asr x10, x15, #63
+; GISEL-NEXT: extr x8, x15, x14, #63
+; GISEL-NEXT: stp x9, x11, [x0, #32]
+; GISEL-NEXT: orr x9, x10, x10, lsl #1
; GISEL-NEXT: stp x8, x9, [x0, #48]
; GISEL-NEXT: ret
entry:
@@ -5906,23 +5796,17 @@ define void @test_shl_i512_const_65(ptr %result, ptr %input) {
; GISEL-NEXT: ldr x15, [x1, #48]
; GISEL-NEXT: ldp x10, x11, [x1, #16]
; GISEL-NEXT: ldp x12, x13, [x1, #32]
-; GISEL-NEXT: lsr x14, x8, #63
-; GISEL-NEXT: lsr x16, x9, #63
-; GISEL-NEXT: lsl x8, x8, #1
-; GISEL-NEXT: orr x9, x14, x9, lsl #1
-; GISEL-NEXT: lsr x14, x10, #63
-; GISEL-NEXT: orr x10, x16, x10, lsl #1
-; GISEL-NEXT: stp xzr, x8, [x0]
-; GISEL-NEXT: lsr x8, x11, #63
-; GISEL-NEXT: orr x11, x14, x11, lsl #1
-; GISEL-NEXT: lsr x14, x12, #63
-; GISEL-NEXT: stp x9, x10, [x0, #16]
-; GISEL-NEXT: lsr x9, x13, #63
-; GISEL-NEXT: orr x8, x8, x12, lsl #1
-; GISEL-NEXT: orr x10, x14, x13, lsl #1
-; GISEL-NEXT: orr x9, x9, x15, lsl #1
-; GISEL-NEXT: stp x11, x8, [x0, #32]
-; GISEL-NEXT: stp x10, x9, [x0, #48]
+; GISEL-NEXT: lsl x14, x8, #1
+; GISEL-NEXT: extr x8, x9, x8, #63
+; GISEL-NEXT: extr x9, x10, x9, #63
+; GISEL-NEXT: extr x10, x11, x10, #63
+; GISEL-NEXT: stp xzr, x14, [x0]
+; GISEL-NEXT: stp x8, x9, [x0, #16]
+; GISEL-NEXT: extr x8, x12, x11, #63
+; GISEL-NEXT: extr x9, x13, x12, #63
+; GISEL-NEXT: stp x10, x8, [x0, #32]
+; GISEL-NEXT: extr x10, x15, x13, #63
+; GISEL-NEXT: stp x9, x10, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -5953,27 +5837,21 @@ define void @test_lshr_i512_const_65(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_lshr_i512_const_65:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #16]
-; GISEL-NEXT: ldr x10, [x1, #8]
-; GISEL-NEXT: ldp x11, x14, [x1, #32]
-; GISEL-NEXT: ldp x15, x16, [x1, #48]
-; GISEL-NEXT: lsl x12, x8, #63
-; GISEL-NEXT: lsl x13, x9, #63
-; GISEL-NEXT: orr x10, x12, x10, lsr #1
-; GISEL-NEXT: lsl x12, x11, #63
-; GISEL-NEXT: orr x8, x13, x8, lsr #1
-; GISEL-NEXT: lsl x13, x14, #63
-; GISEL-NEXT: orr x9, x12, x9, lsr #1
-; GISEL-NEXT: stp x10, x8, [x0]
-; GISEL-NEXT: lsl x10, x15, #63
-; GISEL-NEXT: orr x11, x13, x11, lsr #1
-; GISEL-NEXT: lsl x12, x16, #63
-; GISEL-NEXT: orr x8, x10, x14, lsr #1
-; GISEL-NEXT: lsr x10, x16, #1
-; GISEL-NEXT: stp x9, x11, [x0, #16]
-; GISEL-NEXT: orr x9, x12, x15, lsr #1
-; GISEL-NEXT: stp x10, xzr, [x0, #48]
-; GISEL-NEXT: stp x8, x9, [x0, #32]
+; GISEL-NEXT: ldp x8, x9, [x1, #8]
+; GISEL-NEXT: ldr x14, [x1, #56]
+; GISEL-NEXT: ldp x10, x11, [x1, #24]
+; GISEL-NEXT: ldp x12, x13, [x1, #40]
+; GISEL-NEXT: extr x8, x9, x8, #1
+; GISEL-NEXT: extr x9, x10, x9, #1
+; GISEL-NEXT: extr x10, x11, x10, #1
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: extr x8, x12, x11, #1
+; GISEL-NEXT: extr x9, x13, x12, #1
+; GISEL-NEXT: stp x10, x8, [x0, #16]
+; GISEL-NEXT: extr x10, x14, x13, #1
+; GISEL-NEXT: lsr x8, x14, #1
+; GISEL-NEXT: stp x9, x10, [x0, #32]
+; GISEL-NEXT: stp x8, xzr, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -6005,29 +5883,23 @@ define void @test_ashr_i512_const_65(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_ashr_i512_const_65:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #16]
-; GISEL-NEXT: ldr x11, [x1, #8]
-; GISEL-NEXT: ldp x10, x13, [x1, #32]
-; GISEL-NEXT: lsl x12, x8, #63
-; GISEL-NEXT: lsl x14, x9, #63
-; GISEL-NEXT: lsl x15, x10, #63
-; GISEL-NEXT: orr x11, x12, x11, lsr #1
-; GISEL-NEXT: ldp x12, x16, [x1, #48]
-; GISEL-NEXT: orr x8, x14, x8, lsr #1
-; GISEL-NEXT: lsl x14, x13, #63
-; GISEL-NEXT: orr x9, x15, x9, lsr #1
-; GISEL-NEXT: asr x15, x16, #63
-; GISEL-NEXT: stp x11, x8, [x0]
-; GISEL-NEXT: lsl x11, x12, #63
-; GISEL-NEXT: orr x10, x14, x10, lsr #1
-; GISEL-NEXT: lsl x14, x16, #63
-; GISEL-NEXT: orr x8, x11, x13, lsr #1
+; GISEL-NEXT: ldp x8, x9, [x1, #8]
+; GISEL-NEXT: ldr x13, [x1, #40]
+; GISEL-NEXT: ldp x10, x11, [x1, #24]
+; GISEL-NEXT: ldp x14, x12, [x1, #48]
+; GISEL-NEXT: extr x8, x9, x8, #1
+; GISEL-NEXT: extr x9, x10, x9, #1
+; GISEL-NEXT: extr x10, x11, x10, #1
+; GISEL-NEXT: asr x15, x12, #63
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: extr x8, x13, x11, #1
+; GISEL-NEXT: extr x9, x14, x13, #1
; GISEL-NEXT: lsl x11, x15, #63
-; GISEL-NEXT: stp x9, x10, [x0, #16]
-; GISEL-NEXT: orr x9, x14, x12, lsr #1
-; GISEL-NEXT: orr x10, x11, x16, asr #1
-; GISEL-NEXT: stp x8, x9, [x0, #32]
-; GISEL-NEXT: stp x10, x15, [x0, #48]
+; GISEL-NEXT: stp x10, x8, [x0, #16]
+; GISEL-NEXT: extr x10, x12, x14, #1
+; GISEL-NEXT: orr x8, x11, x12, asr #1
+; GISEL-NEXT: stp x9, x10, [x0, #32]
+; GISEL-NEXT: stp x8, x15, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -6062,23 +5934,17 @@ define void @test_shl_i512_const_100(ptr %result, ptr %input) {
; GISEL-NEXT: ldr x15, [x1, #48]
; GISEL-NEXT: ldp x10, x11, [x1, #16]
; GISEL-NEXT: ldp x12, x13, [x1, #32]
-; GISEL-NEXT: lsr x14, x8, #28
-; GISEL-NEXT: lsr x16, x9, #28
-; GISEL-NEXT: lsl x8, x8, #36
-; GISEL-NEXT: orr x9, x14, x9, lsl #36
-; GISEL-NEXT: lsr x14, x10, #28
-; GISEL-NEXT: orr x10, x16, x10, lsl #36
-; GISEL-NEXT: stp xzr, x8, [x0]
-; GISEL-NEXT: lsr x8, x11, #28
-; GISEL-NEXT: orr x11, x14, x11, lsl #36
-; GISEL-NEXT: lsr x14, x12, #28
-; GISEL-NEXT: stp x9, x10, [x0, #16]
-; GISEL-NEXT: lsr x9, x13, #28
-; GISEL-NEXT: orr x8, x8, x12, lsl #36
-; GISEL-NEXT: orr x10, x14, x13, lsl #36
-; GISEL-NEXT: orr x9, x9, x15, lsl #36
-; GISEL-NEXT: stp x11, x8, [x0, #32]
-; GISEL-NEXT: stp x10, x9, [x0, #48]
+; GISEL-NEXT: lsl x14, x8, #36
+; GISEL-NEXT: extr x8, x9, x8, #28
+; GISEL-NEXT: extr x9, x10, x9, #28
+; GISEL-NEXT: extr x10, x11, x10, #28
+; GISEL-NEXT: stp xzr, x14, [x0]
+; GISEL-NEXT: stp x8, x9, [x0, #16]
+; GISEL-NEXT: extr x8, x12, x11, #28
+; GISEL-NEXT: extr x9, x13, x12, #28
+; GISEL-NEXT: stp x10, x8, [x0, #32]
+; GISEL-NEXT: extr x10, x15, x13, #28
+; GISEL-NEXT: stp x9, x10, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -6109,27 +5975,21 @@ define void @test_lshr_i512_const_100(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_lshr_i512_const_100:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #16]
-; GISEL-NEXT: ldr x10, [x1, #8]
-; GISEL-NEXT: ldp x11, x14, [x1, #32]
-; GISEL-NEXT: ldp x15, x16, [x1, #48]
-; GISEL-NEXT: lsl x12, x8, #28
-; GISEL-NEXT: lsl x13, x9, #28
-; GISEL-NEXT: orr x10, x12, x10, lsr #36
-; GISEL-NEXT: lsl x12, x11, #28
-; GISEL-NEXT: orr x8, x13, x8, lsr #36
-; GISEL-NEXT: lsl x13, x14, #28
-; GISEL-NEXT: orr x9, x12, x9, lsr #36
-; GISEL-NEXT: stp x10, x8, [x0]
-; GISEL-NEXT: lsl x10, x15, #28
-; GISEL-NEXT: orr x11, x13, x11, lsr #36
-; GISEL-NEXT: lsl x12, x16, #28
-; GISEL-NEXT: orr x8, x10, x14, lsr #36
-; GISEL-NEXT: lsr x10, x16, #36
-; GISEL-NEXT: stp x9, x11, [x0, #16]
-; GISEL-NEXT: orr x9, x12, x15, lsr #36
-; GISEL-NEXT: stp x10, xzr, [x0, #48]
-; GISEL-NEXT: stp x8, x9, [x0, #32]
+; GISEL-NEXT: ldp x8, x9, [x1, #8]
+; GISEL-NEXT: ldr x14, [x1, #56]
+; GISEL-NEXT: ldp x10, x11, [x1, #24]
+; GISEL-NEXT: ldp x12, x13, [x1, #40]
+; GISEL-NEXT: extr x8, x9, x8, #36
+; GISEL-NEXT: extr x9, x10, x9, #36
+; GISEL-NEXT: extr x10, x11, x10, #36
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: extr x8, x12, x11, #36
+; GISEL-NEXT: extr x9, x13, x12, #36
+; GISEL-NEXT: stp x10, x8, [x0, #16]
+; GISEL-NEXT: extr x10, x14, x13, #36
+; GISEL-NEXT: lsr x8, x14, #36
+; GISEL-NEXT: stp x9, x10, [x0, #32]
+; GISEL-NEXT: stp x8, xzr, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -6161,29 +6021,23 @@ define void @test_ashr_i512_const_100(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_ashr_i512_const_100:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #16]
-; GISEL-NEXT: ldr x11, [x1, #8]
-; GISEL-NEXT: ldp x10, x13, [x1, #32]
-; GISEL-NEXT: lsl x12, x8, #28
-; GISEL-NEXT: lsl x14, x9, #28
-; GISEL-NEXT: lsl x15, x10, #28
-; GISEL-NEXT: orr x11, x12, x11, lsr #36
-; GISEL-NEXT: ldp x12, x16, [x1, #48]
-; GISEL-NEXT: orr x8, x14, x8, lsr #36
-; GISEL-NEXT: lsl x14, x13, #28
-; GISEL-NEXT: orr x9, x15, x9, lsr #36
-; GISEL-NEXT: asr x15, x16, #63
-; GISEL-NEXT: stp x11, x8, [x0]
-; GISEL-NEXT: lsl x11, x12, #28
-; GISEL-NEXT: orr x10, x14, x10, lsr #36
-; GISEL-NEXT: lsl x14, x16, #28
-; GISEL-NEXT: orr x8, x11, x13, lsr #36
+; GISEL-NEXT: ldp x8, x9, [x1, #8]
+; GISEL-NEXT: ldr x13, [x1, #40]
+; GISEL-NEXT: ldp x10, x11, [x1, #24]
+; GISEL-NEXT: ldp x14, x12, [x1, #48]
+; GISEL-NEXT: extr x8, x9, x8, #36
+; GISEL-NEXT: extr x9, x10, x9, #36
+; GISEL-NEXT: extr x10, x11, x10, #36
+; GISEL-NEXT: asr x15, x12, #63
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: extr x8, x13, x11, #36
+; GISEL-NEXT: extr x9, x14, x13, #36
; GISEL-NEXT: lsl x11, x15, #28
-; GISEL-NEXT: stp x9, x10, [x0, #16]
-; GISEL-NEXT: orr x9, x14, x12, lsr #36
-; GISEL-NEXT: orr x10, x11, x16, asr #36
-; GISEL-NEXT: stp x8, x9, [x0, #32]
-; GISEL-NEXT: stp x10, x15, [x0, #48]
+; GISEL-NEXT: stp x10, x8, [x0, #16]
+; GISEL-NEXT: extr x10, x12, x14, #36
+; GISEL-NEXT: orr x8, x11, x12, asr #36
+; GISEL-NEXT: stp x9, x10, [x0, #32]
+; GISEL-NEXT: stp x8, x15, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -6219,23 +6073,17 @@ define void @test_shl_i512_const_127(ptr %result, ptr %input) {
; GISEL-NEXT: ldr x15, [x1, #48]
; GISEL-NEXT: ldp x10, x11, [x1, #16]
; GISEL-NEXT: ldp x12, x13, [x1, #32]
-; GISEL-NEXT: lsr x14, x8, #1
-; GISEL-NEXT: lsr x16, x9, #1
-; GISEL-NEXT: lsl x8, x8, #63
-; GISEL-NEXT: orr x9, x14, x9, lsl #63
-; GISEL-NEXT: lsr x14, x10, #1
-; GISEL-NEXT: orr x10, x16, x10, lsl #63
-; GISEL-NEXT: stp xzr, x8, [x0]
-; GISEL-NEXT: lsr x8, x11, #1
-; GISEL-NEXT: orr x11, x14, x11, lsl #63
-; GISEL-NEXT: lsr x14, x12, #1
-; GISEL-NEXT: stp x9, x10, [x0, #16]
-; GISEL-NEXT: lsr x9, x13, #1
-; GISEL-NEXT: orr x8, x8, x12, lsl #63
-; GISEL-NEXT: orr x10, x14, x13, lsl #63
-; GISEL-NEXT: orr x9, x9, x15, lsl #63
-; GISEL-NEXT: stp x11, x8, [x0, #32]
-; GISEL-NEXT: stp x10, x9, [x0, #48]
+; GISEL-NEXT: lsl x14, x8, #63
+; GISEL-NEXT: extr x8, x9, x8, #1
+; GISEL-NEXT: extr x9, x10, x9, #1
+; GISEL-NEXT: extr x10, x11, x10, #1
+; GISEL-NEXT: stp xzr, x14, [x0]
+; GISEL-NEXT: stp x8, x9, [x0, #16]
+; GISEL-NEXT: extr x8, x12, x11, #1
+; GISEL-NEXT: extr x9, x13, x12, #1
+; GISEL-NEXT: stp x10, x8, [x0, #32]
+; GISEL-NEXT: extr x10, x15, x13, #1
+; GISEL-NEXT: stp x9, x10, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -6266,27 +6114,21 @@ define void @test_lshr_i512_const_127(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_lshr_i512_const_127:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #16]
-; GISEL-NEXT: ldr x10, [x1, #8]
-; GISEL-NEXT: ldp x11, x14, [x1, #32]
-; GISEL-NEXT: ldp x15, x16, [x1, #48]
-; GISEL-NEXT: lsl x12, x8, #1
-; GISEL-NEXT: lsl x13, x9, #1
-; GISEL-NEXT: orr x10, x12, x10, lsr #63
-; GISEL-NEXT: lsl x12, x11, #1
-; GISEL-NEXT: orr x8, x13, x8, lsr #63
-; GISEL-NEXT: lsl x13, x14, #1
-; GISEL-NEXT: orr x9, x12, x9, lsr #63
-; GISEL-NEXT: stp x10, x8, [x0]
-; GISEL-NEXT: lsl x10, x15, #1
-; GISEL-NEXT: orr x11, x13, x11, lsr #63
-; GISEL-NEXT: lsl x12, x16, #1
-; GISEL-NEXT: orr x8, x10, x14, lsr #63
-; GISEL-NEXT: lsr x10, x16, #63
-; GISEL-NEXT: stp x9, x11, [x0, #16]
-; GISEL-NEXT: orr x9, x12, x15, lsr #63
-; GISEL-NEXT: stp x10, xzr, [x0, #48]
-; GISEL-NEXT: stp x8, x9, [x0, #32]
+; GISEL-NEXT: ldp x8, x9, [x1, #8]
+; GISEL-NEXT: ldr x14, [x1, #56]
+; GISEL-NEXT: ldp x10, x11, [x1, #24]
+; GISEL-NEXT: ldp x12, x13, [x1, #40]
+; GISEL-NEXT: extr x8, x9, x8, #63
+; GISEL-NEXT: extr x9, x10, x9, #63
+; GISEL-NEXT: extr x10, x11, x10, #63
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: extr x8, x12, x11, #63
+; GISEL-NEXT: extr x9, x13, x12, #63
+; GISEL-NEXT: stp x10, x8, [x0, #16]
+; GISEL-NEXT: extr x10, x14, x13, #63
+; GISEL-NEXT: lsr x8, x14, #63
+; GISEL-NEXT: stp x9, x10, [x0, #32]
+; GISEL-NEXT: stp x8, xzr, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
@@ -6317,28 +6159,22 @@ define void @test_ashr_i512_const_127(ptr %result, ptr %input) {
;
; GISEL-LABEL: test_ashr_i512_const_127:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: ldp x8, x9, [x1, #16]
-; GISEL-NEXT: ldr x10, [x1, #8]
-; GISEL-NEXT: ldp x11, x14, [x1, #32]
-; GISEL-NEXT: ldp x15, x16, [x1, #48]
-; GISEL-NEXT: lsl x12, x8, #1
-; GISEL-NEXT: lsl x13, x9, #1
-; GISEL-NEXT: orr x10, x12, x10, lsr #63
-; GISEL-NEXT: lsl x12, x11, #1
-; GISEL-NEXT: orr x8, x13, x8, lsr #63
-; GISEL-NEXT: lsl x13, x14, #1
-; GISEL-NEXT: orr x9, x12, x9, lsr #63
-; GISEL-NEXT: lsl x12, x15, #1
-; GISEL-NEXT: stp x10, x8, [x0]
-; GISEL-NEXT: lsl x10, x16, #1
-; GISEL-NEXT: orr x11, x13, x11, lsr #63
-; GISEL-NEXT: asr x8, x16, #63
-; GISEL-NEXT: orr x12, x12, x14, lsr #63
-; GISEL-NEXT: stp x9, x11, [x0, #16]
-; GISEL-NEXT: orr x9, x10, x15, lsr #63
-; GISEL-NEXT: orr x10, x8, x8, lsl #1
-; GISEL-NEXT: stp x12, x9, [x0, #32]
-; GISEL-NEXT: stp x10, x8, [x0, #48]
+; GISEL-NEXT: ldp x8, x9, [x1, #8]
+; GISEL-NEXT: ldr x14, [x1, #56]
+; GISEL-NEXT: ldp x10, x11, [x1, #24]
+; GISEL-NEXT: ldp x12, x13, [x1, #40]
+; GISEL-NEXT: extr x8, x9, x8, #63
+; GISEL-NEXT: extr x9, x10, x9, #63
+; GISEL-NEXT: extr x10, x11, x10, #63
+; GISEL-NEXT: stp x8, x9, [x0]
+; GISEL-NEXT: extr x8, x12, x11, #63
+; GISEL-NEXT: asr x9, x14, #63
+; GISEL-NEXT: extr x11, x13, x12, #63
+; GISEL-NEXT: stp x10, x8, [x0, #16]
+; GISEL-NEXT: extr x10, x14, x13, #63
+; GISEL-NEXT: orr x8, x9, x9, lsl #1
+; GISEL-NEXT: stp x11, x10, [x0, #32]
+; GISEL-NEXT: stp x8, x9, [x0, #48]
; GISEL-NEXT: ret
entry:
%input_val = load i512, ptr %input, align 64
diff --git a/llvm/test/CodeGen/AArch64/adc.ll b/llvm/test/CodeGen/AArch64/adc.ll
index 12e8bf2..03f3cf1 100644
--- a/llvm/test/CodeGen/AArch64/adc.ll
+++ b/llvm/test/CodeGen/AArch64/adc.ll
@@ -71,9 +71,8 @@ define i128 @test_shifted(i128 %a, i128 %b) {
;
; CHECK-GI-LABEL: test_shifted:
; CHECK-GI: ; %bb.0:
-; CHECK-GI-NEXT: lsr x8, x2, #19
+; CHECK-GI-NEXT: extr x8, x3, x2, #19
; CHECK-GI-NEXT: adds x0, x0, x2, lsl #45
-; CHECK-GI-NEXT: orr x8, x8, x3, lsl #45
; CHECK-GI-NEXT: adc x1, x1, x8
; CHECK-GI-NEXT: ret
%rhs = shl i128 %b, 45
@@ -108,8 +107,7 @@ define i128 @test_extended(i128 %a, i16 %b) {
; CHECK-GI-NEXT: sxth x8, w2
; CHECK-GI-NEXT: adds x0, x0, w2, sxth #3
; CHECK-GI-NEXT: asr x9, x8, #63
-; CHECK-GI-NEXT: lsr x8, x8, #61
-; CHECK-GI-NEXT: orr x8, x8, x9, lsl #3
+; CHECK-GI-NEXT: extr x8, x9, x8, #61
; CHECK-GI-NEXT: adc x1, x1, x8
; CHECK-GI-NEXT: ret
%ext = sext i16 %b to i128
diff --git a/llvm/test/CodeGen/AArch64/fsh.ll b/llvm/test/CodeGen/AArch64/fsh.ll
index 765f6b7..7f07ef4 100644
--- a/llvm/test/CodeGen/AArch64/fsh.ll
+++ b/llvm/test/CodeGen/AArch64/fsh.ll
@@ -510,41 +510,40 @@ define i128 @fshl_i128(i128 %a, i128 %b, i128 %c) {
;
; CHECK-GI-LABEL: fshl_i128:
; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #64 // =0x40
; CHECK-GI-NEXT: and x9, x4, #0x7f
-; CHECK-GI-NEXT: mov w10, #64 // =0x40
-; CHECK-GI-NEXT: lsl x14, x3, #63
-; CHECK-GI-NEXT: sub x12, x10, x9
+; CHECK-GI-NEXT: mov w10, #127 // =0x7f
+; CHECK-GI-NEXT: sub x12, x8, x9
; CHECK-GI-NEXT: lsl x13, x1, x9
-; CHECK-GI-NEXT: mov w8, #127 // =0x7f
+; CHECK-GI-NEXT: bic x10, x10, x4
; CHECK-GI-NEXT: lsr x12, x0, x12
-; CHECK-GI-NEXT: bic x8, x8, x4
-; CHECK-GI-NEXT: sub x15, x9, #64
+; CHECK-GI-NEXT: sub x14, x9, #64
+; CHECK-GI-NEXT: lsl x15, x0, x9
+; CHECK-GI-NEXT: extr x16, x3, x2, #1
; CHECK-GI-NEXT: cmp x9, #64
-; CHECK-GI-NEXT: lsl x9, x0, x9
-; CHECK-GI-NEXT: lsl x15, x0, x15
-; CHECK-GI-NEXT: orr x12, x12, x13
-; CHECK-GI-NEXT: orr x13, x14, x2, lsr #1
-; CHECK-GI-NEXT: lsr x14, x3, #1
-; CHECK-GI-NEXT: sub x10, x10, x8
-; CHECK-GI-NEXT: sub x16, x8, #64
-; CHECK-GI-NEXT: csel x9, x9, xzr, lo
-; CHECK-GI-NEXT: lsr x17, x13, x8
-; CHECK-GI-NEXT: lsl x10, x14, x10
-; CHECK-GI-NEXT: csel x12, x12, x15, lo
+; CHECK-GI-NEXT: sub x8, x8, x10
+; CHECK-GI-NEXT: orr x9, x12, x13
+; CHECK-GI-NEXT: lsr x12, x3, #1
+; CHECK-GI-NEXT: lsl x13, x0, x14
+; CHECK-GI-NEXT: csel x14, x15, xzr, lo
+; CHECK-GI-NEXT: sub x15, x10, #64
+; CHECK-GI-NEXT: lsr x17, x16, x10
+; CHECK-GI-NEXT: lsl x8, x12, x8
+; CHECK-GI-NEXT: csel x9, x9, x13, lo
; CHECK-GI-NEXT: tst x4, #0x7f
-; CHECK-GI-NEXT: lsr x15, x14, x16
+; CHECK-GI-NEXT: lsr x13, x12, x15
; CHECK-GI-NEXT: mvn x11, x4
-; CHECK-GI-NEXT: csel x12, x1, x12, eq
-; CHECK-GI-NEXT: orr x10, x17, x10
-; CHECK-GI-NEXT: cmp x8, #64
-; CHECK-GI-NEXT: lsr x14, x14, x8
-; CHECK-GI-NEXT: csel x10, x10, x15, lo
+; CHECK-GI-NEXT: csel x9, x1, x9, eq
+; CHECK-GI-NEXT: orr x8, x17, x8
+; CHECK-GI-NEXT: cmp x10, #64
+; CHECK-GI-NEXT: lsr x12, x12, x10
+; CHECK-GI-NEXT: csel x8, x8, x13, lo
; CHECK-GI-NEXT: tst x11, #0x7f
-; CHECK-GI-NEXT: csel x10, x13, x10, eq
-; CHECK-GI-NEXT: cmp x8, #64
-; CHECK-GI-NEXT: csel x8, x14, xzr, lo
-; CHECK-GI-NEXT: orr x0, x9, x10
-; CHECK-GI-NEXT: orr x1, x12, x8
+; CHECK-GI-NEXT: csel x8, x16, x8, eq
+; CHECK-GI-NEXT: cmp x10, #64
+; CHECK-GI-NEXT: csel x10, x12, xzr, lo
+; CHECK-GI-NEXT: orr x0, x14, x8
+; CHECK-GI-NEXT: orr x1, x9, x10
; CHECK-GI-NEXT: ret
entry:
%d = call i128 @llvm.fshl(i128 %a, i128 %b, i128 %c)
@@ -571,41 +570,40 @@ define i128 @fshr_i128(i128 %a, i128 %b, i128 %c) {
;
; CHECK-GI-LABEL: fshr_i128:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: lsr x8, x0, #63
-; CHECK-GI-NEXT: mov w9, #127 // =0x7f
-; CHECK-GI-NEXT: mov w10, #64 // =0x40
-; CHECK-GI-NEXT: bic x9, x9, x4
-; CHECK-GI-NEXT: lsl x11, x0, #1
-; CHECK-GI-NEXT: and x12, x4, #0x7f
-; CHECK-GI-NEXT: orr x8, x8, x1, lsl #1
-; CHECK-GI-NEXT: sub x14, x10, x9
-; CHECK-GI-NEXT: sub x17, x9, #64
-; CHECK-GI-NEXT: lsl x15, x11, x9
-; CHECK-GI-NEXT: lsr x14, x11, x14
-; CHECK-GI-NEXT: cmp x9, #64
-; CHECK-GI-NEXT: lsl x16, x8, x9
-; CHECK-GI-NEXT: sub x9, x10, x12
-; CHECK-GI-NEXT: lsl x10, x11, x17
-; CHECK-GI-NEXT: mvn x13, x4
-; CHECK-GI-NEXT: csel x11, x15, xzr, lo
-; CHECK-GI-NEXT: sub x15, x12, #64
-; CHECK-GI-NEXT: orr x14, x14, x16
-; CHECK-GI-NEXT: lsr x16, x2, x12
-; CHECK-GI-NEXT: lsl x9, x3, x9
-; CHECK-GI-NEXT: csel x10, x14, x10, lo
-; CHECK-GI-NEXT: tst x13, #0x7f
-; CHECK-GI-NEXT: lsr x13, x3, x15
-; CHECK-GI-NEXT: csel x8, x8, x10, eq
-; CHECK-GI-NEXT: orr x9, x16, x9
-; CHECK-GI-NEXT: cmp x12, #64
-; CHECK-GI-NEXT: lsr x10, x3, x12
-; CHECK-GI-NEXT: csel x9, x9, x13, lo
+; CHECK-GI-NEXT: mov w8, #127 // =0x7f
+; CHECK-GI-NEXT: lsl x9, x0, #1
+; CHECK-GI-NEXT: extr x10, x1, x0, #63
+; CHECK-GI-NEXT: bic x8, x8, x4
+; CHECK-GI-NEXT: mov w11, #64 // =0x40
+; CHECK-GI-NEXT: and x14, x4, #0x7f
+; CHECK-GI-NEXT: sub x12, x11, x8
+; CHECK-GI-NEXT: lsl x13, x10, x8
+; CHECK-GI-NEXT: lsl x16, x9, x8
+; CHECK-GI-NEXT: lsr x12, x9, x12
+; CHECK-GI-NEXT: sub x17, x8, #64
+; CHECK-GI-NEXT: cmp x8, #64
+; CHECK-GI-NEXT: lsl x8, x9, x17
+; CHECK-GI-NEXT: sub x11, x11, x14
+; CHECK-GI-NEXT: mvn x15, x4
+; CHECK-GI-NEXT: orr x12, x12, x13
+; CHECK-GI-NEXT: csel x9, x16, xzr, lo
+; CHECK-GI-NEXT: sub x13, x14, #64
+; CHECK-GI-NEXT: lsr x16, x2, x14
+; CHECK-GI-NEXT: lsl x11, x3, x11
+; CHECK-GI-NEXT: csel x8, x12, x8, lo
+; CHECK-GI-NEXT: tst x15, #0x7f
+; CHECK-GI-NEXT: lsr x12, x3, x13
+; CHECK-GI-NEXT: csel x8, x10, x8, eq
+; CHECK-GI-NEXT: orr x10, x16, x11
+; CHECK-GI-NEXT: cmp x14, #64
+; CHECK-GI-NEXT: lsr x11, x3, x14
+; CHECK-GI-NEXT: csel x10, x10, x12, lo
; CHECK-GI-NEXT: tst x4, #0x7f
-; CHECK-GI-NEXT: csel x9, x2, x9, eq
-; CHECK-GI-NEXT: cmp x12, #64
-; CHECK-GI-NEXT: csel x10, x10, xzr, lo
-; CHECK-GI-NEXT: orr x0, x11, x9
-; CHECK-GI-NEXT: orr x1, x8, x10
+; CHECK-GI-NEXT: csel x10, x2, x10, eq
+; CHECK-GI-NEXT: cmp x14, #64
+; CHECK-GI-NEXT: csel x11, x11, xzr, lo
+; CHECK-GI-NEXT: orr x0, x9, x10
+; CHECK-GI-NEXT: orr x1, x8, x11
; CHECK-GI-NEXT: ret
entry:
%d = call i128 @llvm.fshr(i128 %a, i128 %b, i128 %c)
@@ -720,10 +718,9 @@ define i128 @rotl_i128_c(i128 %a) {
;
; CHECK-GI-LABEL: rotl_i128_c:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: lsr x8, x0, #61
-; CHECK-GI-NEXT: lsr x9, x1, #61
-; CHECK-GI-NEXT: orr x1, x8, x1, lsl #3
-; CHECK-GI-NEXT: orr x0, x9, x0, lsl #3
+; CHECK-GI-NEXT: extr x8, x1, x0, #61
+; CHECK-GI-NEXT: extr x0, x0, x1, #61
+; CHECK-GI-NEXT: mov x1, x8
; CHECK-GI-NEXT: ret
entry:
%d = call i128 @llvm.fshl(i128 %a, i128 %a, i128 3)
@@ -731,20 +728,12 @@ entry:
}
define i128 @rotr_i128_c(i128 %a) {
-; CHECK-SD-LABEL: rotr_i128_c:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: extr x8, x1, x0, #3
-; CHECK-SD-NEXT: extr x1, x0, x1, #3
-; CHECK-SD-NEXT: mov x0, x8
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: rotr_i128_c:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: lsl x8, x1, #61
-; CHECK-GI-NEXT: lsl x9, x0, #61
-; CHECK-GI-NEXT: orr x0, x8, x0, lsr #3
-; CHECK-GI-NEXT: orr x1, x9, x1, lsr #3
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: rotr_i128_c:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: extr x8, x1, x0, #3
+; CHECK-NEXT: extr x1, x0, x1, #3
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: ret
entry:
%d = call i128 @llvm.fshr(i128 %a, i128 %a, i128 3)
ret i128 %d
@@ -868,10 +857,8 @@ define i128 @fshl_i128_c(i128 %a, i128 %b) {
;
; CHECK-GI-LABEL: fshl_i128_c:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: lsr x8, x0, #61
-; CHECK-GI-NEXT: lsr x9, x3, #61
-; CHECK-GI-NEXT: orr x1, x8, x1, lsl #3
-; CHECK-GI-NEXT: orr x0, x9, x0, lsl #3
+; CHECK-GI-NEXT: extr x1, x1, x0, #61
+; CHECK-GI-NEXT: extr x0, x0, x3, #61
; CHECK-GI-NEXT: ret
entry:
%d = call i128 @llvm.fshl(i128 %a, i128 %b, i128 3)
@@ -879,21 +866,12 @@ entry:
}
define i128 @fshr_i128_c(i128 %a, i128 %b) {
-; CHECK-SD-LABEL: fshr_i128_c:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: extr x8, x3, x2, #3
-; CHECK-SD-NEXT: extr x1, x0, x3, #3
-; CHECK-SD-NEXT: mov x0, x8
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: fshr_i128_c:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: lsl x8, x3, #61
-; CHECK-GI-NEXT: lsr x9, x3, #3
-; CHECK-GI-NEXT: orr x8, x8, x2, lsr #3
-; CHECK-GI-NEXT: orr x1, x9, x0, lsl #61
-; CHECK-GI-NEXT: mov x0, x8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: fshr_i128_c:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: extr x8, x3, x2, #3
+; CHECK-NEXT: extr x1, x0, x3, #3
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: ret
entry:
%d = call i128 @llvm.fshr(i128 %a, i128 %b, i128 3)
ret i128 %d
@@ -3013,75 +2991,73 @@ define <2 x i128> @fshl_v2i128(<2 x i128> %a, <2 x i128> %b, <2 x i128> %c) {
; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
; CHECK-GI-NEXT: .cfi_offset w19, -16
; CHECK-GI-NEXT: ldr x11, [sp, #16]
-; CHECK-GI-NEXT: mov w10, #64 // =0x40
+; CHECK-GI-NEXT: mov w9, #64 // =0x40
; CHECK-GI-NEXT: ldr x12, [sp, #32]
; CHECK-GI-NEXT: mov w13, #127 // =0x7f
-; CHECK-GI-NEXT: and x9, x11, #0x7f
+; CHECK-GI-NEXT: and x8, x11, #0x7f
; CHECK-GI-NEXT: and x14, x12, #0x7f
-; CHECK-GI-NEXT: mvn x15, x11
-; CHECK-GI-NEXT: sub x8, x10, x9
-; CHECK-GI-NEXT: sub x16, x9, #64
-; CHECK-GI-NEXT: lsl x19, x1, x9
-; CHECK-GI-NEXT: lsr x18, x0, x8
-; CHECK-GI-NEXT: lsl x17, x0, x9
-; CHECK-GI-NEXT: lsl x16, x0, x16
-; CHECK-GI-NEXT: cmp x9, #64
-; CHECK-GI-NEXT: bic x0, x13, x11
-; CHECK-GI-NEXT: mvn x8, x12
-; CHECK-GI-NEXT: orr x18, x18, x19
-; CHECK-GI-NEXT: csel x9, x17, xzr, lo
+; CHECK-GI-NEXT: mvn x18, x11
+; CHECK-GI-NEXT: sub x10, x9, x8
+; CHECK-GI-NEXT: sub x15, x8, #64
+; CHECK-GI-NEXT: lsl x17, x1, x8
+; CHECK-GI-NEXT: lsr x16, x0, x10
+; CHECK-GI-NEXT: lsl x15, x0, x15
+; CHECK-GI-NEXT: cmp x8, #64
+; CHECK-GI-NEXT: lsl x19, x0, x8
+; CHECK-GI-NEXT: lsl x0, x3, x14
+; CHECK-GI-NEXT: mvn x10, x12
+; CHECK-GI-NEXT: orr x16, x16, x17
; CHECK-GI-NEXT: sub x17, x14, #64
-; CHECK-GI-NEXT: csel x16, x18, x16, lo
+; CHECK-GI-NEXT: csel x15, x16, x15, lo
+; CHECK-GI-NEXT: sub x16, x9, x14
+; CHECK-GI-NEXT: csel x8, x19, xzr, lo
+; CHECK-GI-NEXT: lsr x16, x2, x16
; CHECK-GI-NEXT: tst x11, #0x7f
-; CHECK-GI-NEXT: sub x11, x10, x14
-; CHECK-GI-NEXT: lsr x11, x2, x11
-; CHECK-GI-NEXT: lsl x18, x3, x14
-; CHECK-GI-NEXT: csel x16, x1, x16, eq
-; CHECK-GI-NEXT: lsl x1, x2, x14
+; CHECK-GI-NEXT: lsl x19, x2, x14
; CHECK-GI-NEXT: lsl x17, x2, x17
+; CHECK-GI-NEXT: csel x15, x1, x15, eq
; CHECK-GI-NEXT: cmp x14, #64
-; CHECK-GI-NEXT: lsl x14, x5, #63
-; CHECK-GI-NEXT: orr x11, x11, x18
-; CHECK-GI-NEXT: bic x13, x13, x12
-; CHECK-GI-NEXT: csel x18, x1, xzr, lo
-; CHECK-GI-NEXT: csel x11, x11, x17, lo
+; CHECK-GI-NEXT: orr x16, x16, x0
+; CHECK-GI-NEXT: bic x11, x13, x11
+; CHECK-GI-NEXT: csel x14, x19, xzr, lo
+; CHECK-GI-NEXT: csel x16, x16, x17, lo
; CHECK-GI-NEXT: tst x12, #0x7f
-; CHECK-GI-NEXT: lsr x12, x5, #1
-; CHECK-GI-NEXT: orr x14, x14, x4, lsr #1
-; CHECK-GI-NEXT: lsl x17, x7, #63
-; CHECK-GI-NEXT: sub x1, x10, x0
-; CHECK-GI-NEXT: csel x11, x3, x11, eq
-; CHECK-GI-NEXT: sub x2, x0, #64
-; CHECK-GI-NEXT: lsr x3, x14, x0
-; CHECK-GI-NEXT: lsl x1, x12, x1
-; CHECK-GI-NEXT: lsr x4, x7, #1
-; CHECK-GI-NEXT: orr x17, x17, x6, lsr #1
-; CHECK-GI-NEXT: lsr x2, x12, x2
-; CHECK-GI-NEXT: cmp x0, #64
-; CHECK-GI-NEXT: orr x1, x3, x1
-; CHECK-GI-NEXT: sub x10, x10, x13
-; CHECK-GI-NEXT: lsr x12, x12, x0
-; CHECK-GI-NEXT: csel x1, x1, x2, lo
-; CHECK-GI-NEXT: tst x15, #0x7f
-; CHECK-GI-NEXT: sub x15, x13, #64
-; CHECK-GI-NEXT: lsr x2, x17, x13
-; CHECK-GI-NEXT: lsl x10, x4, x10
-; CHECK-GI-NEXT: csel x14, x14, x1, eq
-; CHECK-GI-NEXT: cmp x0, #64
-; CHECK-GI-NEXT: lsr x15, x4, x15
-; CHECK-GI-NEXT: lsr x0, x4, x13
-; CHECK-GI-NEXT: csel x12, x12, xzr, lo
-; CHECK-GI-NEXT: orr x10, x2, x10
-; CHECK-GI-NEXT: cmp x13, #64
-; CHECK-GI-NEXT: csel x10, x10, x15, lo
-; CHECK-GI-NEXT: tst x8, #0x7f
-; CHECK-GI-NEXT: orr x1, x16, x12
-; CHECK-GI-NEXT: csel x8, x17, x10, eq
-; CHECK-GI-NEXT: cmp x13, #64
-; CHECK-GI-NEXT: csel x10, x0, xzr, lo
-; CHECK-GI-NEXT: orr x0, x9, x14
-; CHECK-GI-NEXT: orr x2, x18, x8
-; CHECK-GI-NEXT: orr x3, x11, x10
+; CHECK-GI-NEXT: lsr x17, x5, #1
+; CHECK-GI-NEXT: extr x0, x5, x4, #1
+; CHECK-GI-NEXT: bic x12, x13, x12
+; CHECK-GI-NEXT: csel x13, x3, x16, eq
+; CHECK-GI-NEXT: sub x16, x9, x11
+; CHECK-GI-NEXT: sub x1, x11, #64
+; CHECK-GI-NEXT: lsr x3, x7, #1
+; CHECK-GI-NEXT: lsr x2, x0, x11
+; CHECK-GI-NEXT: lsl x16, x17, x16
+; CHECK-GI-NEXT: extr x4, x7, x6, #1
+; CHECK-GI-NEXT: lsr x1, x17, x1
+; CHECK-GI-NEXT: cmp x11, #64
+; CHECK-GI-NEXT: sub x9, x9, x12
+; CHECK-GI-NEXT: orr x16, x2, x16
+; CHECK-GI-NEXT: lsr x17, x17, x11
+; CHECK-GI-NEXT: lsl x9, x3, x9
+; CHECK-GI-NEXT: csel x16, x16, x1, lo
+; CHECK-GI-NEXT: tst x18, #0x7f
+; CHECK-GI-NEXT: sub x18, x12, #64
+; CHECK-GI-NEXT: lsr x1, x4, x12
+; CHECK-GI-NEXT: csel x16, x0, x16, eq
+; CHECK-GI-NEXT: cmp x11, #64
+; CHECK-GI-NEXT: lsr x11, x3, x18
+; CHECK-GI-NEXT: csel x17, x17, xzr, lo
+; CHECK-GI-NEXT: cmp x12, #64
+; CHECK-GI-NEXT: orr x9, x1, x9
+; CHECK-GI-NEXT: lsr x18, x3, x12
+; CHECK-GI-NEXT: orr x0, x8, x16
+; CHECK-GI-NEXT: csel x9, x9, x11, lo
+; CHECK-GI-NEXT: tst x10, #0x7f
+; CHECK-GI-NEXT: orr x1, x15, x17
+; CHECK-GI-NEXT: csel x9, x4, x9, eq
+; CHECK-GI-NEXT: cmp x12, #64
+; CHECK-GI-NEXT: csel x10, x18, xzr, lo
+; CHECK-GI-NEXT: orr x2, x14, x9
+; CHECK-GI-NEXT: orr x3, x13, x10
; CHECK-GI-NEXT: ldr x19, [sp], #16 // 8-byte Folded Reload
; CHECK-GI-NEXT: ret
entry:
@@ -3125,75 +3101,73 @@ define <2 x i128> @fshr_v2i128(<2 x i128> %a, <2 x i128> %b, <2 x i128> %c) {
; CHECK-GI-LABEL: fshr_v2i128:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr x9, [sp]
-; CHECK-GI-NEXT: lsl x12, x1, #1
-; CHECK-GI-NEXT: mov w11, #127 // =0x7f
-; CHECK-GI-NEXT: mov w14, #64 // =0x40
-; CHECK-GI-NEXT: lsl x15, x0, #1
+; CHECK-GI-NEXT: mov w10, #127 // =0x7f
+; CHECK-GI-NEXT: mov w12, #64 // =0x40
+; CHECK-GI-NEXT: lsl x13, x0, #1
+; CHECK-GI-NEXT: extr x14, x1, x0, #63
; CHECK-GI-NEXT: ldr x8, [sp, #16]
-; CHECK-GI-NEXT: bic x13, x11, x9
-; CHECK-GI-NEXT: orr x12, x12, x0, lsr #63
-; CHECK-GI-NEXT: lsl x1, x3, #1
-; CHECK-GI-NEXT: sub x17, x14, x13
-; CHECK-GI-NEXT: sub x18, x13, #64
-; CHECK-GI-NEXT: lsl x3, x15, x13
-; CHECK-GI-NEXT: lsr x17, x15, x17
-; CHECK-GI-NEXT: lsl x0, x12, x13
-; CHECK-GI-NEXT: lsl x15, x15, x18
-; CHECK-GI-NEXT: bic x11, x11, x8
+; CHECK-GI-NEXT: bic x11, x10, x9
+; CHECK-GI-NEXT: mvn x16, x9
+; CHECK-GI-NEXT: and x15, x9, #0x7f
+; CHECK-GI-NEXT: sub x17, x12, x11
+; CHECK-GI-NEXT: sub x18, x11, #64
+; CHECK-GI-NEXT: lsl x0, x14, x11
+; CHECK-GI-NEXT: lsr x17, x13, x17
+; CHECK-GI-NEXT: lsl x1, x13, x11
+; CHECK-GI-NEXT: lsl x13, x13, x18
+; CHECK-GI-NEXT: bic x10, x10, x8
; CHECK-GI-NEXT: lsl x18, x2, #1
-; CHECK-GI-NEXT: cmp x13, #64
+; CHECK-GI-NEXT: cmp x11, #64
; CHECK-GI-NEXT: orr x17, x17, x0
-; CHECK-GI-NEXT: orr x13, x1, x2, lsr #63
-; CHECK-GI-NEXT: mvn x16, x9
-; CHECK-GI-NEXT: csel x15, x17, x15, lo
-; CHECK-GI-NEXT: sub x17, x14, x11
-; CHECK-GI-NEXT: csel x0, x3, xzr, lo
+; CHECK-GI-NEXT: extr x11, x3, x2, #63
+; CHECK-GI-NEXT: csel x0, x1, xzr, lo
+; CHECK-GI-NEXT: csel x13, x17, x13, lo
+; CHECK-GI-NEXT: sub x17, x12, x10
; CHECK-GI-NEXT: tst x16, #0x7f
-; CHECK-GI-NEXT: sub x16, x11, #64
+; CHECK-GI-NEXT: sub x16, x10, #64
; CHECK-GI-NEXT: lsr x17, x18, x17
-; CHECK-GI-NEXT: lsl x2, x13, x11
-; CHECK-GI-NEXT: lsl x1, x18, x11
-; CHECK-GI-NEXT: csel x12, x12, x15, eq
-; CHECK-GI-NEXT: lsl x15, x18, x16
-; CHECK-GI-NEXT: and x10, x9, #0x7f
-; CHECK-GI-NEXT: cmp x11, #64
-; CHECK-GI-NEXT: mvn x11, x8
+; CHECK-GI-NEXT: lsl x2, x11, x10
+; CHECK-GI-NEXT: lsl x1, x18, x10
+; CHECK-GI-NEXT: csel x13, x14, x13, eq
+; CHECK-GI-NEXT: lsl x14, x18, x16
+; CHECK-GI-NEXT: cmp x10, #64
+; CHECK-GI-NEXT: mvn x10, x8
; CHECK-GI-NEXT: orr x16, x17, x2
; CHECK-GI-NEXT: csel x17, x1, xzr, lo
-; CHECK-GI-NEXT: csel x15, x16, x15, lo
-; CHECK-GI-NEXT: tst x11, #0x7f
-; CHECK-GI-NEXT: sub x11, x14, x10
-; CHECK-GI-NEXT: sub x16, x10, #64
-; CHECK-GI-NEXT: lsr x18, x4, x10
-; CHECK-GI-NEXT: lsl x11, x5, x11
-; CHECK-GI-NEXT: csel x13, x13, x15, eq
-; CHECK-GI-NEXT: lsr x15, x5, x16
+; CHECK-GI-NEXT: csel x14, x16, x14, lo
+; CHECK-GI-NEXT: tst x10, #0x7f
+; CHECK-GI-NEXT: sub x10, x12, x15
+; CHECK-GI-NEXT: sub x16, x15, #64
+; CHECK-GI-NEXT: lsr x18, x4, x15
+; CHECK-GI-NEXT: lsl x10, x5, x10
+; CHECK-GI-NEXT: csel x11, x11, x14, eq
+; CHECK-GI-NEXT: lsr x14, x5, x16
; CHECK-GI-NEXT: and x1, x8, #0x7f
-; CHECK-GI-NEXT: orr x11, x18, x11
-; CHECK-GI-NEXT: cmp x10, #64
-; CHECK-GI-NEXT: lsr x16, x5, x10
-; CHECK-GI-NEXT: csel x11, x11, x15, lo
+; CHECK-GI-NEXT: cmp x15, #64
+; CHECK-GI-NEXT: lsr x16, x5, x15
+; CHECK-GI-NEXT: orr x10, x18, x10
+; CHECK-GI-NEXT: csel x10, x10, x14, lo
; CHECK-GI-NEXT: tst x9, #0x7f
-; CHECK-GI-NEXT: sub x9, x14, x1
-; CHECK-GI-NEXT: sub x14, x1, #64
-; CHECK-GI-NEXT: lsr x15, x6, x1
+; CHECK-GI-NEXT: sub x9, x12, x1
+; CHECK-GI-NEXT: sub x12, x1, #64
+; CHECK-GI-NEXT: lsr x14, x6, x1
; CHECK-GI-NEXT: lsl x9, x7, x9
-; CHECK-GI-NEXT: csel x11, x4, x11, eq
-; CHECK-GI-NEXT: cmp x10, #64
-; CHECK-GI-NEXT: lsr x10, x7, x14
-; CHECK-GI-NEXT: csel x14, x16, xzr, lo
-; CHECK-GI-NEXT: orr x9, x15, x9
+; CHECK-GI-NEXT: csel x10, x4, x10, eq
+; CHECK-GI-NEXT: cmp x15, #64
+; CHECK-GI-NEXT: lsr x12, x7, x12
+; CHECK-GI-NEXT: csel x15, x16, xzr, lo
+; CHECK-GI-NEXT: orr x9, x14, x9
; CHECK-GI-NEXT: cmp x1, #64
-; CHECK-GI-NEXT: lsr x15, x7, x1
-; CHECK-GI-NEXT: csel x9, x9, x10, lo
+; CHECK-GI-NEXT: lsr x14, x7, x1
+; CHECK-GI-NEXT: csel x9, x9, x12, lo
; CHECK-GI-NEXT: tst x8, #0x7f
; CHECK-GI-NEXT: csel x8, x6, x9, eq
; CHECK-GI-NEXT: cmp x1, #64
-; CHECK-GI-NEXT: orr x0, x0, x11
-; CHECK-GI-NEXT: csel x9, x15, xzr, lo
-; CHECK-GI-NEXT: orr x1, x12, x14
+; CHECK-GI-NEXT: orr x0, x0, x10
+; CHECK-GI-NEXT: csel x9, x14, xzr, lo
+; CHECK-GI-NEXT: orr x1, x13, x15
; CHECK-GI-NEXT: orr x2, x17, x8
-; CHECK-GI-NEXT: orr x3, x13, x9
+; CHECK-GI-NEXT: orr x3, x11, x9
; CHECK-GI-NEXT: ret
entry:
%d = call <2 x i128> @llvm.fshr(<2 x i128> %a, <2 x i128> %b, <2 x i128> %c)
@@ -3863,15 +3837,12 @@ define <2 x i128> @rotl_v2i128_c(<2 x i128> %a) {
;
; CHECK-GI-LABEL: rotl_v2i128_c:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: lsr x8, x1, #61
-; CHECK-GI-NEXT: lsl x9, x1, #3
-; CHECK-GI-NEXT: lsl x10, x3, #3
-; CHECK-GI-NEXT: lsr x11, x3, #61
-; CHECK-GI-NEXT: orr x8, x8, x0, lsl #3
-; CHECK-GI-NEXT: orr x1, x9, x0, lsr #61
-; CHECK-GI-NEXT: orr x3, x10, x2, lsr #61
-; CHECK-GI-NEXT: orr x2, x11, x2, lsl #3
+; CHECK-GI-NEXT: extr x8, x0, x1, #61
+; CHECK-GI-NEXT: extr x9, x3, x2, #61
+; CHECK-GI-NEXT: extr x1, x1, x0, #61
+; CHECK-GI-NEXT: extr x2, x2, x3, #61
; CHECK-GI-NEXT: mov x0, x8
+; CHECK-GI-NEXT: mov x3, x9
; CHECK-GI-NEXT: ret
entry:
%d = call <2 x i128> @llvm.fshl(<2 x i128> %a, <2 x i128> %a, <2 x i128> <i128 3, i128 3>)
@@ -3891,14 +3862,12 @@ define <2 x i128> @rotr_v2i128_c(<2 x i128> %a) {
;
; CHECK-GI-LABEL: rotr_v2i128_c:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: lsl x8, x1, #61
-; CHECK-GI-NEXT: lsl x9, x3, #61
-; CHECK-GI-NEXT: lsl x10, x0, #61
-; CHECK-GI-NEXT: lsl x11, x2, #61
-; CHECK-GI-NEXT: orr x0, x8, x0, lsr #3
-; CHECK-GI-NEXT: orr x2, x9, x2, lsr #3
-; CHECK-GI-NEXT: orr x1, x10, x1, lsr #3
-; CHECK-GI-NEXT: orr x3, x11, x3, lsr #3
+; CHECK-GI-NEXT: extr x8, x1, x0, #3
+; CHECK-GI-NEXT: extr x9, x3, x2, #3
+; CHECK-GI-NEXT: extr x1, x0, x1, #3
+; CHECK-GI-NEXT: extr x3, x2, x3, #3
+; CHECK-GI-NEXT: mov x0, x8
+; CHECK-GI-NEXT: mov x2, x9
; CHECK-GI-NEXT: ret
entry:
%d = call <2 x i128> @llvm.fshr(<2 x i128> %a, <2 x i128> %a, <2 x i128> <i128 3, i128 3>)
@@ -4464,14 +4433,10 @@ define <2 x i128> @fshl_v2i128_c(<2 x i128> %a, <2 x i128> %b) {
;
; CHECK-GI-LABEL: fshl_v2i128_c:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: lsr x8, x5, #61
-; CHECK-GI-NEXT: lsl x9, x1, #3
-; CHECK-GI-NEXT: lsl x10, x3, #3
-; CHECK-GI-NEXT: lsr x11, x7, #61
-; CHECK-GI-NEXT: orr x8, x8, x0, lsl #3
-; CHECK-GI-NEXT: orr x1, x9, x0, lsr #61
-; CHECK-GI-NEXT: orr x3, x10, x2, lsr #61
-; CHECK-GI-NEXT: orr x2, x11, x2, lsl #3
+; CHECK-GI-NEXT: extr x8, x0, x5, #61
+; CHECK-GI-NEXT: extr x1, x1, x0, #61
+; CHECK-GI-NEXT: extr x3, x3, x2, #61
+; CHECK-GI-NEXT: extr x2, x2, x7, #61
; CHECK-GI-NEXT: mov x0, x8
; CHECK-GI-NEXT: ret
entry:
@@ -4480,29 +4445,15 @@ entry:
}
define <2 x i128> @fshr_v2i128_c(<2 x i128> %a, <2 x i128> %b) {
-; CHECK-SD-LABEL: fshr_v2i128_c:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: extr x8, x5, x4, #3
-; CHECK-SD-NEXT: extr x9, x7, x6, #3
-; CHECK-SD-NEXT: extr x1, x0, x5, #3
-; CHECK-SD-NEXT: extr x3, x2, x7, #3
-; CHECK-SD-NEXT: mov x0, x8
-; CHECK-SD-NEXT: mov x2, x9
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: fshr_v2i128_c:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: lsl x8, x5, #61
-; CHECK-GI-NEXT: lsl x9, x7, #61
-; CHECK-GI-NEXT: lsr x10, x5, #3
-; CHECK-GI-NEXT: lsr x11, x7, #3
-; CHECK-GI-NEXT: orr x8, x8, x4, lsr #3
-; CHECK-GI-NEXT: orr x9, x9, x6, lsr #3
-; CHECK-GI-NEXT: orr x1, x10, x0, lsl #61
-; CHECK-GI-NEXT: orr x3, x11, x2, lsl #61
-; CHECK-GI-NEXT: mov x0, x8
-; CHECK-GI-NEXT: mov x2, x9
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: fshr_v2i128_c:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: extr x8, x5, x4, #3
+; CHECK-NEXT: extr x9, x7, x6, #3
+; CHECK-NEXT: extr x1, x0, x5, #3
+; CHECK-NEXT: extr x3, x2, x7, #3
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: mov x2, x9
+; CHECK-NEXT: ret
entry:
%d = call <2 x i128> @llvm.fshr(<2 x i128> %a, <2 x i128> %b, <2 x i128> <i128 3, i128 3>)
ret <2 x i128> %d
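Both selectors now agree on this sequence. For reference, the shape being exercised is a funnel shift by a constant amount, which maps one-to-one onto AArch64's EXTR; a minimal i64 sketch follows (the function name is illustrative, not taken from the test):

  ; fshr concatenates %hi:%lo and returns the low 64 bits of that 128-bit
  ; value shifted right by 3, i.e. exactly one "extr x0, x0, x1, #3".
  define i64 @fshr_i64_c(i64 %hi, i64 %lo) {
  entry:
    %r = call i64 @llvm.fshr.i64(i64 %hi, i64 %lo, i64 3)
    ret i64 %r
  }
  declare i64 @llvm.fshr.i64(i64, i64, i64)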
diff --git a/llvm/test/CodeGen/AArch64/funnel-shift.ll b/llvm/test/CodeGen/AArch64/funnel-shift.ll
index f9fd2ad..90fb102 100644
--- a/llvm/test/CodeGen/AArch64/funnel-shift.ll
+++ b/llvm/test/CodeGen/AArch64/funnel-shift.ll
@@ -85,41 +85,40 @@ define i128 @fshl_i128(i128 %x, i128 %y, i128 %z) nounwind {
;
; CHECK-GI-LABEL: fshl_i128:
; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #64 // =0x40
; CHECK-GI-NEXT: and x9, x4, #0x7f
-; CHECK-GI-NEXT: mov w10, #64 // =0x40
-; CHECK-GI-NEXT: lsl x14, x3, #63
-; CHECK-GI-NEXT: sub x12, x10, x9
+; CHECK-GI-NEXT: mov w10, #127 // =0x7f
+; CHECK-GI-NEXT: sub x12, x8, x9
; CHECK-GI-NEXT: lsl x13, x1, x9
-; CHECK-GI-NEXT: mov w8, #127 // =0x7f
+; CHECK-GI-NEXT: bic x10, x10, x4
; CHECK-GI-NEXT: lsr x12, x0, x12
-; CHECK-GI-NEXT: bic x8, x8, x4
-; CHECK-GI-NEXT: sub x15, x9, #64
+; CHECK-GI-NEXT: sub x14, x9, #64
+; CHECK-GI-NEXT: lsl x15, x0, x9
+; CHECK-GI-NEXT: extr x16, x3, x2, #1
; CHECK-GI-NEXT: cmp x9, #64
-; CHECK-GI-NEXT: lsl x9, x0, x9
-; CHECK-GI-NEXT: lsl x15, x0, x15
-; CHECK-GI-NEXT: orr x12, x12, x13
-; CHECK-GI-NEXT: orr x13, x14, x2, lsr #1
-; CHECK-GI-NEXT: lsr x14, x3, #1
-; CHECK-GI-NEXT: sub x10, x10, x8
-; CHECK-GI-NEXT: sub x16, x8, #64
-; CHECK-GI-NEXT: csel x9, x9, xzr, lo
-; CHECK-GI-NEXT: lsr x17, x13, x8
-; CHECK-GI-NEXT: lsl x10, x14, x10
-; CHECK-GI-NEXT: csel x12, x12, x15, lo
+; CHECK-GI-NEXT: sub x8, x8, x10
+; CHECK-GI-NEXT: orr x9, x12, x13
+; CHECK-GI-NEXT: lsr x12, x3, #1
+; CHECK-GI-NEXT: lsl x13, x0, x14
+; CHECK-GI-NEXT: csel x14, x15, xzr, lo
+; CHECK-GI-NEXT: sub x15, x10, #64
+; CHECK-GI-NEXT: lsr x17, x16, x10
+; CHECK-GI-NEXT: lsl x8, x12, x8
+; CHECK-GI-NEXT: csel x9, x9, x13, lo
; CHECK-GI-NEXT: tst x4, #0x7f
-; CHECK-GI-NEXT: lsr x15, x14, x16
+; CHECK-GI-NEXT: lsr x13, x12, x15
; CHECK-GI-NEXT: mvn x11, x4
-; CHECK-GI-NEXT: csel x12, x1, x12, eq
-; CHECK-GI-NEXT: orr x10, x17, x10
-; CHECK-GI-NEXT: cmp x8, #64
-; CHECK-GI-NEXT: lsr x14, x14, x8
-; CHECK-GI-NEXT: csel x10, x10, x15, lo
+; CHECK-GI-NEXT: csel x9, x1, x9, eq
+; CHECK-GI-NEXT: orr x8, x17, x8
+; CHECK-GI-NEXT: cmp x10, #64
+; CHECK-GI-NEXT: lsr x12, x12, x10
+; CHECK-GI-NEXT: csel x8, x8, x13, lo
; CHECK-GI-NEXT: tst x11, #0x7f
-; CHECK-GI-NEXT: csel x10, x13, x10, eq
-; CHECK-GI-NEXT: cmp x8, #64
-; CHECK-GI-NEXT: csel x8, x14, xzr, lo
-; CHECK-GI-NEXT: orr x0, x9, x10
-; CHECK-GI-NEXT: orr x1, x12, x8
+; CHECK-GI-NEXT: csel x8, x16, x8, eq
+; CHECK-GI-NEXT: cmp x10, #64
+; CHECK-GI-NEXT: csel x10, x12, xzr, lo
+; CHECK-GI-NEXT: orr x0, x14, x8
+; CHECK-GI-NEXT: orr x1, x9, x10
; CHECK-GI-NEXT: ret
%f = call i128 @llvm.fshl.i128(i128 %x, i128 %y, i128 %z)
ret i128 %f
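The variable-amount case cannot use a single EXTR; the CHECK lines above are the generic split-into-64-bit-halves expansion, with cmp/csel picking between the small-shift and large-shift paths. What they compute is the standard identity, sketched here as a reference function (not part of the test):

  ; fshl(x, y, z) = (x << n) | (y >> (128 - n)) with n = z mod 128, and
  ; plain x when n == 0; the tst/csel pairs above guard that edge case.
  define i128 @fshl_i128_ref(i128 %x, i128 %y, i128 %z) {
    %n = and i128 %z, 127
    %shl = shl i128 %x, %n
    %amt = sub i128 128, %n
    %lshr = lshr i128 %y, %amt
    %or = or i128 %shl, %lshr
    %zero = icmp eq i128 %n, 0
    %r = select i1 %zero, i128 %x, i128 %or
    ret i128 %r
  }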
diff --git a/llvm/test/CodeGen/AArch64/rem-by-const.ll b/llvm/test/CodeGen/AArch64/rem-by-const.ll
index 1cb92e4..87b1108 100644
--- a/llvm/test/CodeGen/AArch64/rem-by-const.ll
+++ b/llvm/test/CodeGen/AArch64/rem-by-const.ll
@@ -559,20 +559,18 @@ define i128 @ui128_7(i128 %a, i128 %b) {
; CHECK-GI-NEXT: add x8, x8, x10
; CHECK-GI-NEXT: subs x10, x0, x9
; CHECK-GI-NEXT: sbc x11, x1, x8
-; CHECK-GI-NEXT: lsl x12, x11, #63
+; CHECK-GI-NEXT: extr x10, x11, x10, #1
; CHECK-GI-NEXT: lsr x11, x11, #1
-; CHECK-GI-NEXT: orr x10, x12, x10, lsr #1
; CHECK-GI-NEXT: adds x9, x10, x9
+; CHECK-GI-NEXT: mov w10, #7 // =0x7
; CHECK-GI-NEXT: adc x8, x11, x8
-; CHECK-GI-NEXT: lsl x10, x8, #62
+; CHECK-GI-NEXT: extr x9, x8, x9, #2
; CHECK-GI-NEXT: lsr x8, x8, #2
-; CHECK-GI-NEXT: orr x9, x10, x9, lsr #2
-; CHECK-GI-NEXT: mov w10, #7 // =0x7
-; CHECK-GI-NEXT: lsl x12, x8, #3
; CHECK-GI-NEXT: umulh x10, x9, x10
; CHECK-GI-NEXT: lsl x11, x9, #3
-; CHECK-GI-NEXT: sub x8, x12, x8
+; CHECK-GI-NEXT: lsl x12, x8, #3
; CHECK-GI-NEXT: sub x9, x11, x9
+; CHECK-GI-NEXT: sub x8, x12, x8
; CHECK-GI-NEXT: subs x0, x0, x9
; CHECK-GI-NEXT: add x8, x8, x10
; CHECK-GI-NEXT: sbc x1, x1, x8
@@ -640,10 +638,9 @@ define i128 @ui128_100(i128 %a, i128 %b) {
; CHECK-GI-NEXT: add x10, x11, x12
; CHECK-GI-NEXT: add x8, x8, x14
; CHECK-GI-NEXT: add x8, x8, x10
-; CHECK-GI-NEXT: lsl x10, x8, #60
-; CHECK-GI-NEXT: lsr x8, x8, #4
-; CHECK-GI-NEXT: orr x9, x10, x9, lsr #4
; CHECK-GI-NEXT: mov w10, #100 // =0x64
+; CHECK-GI-NEXT: extr x9, x8, x9, #4
+; CHECK-GI-NEXT: lsr x8, x8, #4
; CHECK-GI-NEXT: umulh x11, x9, x10
; CHECK-GI-NEXT: mul x9, x9, x10
; CHECK-GI-NEXT: madd x8, x8, x10, x11
@@ -3317,36 +3314,32 @@ define <2 x i128> @uv2i128_7(<2 x i128> %d, <2 x i128> %e) {
; CHECK-GI-NEXT: sbc x14, x1, x12
; CHECK-GI-NEXT: add x8, x8, x13
; CHECK-GI-NEXT: subs x13, x2, x10
-; CHECK-GI-NEXT: lsl x15, x14, #63
-; CHECK-GI-NEXT: sbc x16, x3, x8
+; CHECK-GI-NEXT: extr x9, x14, x9, #1
+; CHECK-GI-NEXT: sbc x15, x3, x8
; CHECK-GI-NEXT: lsr x14, x14, #1
-; CHECK-GI-NEXT: orr x9, x15, x9, lsr #1
-; CHECK-GI-NEXT: lsl x15, x16, #63
-; CHECK-GI-NEXT: orr x13, x15, x13, lsr #1
+; CHECK-GI-NEXT: extr x13, x15, x13, #1
; CHECK-GI-NEXT: adds x9, x9, x11
-; CHECK-GI-NEXT: lsr x11, x16, #1
+; CHECK-GI-NEXT: lsr x11, x15, #1
; CHECK-GI-NEXT: adc x12, x14, x12
; CHECK-GI-NEXT: adds x10, x13, x10
-; CHECK-GI-NEXT: lsl x13, x12, #62
-; CHECK-GI-NEXT: lsr x12, x12, #2
-; CHECK-GI-NEXT: adc x8, x11, x8
-; CHECK-GI-NEXT: lsl x11, x8, #62
-; CHECK-GI-NEXT: orr x9, x13, x9, lsr #2
+; CHECK-GI-NEXT: extr x9, x12, x9, #2
; CHECK-GI-NEXT: mov w13, #7 // =0x7
+; CHECK-GI-NEXT: adc x8, x11, x8
+; CHECK-GI-NEXT: lsr x11, x12, #2
+; CHECK-GI-NEXT: extr x10, x8, x10, #2
+; CHECK-GI-NEXT: umulh x12, x9, x13
; CHECK-GI-NEXT: lsr x8, x8, #2
-; CHECK-GI-NEXT: lsl x14, x12, #3
-; CHECK-GI-NEXT: orr x10, x11, x10, lsr #2
-; CHECK-GI-NEXT: umulh x11, x9, x13
+; CHECK-GI-NEXT: lsl x14, x11, #3
; CHECK-GI-NEXT: lsl x15, x9, #3
-; CHECK-GI-NEXT: sub x12, x14, x12
-; CHECK-GI-NEXT: lsl x16, x8, #3
; CHECK-GI-NEXT: umulh x13, x10, x13
+; CHECK-GI-NEXT: lsl x16, x8, #3
+; CHECK-GI-NEXT: sub x11, x14, x11
; CHECK-GI-NEXT: lsl x14, x10, #3
; CHECK-GI-NEXT: sub x9, x15, x9
; CHECK-GI-NEXT: sub x8, x16, x8
; CHECK-GI-NEXT: subs x0, x0, x9
+; CHECK-GI-NEXT: add x11, x11, x12
; CHECK-GI-NEXT: sub x10, x14, x10
-; CHECK-GI-NEXT: add x11, x12, x11
; CHECK-GI-NEXT: sbc x1, x1, x11
; CHECK-GI-NEXT: subs x2, x2, x10
; CHECK-GI-NEXT: add x8, x8, x13
@@ -3394,9 +3387,10 @@ define <2 x i128> @uv2i128_100(<2 x i128> %d, <2 x i128> %e) {
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: mov x10, #23593 // =0x5c29
; CHECK-GI-NEXT: mov x8, #62914 // =0xf5c2
-; CHECK-GI-NEXT: sub x18, x0, x0
+; CHECK-GI-NEXT: and x5, xzr, #0x1
; CHECK-GI-NEXT: movk x10, #49807, lsl #16
; CHECK-GI-NEXT: movk x8, #23592, lsl #16
+; CHECK-GI-NEXT: umulh x18, x0, xzr
; CHECK-GI-NEXT: movk x10, #10485, lsl #32
; CHECK-GI-NEXT: movk x8, #49807, lsl #32
; CHECK-GI-NEXT: movk x10, #36700, lsl #48
@@ -3409,84 +3403,81 @@ define <2 x i128> @uv2i128_100(<2 x i128> %d, <2 x i128> %e) {
; CHECK-GI-NEXT: umulh x15, x1, x10
; CHECK-GI-NEXT: cset w12, hs
; CHECK-GI-NEXT: cmn x11, x13
-; CHECK-GI-NEXT: and x11, x12, #0x1
-; CHECK-GI-NEXT: umulh x16, x0, x8
-; CHECK-GI-NEXT: cset w12, hs
+; CHECK-GI-NEXT: sub x13, x0, x0
; CHECK-GI-NEXT: and x12, x12, #0x1
-; CHECK-GI-NEXT: add x14, x14, x18
-; CHECK-GI-NEXT: add x11, x11, x12
-; CHECK-GI-NEXT: and x12, xzr, #0x1
+; CHECK-GI-NEXT: umulh x16, x0, x8
+; CHECK-GI-NEXT: cset w11, hs
+; CHECK-GI-NEXT: add x13, x14, x13
+; CHECK-GI-NEXT: and x11, x11, #0x1
+; CHECK-GI-NEXT: and x14, xzr, #0x1
; CHECK-GI-NEXT: umulh x9, xzr, x10
-; CHECK-GI-NEXT: adds x14, x14, x15
-; CHECK-GI-NEXT: and x15, xzr, #0x1
+; CHECK-GI-NEXT: add x11, x12, x11
+; CHECK-GI-NEXT: add x12, x5, x14
+; CHECK-GI-NEXT: adds x13, x13, x15
; CHECK-GI-NEXT: umulh x17, x1, x8
-; CHECK-GI-NEXT: cset w4, hs
-; CHECK-GI-NEXT: add x15, x12, x15
-; CHECK-GI-NEXT: adds x12, x14, x16
-; CHECK-GI-NEXT: and x4, x4, #0x1
-; CHECK-GI-NEXT: mul x18, x3, x10
; CHECK-GI-NEXT: cset w14, hs
-; CHECK-GI-NEXT: adds x12, x12, x11
-; CHECK-GI-NEXT: add x11, x15, x4
; CHECK-GI-NEXT: and x14, x14, #0x1
-; CHECK-GI-NEXT: cset w15, hs
-; CHECK-GI-NEXT: mul x5, x2, x8
-; CHECK-GI-NEXT: add x11, x11, x14
-; CHECK-GI-NEXT: and x14, x15, #0x1
-; CHECK-GI-NEXT: add x17, x9, x17
-; CHECK-GI-NEXT: add x14, x11, x14
-; CHECK-GI-NEXT: mov w11, #100 // =0x64
-; CHECK-GI-NEXT: umulh x13, x0, xzr
-; CHECK-GI-NEXT: umulh x16, x2, x10
-; CHECK-GI-NEXT: adds x18, x18, x5
-; CHECK-GI-NEXT: mul x15, x3, x8
-; CHECK-GI-NEXT: add x13, x17, x13
-; CHECK-GI-NEXT: cset w17, hs
-; CHECK-GI-NEXT: umulh x10, x3, x10
-; CHECK-GI-NEXT: add x13, x13, x14
-; CHECK-GI-NEXT: and x17, x17, #0x1
-; CHECK-GI-NEXT: cmn x18, x16
-; CHECK-GI-NEXT: sub x18, x2, x2
-; CHECK-GI-NEXT: umulh x16, x2, x8
+; CHECK-GI-NEXT: adds x13, x13, x16
+; CHECK-GI-NEXT: mul x4, x3, x10
+; CHECK-GI-NEXT: add x12, x12, x14
; CHECK-GI-NEXT: cset w14, hs
-; CHECK-GI-NEXT: and x14, x14, #0x1
-; CHECK-GI-NEXT: add x15, x15, x18
+; CHECK-GI-NEXT: adds x11, x13, x11
+; CHECK-GI-NEXT: and x13, x14, #0x1
+; CHECK-GI-NEXT: mul x15, x2, x8
+; CHECK-GI-NEXT: cset w14, hs
+; CHECK-GI-NEXT: add x12, x12, x13
+; CHECK-GI-NEXT: and x13, x14, #0x1
+; CHECK-GI-NEXT: add x14, x9, x17
+; CHECK-GI-NEXT: sub x17, x2, x2
+; CHECK-GI-NEXT: umulh x16, x2, x10
+; CHECK-GI-NEXT: add x12, x12, x13
+; CHECK-GI-NEXT: add x13, x14, x18
+; CHECK-GI-NEXT: add x12, x13, x12
; CHECK-GI-NEXT: and x18, xzr, #0x1
-; CHECK-GI-NEXT: add x14, x17, x14
+; CHECK-GI-NEXT: mul x5, x3, x8
+; CHECK-GI-NEXT: extr x11, x12, x11, #4
+; CHECK-GI-NEXT: adds x13, x4, x15
+; CHECK-GI-NEXT: umulh x14, x3, x10
+; CHECK-GI-NEXT: cset w15, hs
+; CHECK-GI-NEXT: mov w10, #100 // =0x64
+; CHECK-GI-NEXT: cmn x13, x16
+; CHECK-GI-NEXT: and x15, x15, #0x1
+; CHECK-GI-NEXT: umulh x13, x2, x8
+; CHECK-GI-NEXT: cset w16, hs
+; CHECK-GI-NEXT: add x17, x5, x17
+; CHECK-GI-NEXT: and x16, x16, #0x1
; CHECK-GI-NEXT: umulh x8, x3, x8
+; CHECK-GI-NEXT: add x15, x15, x16
+; CHECK-GI-NEXT: adds x14, x17, x14
; CHECK-GI-NEXT: and x17, xzr, #0x1
-; CHECK-GI-NEXT: adds x10, x15, x10
-; CHECK-GI-NEXT: add x15, x17, x18
+; CHECK-GI-NEXT: add x16, x18, x17
; CHECK-GI-NEXT: cset w17, hs
-; CHECK-GI-NEXT: umulh x18, x2, xzr
+; CHECK-GI-NEXT: adds x13, x14, x13
+; CHECK-GI-NEXT: umulh x14, x2, xzr
; CHECK-GI-NEXT: and x17, x17, #0x1
-; CHECK-GI-NEXT: adds x10, x10, x16
-; CHECK-GI-NEXT: lsl x16, x13, #60
-; CHECK-GI-NEXT: add x15, x15, x17
-; CHECK-GI-NEXT: cset w17, hs
-; CHECK-GI-NEXT: adds x10, x10, x14
-; CHECK-GI-NEXT: and x14, x17, #0x1
+; CHECK-GI-NEXT: cset w18, hs
+; CHECK-GI-NEXT: adds x13, x13, x15
+; CHECK-GI-NEXT: add x15, x16, x17
+; CHECK-GI-NEXT: and x16, x18, #0x1
; CHECK-GI-NEXT: cset w17, hs
; CHECK-GI-NEXT: add x8, x9, x8
-; CHECK-GI-NEXT: add x14, x15, x14
-; CHECK-GI-NEXT: and x15, x17, #0x1
-; CHECK-GI-NEXT: orr x12, x16, x12, lsr #4
-; CHECK-GI-NEXT: add x9, x14, x15
-; CHECK-GI-NEXT: add x8, x8, x18
-; CHECK-GI-NEXT: add x8, x8, x9
-; CHECK-GI-NEXT: lsr x9, x13, #4
-; CHECK-GI-NEXT: umulh x14, x12, x11
-; CHECK-GI-NEXT: lsl x13, x8, #60
+; CHECK-GI-NEXT: add x15, x15, x16
+; CHECK-GI-NEXT: and x16, x17, #0x1
+; CHECK-GI-NEXT: lsr x9, x12, #4
+; CHECK-GI-NEXT: add x15, x15, x16
+; CHECK-GI-NEXT: umulh x17, x11, x10
+; CHECK-GI-NEXT: add x8, x8, x14
+; CHECK-GI-NEXT: add x8, x8, x15
+; CHECK-GI-NEXT: mul x11, x11, x10
+; CHECK-GI-NEXT: extr x12, x8, x13, #4
; CHECK-GI-NEXT: lsr x8, x8, #4
-; CHECK-GI-NEXT: mul x12, x12, x11
-; CHECK-GI-NEXT: orr x10, x13, x10, lsr #4
-; CHECK-GI-NEXT: madd x9, x9, x11, x14
-; CHECK-GI-NEXT: umulh x13, x10, x11
-; CHECK-GI-NEXT: subs x0, x0, x12
-; CHECK-GI-NEXT: mul x10, x10, x11
+; CHECK-GI-NEXT: madd x9, x9, x10, x17
+; CHECK-GI-NEXT: umulh x13, x12, x10
+; CHECK-GI-NEXT: subs x0, x0, x11
+; CHECK-GI-NEXT: mul x12, x12, x10
; CHECK-GI-NEXT: sbc x1, x1, x9
-; CHECK-GI-NEXT: madd x8, x8, x11, x13
-; CHECK-GI-NEXT: subs x2, x2, x10
+; CHECK-GI-NEXT: madd x8, x8, x10, x13
+; CHECK-GI-NEXT: subs x2, x2, x12
; CHECK-GI-NEXT: sbc x3, x3, x8
; CHECK-GI-NEXT: ret
entry:
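Throughout rem-by-const the remainder is formed as a - (a udiv C) * C, with the division done by a multiply-high reciprocal and the multiply by 7 strength-reduced to (q << 3) - q. A self-contained i64 sketch of the same recipe for C = 7; the magic constant is the standard 64-bit one for dividing by 7 and is stated here as an assumption, not taken from the test:

  define i64 @urem7_ref(i64 %a) {
    ; hi = mulhi(a, 0x2492492492492493), i.e. ceil(2^64 / 7), done via a
    ; widening multiply
    %wa = zext i64 %a to i128
    %mul = mul i128 %wa, 2635249153387078803
    %hiw = lshr i128 %mul, 64
    %hi = trunc i128 %hiw to i64
    ; q = ((a - hi) >> 1 + hi) >> 2, the subs/extr/adds/extr shape above
    %dif = sub i64 %a, %hi
    %dif2 = lshr i64 %dif, 1
    %mid = add i64 %dif2, %hi
    %q = lshr i64 %mid, 2
    ; r = a - q*7, with q*7 formed as (q << 3) - q like the lsl/sub pairs
    %q8 = shl i64 %q, 3
    %q7 = sub i64 %q8, %q
    %r = sub i64 %a, %q7
    ret i64 %r
  }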
diff --git a/llvm/test/CodeGen/AArch64/sme-zt0-state.ll b/llvm/test/CodeGen/AArch64/sme-zt0-state.ll
index 2583a93..5b81f5d 100644
--- a/llvm/test/CodeGen/AArch64/sme-zt0-state.ll
+++ b/llvm/test/CodeGen/AArch64/sme-zt0-state.ll
@@ -426,3 +426,21 @@ define void @zt0_multiple_private_za_calls(ptr %callee) "aarch64_in_zt0" nounwin
call void %callee()
ret void
}
+
+define void @disable_tailcallopt(ptr %callee) "aarch64_inout_zt0" nounwind {
+; CHECK-COMMON-LABEL: disable_tailcallopt:
+; CHECK-COMMON: // %bb.0:
+; CHECK-COMMON-NEXT: sub sp, sp, #80
+; CHECK-COMMON-NEXT: stp x30, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-COMMON-NEXT: mov x19, sp
+; CHECK-COMMON-NEXT: str zt0, [x19]
+; CHECK-COMMON-NEXT: smstop za
+; CHECK-COMMON-NEXT: blr x0
+; CHECK-COMMON-NEXT: smstart za
+; CHECK-COMMON-NEXT: ldr zt0, [x19]
+; CHECK-COMMON-NEXT: ldp x30, x19, [sp, #64] // 16-byte Folded Reload
+; CHECK-COMMON-NEXT: add sp, sp, #80
+; CHECK-COMMON-NEXT: ret
+ tail call void %callee()
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.ll
index 221e2fd..09e1fca 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.ll
@@ -1200,7 +1200,7 @@ define amdgpu_ps void @atomic_cmpswap_i32_1d_no_return(<8 x i32> inreg %rsrc, i3
; GFX6-NEXT: s_mov_b32 s5, s7
; GFX6-NEXT: s_mov_b32 s6, s8
; GFX6-NEXT: s_mov_b32 s7, s9
-; GFX6-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm glc
+; GFX6-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_cmpswap_i32_1d_no_return:
@@ -1213,7 +1213,7 @@ define amdgpu_ps void @atomic_cmpswap_i32_1d_no_return(<8 x i32> inreg %rsrc, i3
; GFX8-NEXT: s_mov_b32 s5, s7
; GFX8-NEXT: s_mov_b32 s6, s8
; GFX8-NEXT: s_mov_b32 s7, s9
-; GFX8-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm glc
+; GFX8-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm
; GFX8-NEXT: s_endpgm
;
; GFX900-LABEL: atomic_cmpswap_i32_1d_no_return:
@@ -1226,7 +1226,7 @@ define amdgpu_ps void @atomic_cmpswap_i32_1d_no_return(<8 x i32> inreg %rsrc, i3
; GFX900-NEXT: s_mov_b32 s5, s7
; GFX900-NEXT: s_mov_b32 s6, s8
; GFX900-NEXT: s_mov_b32 s7, s9
-; GFX900-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm glc
+; GFX900-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm
; GFX900-NEXT: s_endpgm
;
; GFX90A-LABEL: atomic_cmpswap_i32_1d_no_return:
@@ -1239,7 +1239,7 @@ define amdgpu_ps void @atomic_cmpswap_i32_1d_no_return(<8 x i32> inreg %rsrc, i3
; GFX90A-NEXT: s_mov_b32 s5, s7
; GFX90A-NEXT: s_mov_b32 s6, s8
; GFX90A-NEXT: s_mov_b32 s7, s9
-; GFX90A-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm glc
+; GFX90A-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm
; GFX90A-NEXT: s_endpgm
;
; GFX10PLUS-LABEL: atomic_cmpswap_i32_1d_no_return:
@@ -1252,7 +1252,7 @@ define amdgpu_ps void @atomic_cmpswap_i32_1d_no_return(<8 x i32> inreg %rsrc, i3
; GFX10PLUS-NEXT: s_mov_b32 s5, s7
; GFX10PLUS-NEXT: s_mov_b32 s6, s8
; GFX10PLUS-NEXT: s_mov_b32 s7, s9
-; GFX10PLUS-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D unorm glc
+; GFX10PLUS-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D unorm
; GFX10PLUS-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_cmpswap_i32_1d_no_return:
@@ -1265,7 +1265,7 @@ define amdgpu_ps void @atomic_cmpswap_i32_1d_no_return(<8 x i32> inreg %rsrc, i3
; GFX12-NEXT: s_mov_b32 s5, s7
; GFX12-NEXT: s_mov_b32 s6, s8
; GFX12-NEXT: s_mov_b32 s7, s9
-; GFX12-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN
+; GFX12-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D
; GFX12-NEXT: s_endpgm
main_body:
%v = call i32 @llvm.amdgcn.image.atomic.cmpswap.1d.i32.i32(i32 %cmp, i32 %swap, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
@@ -3194,7 +3194,7 @@ define amdgpu_ps void @atomic_cmpswap_i64_1d_no_return(<8 x i32> inreg %rsrc, i6
; GFX6-NEXT: s_mov_b32 s5, s7
; GFX6-NEXT: s_mov_b32 s6, s8
; GFX6-NEXT: s_mov_b32 s7, s9
-; GFX6-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm glc
+; GFX6-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_cmpswap_i64_1d_no_return:
@@ -3207,7 +3207,7 @@ define amdgpu_ps void @atomic_cmpswap_i64_1d_no_return(<8 x i32> inreg %rsrc, i6
; GFX8-NEXT: s_mov_b32 s5, s7
; GFX8-NEXT: s_mov_b32 s6, s8
; GFX8-NEXT: s_mov_b32 s7, s9
-; GFX8-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm glc
+; GFX8-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm
; GFX8-NEXT: s_endpgm
;
; GFX900-LABEL: atomic_cmpswap_i64_1d_no_return:
@@ -3220,7 +3220,7 @@ define amdgpu_ps void @atomic_cmpswap_i64_1d_no_return(<8 x i32> inreg %rsrc, i6
; GFX900-NEXT: s_mov_b32 s5, s7
; GFX900-NEXT: s_mov_b32 s6, s8
; GFX900-NEXT: s_mov_b32 s7, s9
-; GFX900-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm glc
+; GFX900-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm
; GFX900-NEXT: s_endpgm
;
; GFX90A-LABEL: atomic_cmpswap_i64_1d_no_return:
@@ -3233,7 +3233,7 @@ define amdgpu_ps void @atomic_cmpswap_i64_1d_no_return(<8 x i32> inreg %rsrc, i6
; GFX90A-NEXT: s_mov_b32 s5, s7
; GFX90A-NEXT: s_mov_b32 s6, s8
; GFX90A-NEXT: s_mov_b32 s7, s9
-; GFX90A-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm glc
+; GFX90A-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm
; GFX90A-NEXT: s_endpgm
;
; GFX10PLUS-LABEL: atomic_cmpswap_i64_1d_no_return:
@@ -3246,7 +3246,7 @@ define amdgpu_ps void @atomic_cmpswap_i64_1d_no_return(<8 x i32> inreg %rsrc, i6
; GFX10PLUS-NEXT: s_mov_b32 s5, s7
; GFX10PLUS-NEXT: s_mov_b32 s6, s8
; GFX10PLUS-NEXT: s_mov_b32 s7, s9
-; GFX10PLUS-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm glc
+; GFX10PLUS-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm
; GFX10PLUS-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_cmpswap_i64_1d_no_return:
@@ -3259,7 +3259,7 @@ define amdgpu_ps void @atomic_cmpswap_i64_1d_no_return(<8 x i32> inreg %rsrc, i6
; GFX12-NEXT: s_mov_b32 s5, s7
; GFX12-NEXT: s_mov_b32 s6, s8
; GFX12-NEXT: s_mov_b32 s7, s9
-; GFX12-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN
+; GFX12-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D
; GFX12-NEXT: s_endpgm
main_body:
%v = call i64 @llvm.amdgcn.image.atomic.cmpswap.1d.i64.i32(i64 %cmp, i64 %swap, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
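The dropped glc and TH_ATOMIC_RETURN bits follow from the intrinsic result being dead. The contrast these test pairs encode, sketched with the i32 variant (function names illustrative; the float bitcast is the usual way such tests return a value from an amdgpu_ps shader):

  ; result used: the returned value must be requested, so the selected
  ; instruction keeps glc / TH_ATOMIC_RETURN
  define amdgpu_ps float @cmpswap_ret_sketch(<8 x i32> inreg %rsrc, i32 %cmp, i32 %swap, i32 %s) {
    %v = call i32 @llvm.amdgcn.image.atomic.cmpswap.1d.i32.i32(i32 %cmp, i32 %swap, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
    %out = bitcast i32 %v to float
    ret float %out
  }
  ; result dead: the no-return encoding suffices and the bit is dropped
  define amdgpu_ps void @cmpswap_noret_sketch(<8 x i32> inreg %rsrc, i32 %cmp, i32 %swap, i32 %s) {
    %v = call i32 @llvm.amdgcn.image.atomic.cmpswap.1d.i32.i32(i32 %cmp, i32 %swap, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
    ret void
  }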
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.mir
index 292fa4b..4f160b6 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.mir
@@ -25,6 +25,7 @@ body: |
; GFX6-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V2_V1_si]].sub0
; GFX6-NEXT: $vgpr0 = COPY [[COPY3]]
; GFX6-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
; GFX8-LABEL: name: atomic_cmpswap_i32_1d
; GFX8: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2
; GFX8-NEXT: {{ $}}
@@ -35,6 +36,7 @@ body: |
; GFX8-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V2_V1_vi]].sub0
; GFX8-NEXT: $vgpr0 = COPY [[COPY3]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
; GFX10-LABEL: name: atomic_cmpswap_i32_1d
; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
@@ -45,6 +47,7 @@ body: |
; GFX10-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx10_]].sub0
; GFX10-NEXT: $vgpr0 = COPY [[COPY3]]
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
; GFX11-LABEL: name: atomic_cmpswap_i32_1d
; GFX11: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
@@ -55,6 +58,7 @@ body: |
; GFX11-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx11_]].sub0
; GFX11-NEXT: $vgpr0 = COPY [[COPY3]]
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
; GFX12-LABEL: name: atomic_cmpswap_i32_1d
; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2
; GFX12-NEXT: {{ $}}
@@ -89,39 +93,43 @@ body: |
; GFX6-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
; GFX6-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; GFX6-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX6-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V2_V1_si:%[0-9]+]]:vreg_64 = IMAGE_ATOMIC_CMPSWAP_V2_V1_si [[COPY1]], [[COPY2]], [[COPY]], 3, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8)
+ ; GFX6-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V2_V1_si [[COPY1]], [[COPY2]], [[COPY]], 3, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8)
; GFX6-NEXT: S_ENDPGM 0
+ ;
; GFX8-LABEL: name: atomic_cmpswap_i32_1d_no_return
; GFX8: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2
; GFX8-NEXT: {{ $}}
; GFX8-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
; GFX8-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX8-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V1_V1_vi:%[0-9]+]]:vreg_64 = IMAGE_ATOMIC_CMPSWAP_V2_V1_vi [[COPY1]], [[COPY2]], [[COPY]], 3, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8)
+ ; GFX8-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V2_V1_vi [[COPY1]], [[COPY2]], [[COPY]], 3, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
+ ;
; GFX10-LABEL: name: atomic_cmpswap_i32_1d_no_return
; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
; GFX10-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; GFX10-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX10-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx10_:%[0-9]+]]:vreg_64 = IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx10 [[COPY1]], [[COPY2]], [[COPY]], 3, 0, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8)
+ ; GFX10-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V2_V1_gfx10 [[COPY1]], [[COPY2]], [[COPY]], 3, 0, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8)
; GFX10-NEXT: S_ENDPGM 0
+ ;
; GFX11-LABEL: name: atomic_cmpswap_i32_1d_no_return
; GFX11: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
; GFX11-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; GFX11-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX11-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx11_:%[0-9]+]]:vreg_64 = IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx11 [[COPY1]], [[COPY2]], [[COPY]], 3, 0, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8)
+ ; GFX11-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V2_V1_gfx11 [[COPY1]], [[COPY2]], [[COPY]], 3, 0, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8)
; GFX11-NEXT: S_ENDPGM 0
+ ;
; GFX12-LABEL: name: atomic_cmpswap_i32_1d_no_return
; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
; GFX12-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx12_:%[0-9]+]]:vreg_64 = IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx12 [[COPY1]], [[COPY2]], [[COPY]], 3, 0, 1, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8)
+ ; GFX12-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V2_V1_gfx12 [[COPY1]], [[COPY2]], [[COPY]], 3, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8)
; GFX12-NEXT: S_ENDPGM 0
%0:sgpr(<8 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
%1:vgpr(<2 x s32>) = COPY $vgpr0_vgpr1
@@ -150,6 +158,7 @@ body: |
; GFX6-NEXT: [[COPY3:%[0-9]+]]:vreg_64 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V4_V1_si]].sub0_sub1
; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[COPY3]]
; GFX6-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0_vgpr1
+ ;
; GFX8-LABEL: name: atomic_cmpswap_i64_1d
; GFX8: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
; GFX8-NEXT: {{ $}}
@@ -160,6 +169,7 @@ body: |
; GFX8-NEXT: [[COPY3:%[0-9]+]]:vreg_64 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V4_V1_vi]].sub0_sub1
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[COPY3]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0_vgpr1
+ ;
; GFX10-LABEL: name: atomic_cmpswap_i64_1d
; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
; GFX10-NEXT: {{ $}}
@@ -170,6 +180,7 @@ body: |
; GFX10-NEXT: [[COPY3:%[0-9]+]]:vreg_64 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx10_]].sub0_sub1
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[COPY3]]
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0_vgpr1
+ ;
; GFX11-LABEL: name: atomic_cmpswap_i64_1d
; GFX11: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
; GFX11-NEXT: {{ $}}
@@ -180,6 +191,7 @@ body: |
; GFX11-NEXT: [[COPY3:%[0-9]+]]:vreg_64 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx11_]].sub0_sub1
; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[COPY3]]
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0_vgpr1
+ ;
; GFX12-LABEL: name: atomic_cmpswap_i64_1d
; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
; GFX12-NEXT: {{ $}}
@@ -214,39 +226,43 @@ body: |
; GFX6-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
; GFX6-NEXT: [[COPY1:%[0-9]+]]:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX6-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX6-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V4_V1_si:%[0-9]+]]:vreg_128 = IMAGE_ATOMIC_CMPSWAP_V4_V1_si [[COPY1]], [[COPY2]], [[COPY]], 15, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8)
+ ; GFX6-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V4_V1_si [[COPY1]], [[COPY2]], [[COPY]], 15, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8)
; GFX6-NEXT: S_ENDPGM 0
+ ;
; GFX8-LABEL: name: atomic_cmpswap_i64_1d_no_return
; GFX8: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
; GFX8-NEXT: {{ $}}
; GFX8-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
; GFX8-NEXT: [[COPY1:%[0-9]+]]:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX8-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX8-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V4_V1_vi:%[0-9]+]]:vreg_128 = IMAGE_ATOMIC_CMPSWAP_V4_V1_vi [[COPY1]], [[COPY2]], [[COPY]], 15, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8)
+ ; GFX8-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V4_V1_vi [[COPY1]], [[COPY2]], [[COPY]], 15, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
+ ;
; GFX10-LABEL: name: atomic_cmpswap_i64_1d_no_return
; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
; GFX10-NEXT: [[COPY1:%[0-9]+]]:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX10-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx10_:%[0-9]+]]:vreg_128 = IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx10 [[COPY1]], [[COPY2]], [[COPY]], 15, 0, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8)
+ ; GFX10-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V4_V1_gfx10 [[COPY1]], [[COPY2]], [[COPY]], 15, 0, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8)
; GFX10-NEXT: S_ENDPGM 0
+ ;
; GFX11-LABEL: name: atomic_cmpswap_i64_1d_no_return
; GFX11: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
; GFX11-NEXT: [[COPY1:%[0-9]+]]:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX11-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx11_:%[0-9]+]]:vreg_128 = IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx11 [[COPY1]], [[COPY2]], [[COPY]], 15, 0, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8)
+ ; GFX11-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V4_V1_gfx11 [[COPY1]], [[COPY2]], [[COPY]], 15, 0, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8)
; GFX11-NEXT: S_ENDPGM 0
+ ;
; GFX12-LABEL: name: atomic_cmpswap_i64_1d_no_return
; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
; GFX12-NEXT: [[COPY1:%[0-9]+]]:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx12_:%[0-9]+]]:vreg_128 = IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx12 [[COPY1]], [[COPY2]], [[COPY]], 15, 0, 1, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8)
+ ; GFX12-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V4_V1_gfx12 [[COPY1]], [[COPY2]], [[COPY]], 15, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8)
; GFX12-NEXT: S_ENDPGM 0
%0:sgpr(<8 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
%1:vgpr(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
index 6c4f504..33ce278 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -23,7 +23,9 @@ define protected amdgpu_kernel void @trivial_waterfall_eq_zero(ptr addrspace(1)
; PASS-CHECK: [[WHILE]]:
; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 0
; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
; PASS-CHECK: [[IF]]:
; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
@@ -75,7 +77,9 @@ define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(ptr addrs
; PASS-CHECK: [[WHILE]]:
; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp eq i64 0, [[BALLOT]]
; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
; PASS-CHECK: [[IF]]:
; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
@@ -126,6 +130,8 @@ define protected amdgpu_kernel void @trivial_waterfall_ne_zero(ptr addrspace(1)
; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
; PASS-CHECK: [[WHILE]]:
; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
+; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp ne i64 0, [[BALLOT]]
; PASS-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
; PASS-CHECK: [[IF]]:
; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
@@ -175,6 +181,8 @@ define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(ptr addrspac
; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
; PASS-CHECK: [[WHILE]]:
; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
+; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp ne i64 [[BALLOT]], 0
; PASS-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
; PASS-CHECK: [[IF]]:
; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
@@ -225,7 +233,9 @@ define protected amdgpu_kernel void @trivial_uniform_waterfall(ptr addrspace(1)
; PASS-CHECK: [[WHILE]]:
; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ]
; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 0
; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF:.*]]
; PASS-CHECK: [[IF]]:
; PASS-CHECK-NEXT: [[IS_FIRST_ACTIVE_ID:%.*]] = icmp eq i32 0, 0
@@ -292,7 +302,9 @@ define protected amdgpu_kernel void @uniform_waterfall(ptr addrspace(1) %out, i3
; PASS-CHECK: [[WHILE]]:
; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ]
; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 0
; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF:.*]]
; PASS-CHECK: [[IF]]:
; PASS-CHECK-NEXT: [[IS_FIRST_ACTIVE_ID:%.*]] = icmp eq i32 [[MYMASK]], [[MYMASK]]
@@ -359,7 +371,9 @@ define protected amdgpu_kernel void @trivial_waterfall_eq_zero_i32(ptr addrspace
; PASS-CHECK: [[WHILE]]:
; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[NOT_DONE]])
; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp eq i32 [[BALLOT]], 0
; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
; PASS-CHECK: [[IF]]:
; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
@@ -410,6 +424,8 @@ define protected amdgpu_kernel void @trivial_waterfall_ne_zero_i32(ptr addrspace
; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
; PASS-CHECK: [[WHILE]]:
; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[DONE]])
+; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp ne i32 0, [[BALLOT]]
; PASS-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
; PASS-CHECK: [[IF]]:
; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
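For context on the new BALLOT/IS_DONE lines: the pass proves the exit condition of a trivial waterfall loop uniform and branches on it directly, leaving the ballot that used to feed the exit test behind as dead code (which is why the PASS-CHECK lines show it still present but unused). A sketch of the pre-pass loop shape, assuming a wave64 target:

  declare i64 @llvm.amdgcn.ballot.i64(i1)

  define amdgpu_kernel void @waterfall_sketch(ptr addrspace(1) %out) {
  entry:
    br label %while
  while:
    %done = phi i1 [ false, %entry ], [ true, %if ]
    %not.done = xor i1 %done, true
    %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %not.done)
    ; %done is uniform, so "no lane still active" is just %done itself;
    ; after the pass the branch uses that value and %ballot goes dead
    %is.done = icmp eq i64 %ballot, 0
    br i1 %is.done, label %exit, label %if
  if:
    store i32 5, ptr addrspace(1) %out, align 4
    br label %while
  exit:
    ret void
  }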
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
index aa11574..a3e42e5 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -595,6 +595,8 @@ define amdgpu_kernel void @ballot_i32(i32 %v, ptr addrspace(1) %out) {
; PASS-CHECK-LABEL: define amdgpu_kernel void @ballot_i32(
; PASS-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
; PASS-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1
+; PASS-CHECK-NEXT: [[BALLOT:%.*]] = call i32 @llvm.amdgcn.ballot.i32(i1 [[C]])
+; PASS-CHECK-NEXT: [[BALLOT_NE_ZERO:%.*]] = icmp ne i32 [[BALLOT]], 0
; PASS-CHECK-NEXT: store i1 [[C]], ptr addrspace(1) [[OUT]], align 1
; PASS-CHECK-NEXT: ret void
;
@@ -623,6 +625,8 @@ define amdgpu_kernel void @ballot_i64(i32 %v, ptr addrspace(1) %out) {
; PASS-CHECK-LABEL: define amdgpu_kernel void @ballot_i64(
; PASS-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
; PASS-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1
+; PASS-CHECK-NEXT: [[BALLOT:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[C]])
+; PASS-CHECK-NEXT: [[BALLOT_NE_ZERO:%.*]] = icmp ne i64 [[BALLOT]], 0
; PASS-CHECK-NEXT: store i1 [[C]], ptr addrspace(1) [[OUT]], align 1
; PASS-CHECK-NEXT: ret void
;
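The straight-line folds here are the same idea without the loop: for a provably uniform condition, ballot(%c) != 0 collapses to %c, so the user is rewritten while the ballot call itself survives only as dead code. The pre-combine input looks roughly like this (kernel arguments are uniform by construction; wave32 assumed for the i32 variant):

  declare i32 @llvm.amdgcn.ballot.i32(i1)

  define amdgpu_kernel void @ballot_fold_sketch(i32 %v, ptr addrspace(1) %out) {
    %c = trunc i32 %v to i1
    %b = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
    %ne = icmp ne i32 %b, 0     ; folds to %c, which the store then uses
    store i1 %ne, ptr addrspace(1) %out, align 1
    ret void
  }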
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.dim.gfx90a.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.dim.gfx90a.ll
index 49607e3..83f0229 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.dim.gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.dim.gfx90a.ll
@@ -92,8 +92,7 @@ define amdgpu_ps void @atomic_swap_1d_agpr_noret(<8 x i32> inreg %rsrc, i32 %s)
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def a0
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0
-; GFX90A-NEXT: image_atomic_swap v1, v0, s[0:7] dmask:0x1 unorm glc
+; GFX90A-NEXT: image_atomic_swap a0, v0, s[0:7] dmask:0x1 unorm
; GFX90A-NEXT: s_endpgm
%data = call i32 asm "; def $0", "=a"()
%unused = call i32 @llvm.amdgcn.image.atomic.swap.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
@@ -106,8 +105,7 @@ define amdgpu_ps void @atomic_add_2d_agpr_noret(<8 x i32> inreg %rsrc, i32 %s, i
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def a0
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0
-; GFX90A-NEXT: image_atomic_add v2, v[0:1], s[0:7] dmask:0x1 unorm glc
+; GFX90A-NEXT: image_atomic_add a0, v[0:1], s[0:7] dmask:0x1 unorm
; GFX90A-NEXT: s_endpgm
%data = call i32 asm "; def $0", "=a"()
%unused = call i32 @llvm.amdgcn.image.atomic.add.2d.i32.i32(i32 %data, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
@@ -123,9 +121,7 @@ define amdgpu_ps void @atomic_cmpswap_1d_agpr_noret(<8 x i32> inreg %rsrc, i32 %
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def a1
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0
-; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1
-; GFX90A-NEXT: image_atomic_cmpswap v[2:3], v0, s[0:7] dmask:0x3 unorm glc
+; GFX90A-NEXT: image_atomic_cmpswap a[0:1], v0, s[0:7] dmask:0x3 unorm
; GFX90A-NEXT: s_endpgm
%cmp = call i32 asm "; def $0", "=a"()
%swap = call i32 asm "; def $0", "=a"()
@@ -139,9 +135,7 @@ define amdgpu_ps void @atomic_swap_1d_i64_agpr_noret(<8 x i32> inreg %rsrc, i32
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def a[0:1]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1
-; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0
-; GFX90A-NEXT: image_atomic_swap v[2:3], v0, s[0:7] dmask:0x3 unorm glc
+; GFX90A-NEXT: image_atomic_swap a[0:1], v0, s[0:7] dmask:0x3 unorm
; GFX90A-NEXT: s_endpgm
%data = call i64 asm "; def $0", "=a"()
%unused = call i64 @llvm.amdgcn.image.atomic.swap.1d.i64.i32(i64 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
@@ -154,14 +148,10 @@ define amdgpu_ps void @atomic_cmpswap_1d_64_agpr_noret(<8 x i32> inreg %rsrc, i3
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def a[0:1]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1
-; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0
; GFX90A-NEXT: ;;#ASMSTART
-; GFX90A-NEXT: ; def a[0:1]
+; GFX90A-NEXT: ; def a[2:3]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_accvgpr_read_b32 v5, a1
-; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0
-; GFX90A-NEXT: image_atomic_cmpswap v[2:5], v0, s[0:7] dmask:0xf unorm glc
+; GFX90A-NEXT: image_atomic_cmpswap a[0:3], v0, s[0:7] dmask:0xf unorm
; GFX90A-NEXT: s_endpgm
%cmp = call i64 asm "; def $0", "=a"()
%swap = call i64 asm "; def $0", "=a"()
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.noret.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.noret.ll
new file mode 100644
index 0000000..6c58a1a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.noret.ll
@@ -0,0 +1,581 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX10PLUS-GISE %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX10PLUS %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12-GISE %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12 %s
+
+define amdgpu_ps void @atomic_swap_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_swap_1d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_swap_1d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_swap_1d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_swap_1d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.swap.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_swap_1d_i64(<8 x i32> inreg %rsrc, i64 %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_swap_1d_i64:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_swap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_swap_1d_i64:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_swap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_swap_1d_i64:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_swap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_swap_1d_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_swap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i64 @llvm.amdgcn.image.atomic.swap.1d.i64.i32(i64 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_swap_1d_float(<8 x i32> inreg %rsrc, float %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_swap_1d_float:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_swap_1d_float:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_swap_1d_float:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_swap_1d_float:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call float @llvm.amdgcn.image.atomic.swap.1d.f32.i32(float %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_add_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_add_1d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_add_1d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_add v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_add_1d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_add_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_add_1d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_add_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.add.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_sub_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_sub_1d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_sub v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_sub_1d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_sub v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_sub_1d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_sub_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_sub_1d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_sub_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.sub.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_smin_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_smin_1d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_smin v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_smin_1d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_smin v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_smin_1d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_min_int v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_smin_1d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_min_int v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.smin.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_umin_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_umin_1d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_umin v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_umin_1d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_umin v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_umin_1d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_min_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_umin_1d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_min_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.umin.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_smax_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_smax_1d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_smax v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_smax_1d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_smax v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_smax_1d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_max_int v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_smax_1d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_max_int v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.smax.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_umax_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_umax_1d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_umax v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_umax_1d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_umax v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_umax_1d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_max_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_umax_1d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_max_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.umax.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_and_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_and_1d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_and v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_and_1d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_and v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_and_1d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_and v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_and_1d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_and v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.and.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_or_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_or_1d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_or v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_or_1d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_or v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_or_1d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_or v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_or_1d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_or v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.or.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_xor_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_xor_1d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_xor v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_xor_1d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_xor v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_xor_1d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_xor v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_xor_1d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_xor v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.xor.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_inc_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_inc_1d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_inc v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_inc_1d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_inc v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_inc_1d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_inc_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_inc_1d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_inc_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.inc.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_dec_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_dec_1d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_dec v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_dec_1d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_dec v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_dec_1d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_dec_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_dec_1d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_dec_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.dec.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_cmpswap_1d(<8 x i32> inreg %rsrc, i32 %cmp, i32 %swap, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_cmpswap_1d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_cmpswap_1d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_cmpswap_1d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_cmpswap_1d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.cmpswap.1d.i32.i32(i32 %cmp, i32 %swap, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_cmpswap_1d_64(<8 x i32> inreg %rsrc, i64 %cmp, i64 %swap, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_cmpswap_1d_64:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_cmpswap_1d_64:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_cmpswap_1d_64:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_cmpswap_1d_64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D
+; GFX12-NEXT: s_endpgm
+ %v = call i64 @llvm.amdgcn.image.atomic.cmpswap.1d.i64.i32(i64 %cmp, i64 %swap, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_add_2d(<8 x i32> inreg %rsrc, i32 %data, i32 %s, i32 %t) {
+; GFX10PLUS-GISE-LABEL: atomic_add_2d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_add_2d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_add_2d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_add_uint v0, [v1, v2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_add_2d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_add_uint v0, [v1, v2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.add.2d.i32.i32(i32 %data, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_add_3d(<8 x i32> inreg %rsrc, i32 %data, i32 %s, i32 %t, i32 %r) {
+; GFX10PLUS-GISE-LABEL: atomic_add_3d:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_3D unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_add_3d:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_3D unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_add_3d:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_3D
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_add_3d:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_3D
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.add.3d.i32.i32(i32 %data, i32 %s, i32 %t, i32 %r, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_add_cube(<8 x i32> inreg %rsrc, i32 %data, i32 %s, i32 %t, i32 %face) {
+; GFX10PLUS-GISE-LABEL: atomic_add_cube:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_CUBE unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_add_cube:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_CUBE unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_add_cube:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_CUBE
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_add_cube:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_CUBE
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.add.cube.i32.i32(i32 %data, i32 %s, i32 %t, i32 %face, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_add_1darray(<8 x i32> inreg %rsrc, i32 %data, i32 %s, i32 %slice) {
+; GFX10PLUS-GISE-LABEL: atomic_add_1darray:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D_ARRAY unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_add_1darray:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D_ARRAY unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_add_1darray:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_add_uint v0, [v1, v2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D_ARRAY
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_add_1darray:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_add_uint v0, [v1, v2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D_ARRAY
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.add.1darray.i32.i32(i32 %data, i32 %s, i32 %slice, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_add_2darray(<8 x i32> inreg %rsrc, i32 %data, i32 %s, i32 %t, i32 %slice) {
+; GFX10PLUS-GISE-LABEL: atomic_add_2darray:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_add_2darray:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_add_2darray:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_add_2darray:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.add.2darray.i32.i32(i32 %data, i32 %s, i32 %t, i32 %slice, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_add_2dmsaa(<8 x i32> inreg %rsrc, i32 %data, i32 %s, i32 %t, i32 %fragid) {
+; GFX10PLUS-GISE-LABEL: atomic_add_2dmsaa:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_add_2dmsaa:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_add_2dmsaa:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_add_2dmsaa:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.add.2dmsaa.i32.i32(i32 %data, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @atomic_add_2darraymsaa(<8 x i32> inreg %rsrc, i32 %data, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
+; GFX10PLUS-GISE-LABEL: atomic_add_2darraymsaa:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v[1:4], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_add_2darraymsaa:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_add v0, v[1:4], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_add_2darraymsaa:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_add_uint v0, [v1, v2, v3, v4], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_add_2darraymsaa:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_add_uint v0, [v1, v2, v3, v4], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.add.2darraymsaa.i32.i32(i32 %data, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 0, i32 0)
+ ret void
+}
+
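+; The trailing i32 2 cachepolicy operand sets the slc bit, which is emitted
+; as "slc" on GFX10/GFX11 and as th:TH_ATOMIC_NT on GFX12.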
+define amdgpu_ps void @atomic_add_1d_slc(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
+; GFX10PLUS-GISE-LABEL: atomic_add_1d_slc:
+; GFX10PLUS-GISE: ; %bb.0:
+; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm slc
+; GFX10PLUS-GISE-NEXT: s_endpgm
+;
+; GFX10PLUS-LABEL: atomic_add_1d_slc:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: image_atomic_add v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm slc
+; GFX10PLUS-NEXT: s_endpgm
+;
+; GFX12-GISE-LABEL: atomic_add_1d_slc:
+; GFX12-GISE: ; %bb.0:
+; GFX12-GISE-NEXT: image_atomic_add_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_NT
+; GFX12-GISE-NEXT: s_endpgm
+;
+; GFX12-LABEL: atomic_add_1d_slc:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: image_atomic_add_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_NT
+; GFX12-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.image.atomic.add.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 2)
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.pk.add.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.pk.add.ll
index 3d1d6c8..0ba62e4 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.pk.add.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.pk.add.ll
@@ -41,15 +41,13 @@ main_body:
define amdgpu_ps float @atomic_pk_add_f16_1d_v2_noret(<8 x i32> inreg %rsrc, <2 x half> %data, i32 %s) {
; GFX12-SDAG-LABEL: atomic_pk_add_f16_1d_v2_noret:
; GFX12-SDAG: ; %bb.0: ; %main_body
-; GFX12-SDAG-NEXT: image_atomic_pk_add_f16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN
-; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: image_atomic_pk_add_f16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 1.0
; GFX12-SDAG-NEXT: ; return to shader part epilog
;
; GFX12-GISEL-LABEL: atomic_pk_add_f16_1d_v2_noret:
; GFX12-GISEL: ; %bb.0: ; %main_body
-; GFX12-GISEL-NEXT: image_atomic_pk_add_f16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN
-; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: image_atomic_pk_add_f16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, 1.0
; GFX12-GISEL-NEXT: ; return to shader part epilog
main_body:
@@ -79,15 +77,13 @@ main_body:
define amdgpu_ps float @atomic_pk_add_f16_1d_v4_noret(<8 x i32> inreg %rsrc, <4 x half> %data, i32 %s) {
; GFX12-SDAG-LABEL: atomic_pk_add_f16_1d_v4_noret:
; GFX12-SDAG: ; %bb.0: ; %main_body
-; GFX12-SDAG-NEXT: image_atomic_pk_add_f16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN
-; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: image_atomic_pk_add_f16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D
; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 1.0
; GFX12-SDAG-NEXT: ; return to shader part epilog
;
; GFX12-GISEL-LABEL: atomic_pk_add_f16_1d_v4_noret:
; GFX12-GISEL: ; %bb.0: ; %main_body
-; GFX12-GISEL-NEXT: image_atomic_pk_add_f16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN
-; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: image_atomic_pk_add_f16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D
; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, 1.0
; GFX12-GISEL-NEXT: ; return to shader part epilog
main_body:
@@ -126,15 +122,13 @@ main_body:
define amdgpu_ps float @atomic_pk_add_bf16_1d_v2_noret(<8 x i32> inreg %rsrc, <2 x bfloat> %data, i32 %s) {
; GFX12-SDAG-LABEL: atomic_pk_add_bf16_1d_v2_noret:
; GFX12-SDAG: ; %bb.0: ; %main_body
-; GFX12-SDAG-NEXT: image_atomic_pk_add_bf16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN
-; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: image_atomic_pk_add_bf16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 1.0
; GFX12-SDAG-NEXT: ; return to shader part epilog
;
; GFX12-GISEL-LABEL: atomic_pk_add_bf16_1d_v2_noret:
; GFX12-GISEL: ; %bb.0: ; %main_body
-; GFX12-GISEL-NEXT: image_atomic_pk_add_bf16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN
-; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: image_atomic_pk_add_bf16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, 1.0
; GFX12-GISEL-NEXT: ; return to shader part epilog
main_body:
@@ -173,15 +167,13 @@ main_body:
define amdgpu_ps float @atomic_pk_add_bf16_1d_v4_noret(<8 x i32> inreg %rsrc, <4 x bfloat> %data, i32 %s) {
; GFX12-SDAG-LABEL: atomic_pk_add_bf16_1d_v4_noret:
; GFX12-SDAG: ; %bb.0: ; %main_body
-; GFX12-SDAG-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN
-; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D
; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 1.0
; GFX12-SDAG-NEXT: ; return to shader part epilog
;
; GFX12-GISEL-LABEL: atomic_pk_add_bf16_1d_v4_noret:
; GFX12-GISEL: ; %bb.0: ; %main_body
-; GFX12-GISEL-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN
-; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D
; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, 1.0
; GFX12-GISEL-NEXT: ; return to shader part epilog
main_body:
@@ -192,15 +184,13 @@ main_body:
define amdgpu_ps float @atomic_pk_add_bf16_1d_v4_nt(<8 x i32> inreg %rsrc, <4 x bfloat> %data, i32 %s) {
; GFX12-SDAG-LABEL: atomic_pk_add_bf16_1d_v4_nt:
; GFX12-SDAG: ; %bb.0: ; %main_body
-; GFX12-SDAG-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_NT_RETURN
-; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_NT
; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 1.0
; GFX12-SDAG-NEXT: ; return to shader part epilog
;
; GFX12-GISEL-LABEL: atomic_pk_add_bf16_1d_v4_nt:
; GFX12-GISEL: ; %bb.0: ; %main_body
-; GFX12-GISEL-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_NT_RETURN
-; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_NT
; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, 1.0
; GFX12-GISEL-NEXT: ; return to shader part epilog
main_body:
diff --git a/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll b/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll
index 7a876f6..3544017 100644
--- a/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll
+++ b/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll
@@ -76,6 +76,20 @@ entry:
ret i32 %ret
}
+define noundef i32 @wave_reduce_min(i32 noundef %x) {
+entry:
+ ; CHECK: Function wave_reduce_min : [[WAVE_FLAG]]
+ %ret = call i32 @llvm.dx.wave.reduce.min.i32(i32 %x)
+ ret i32 %ret
+}
+
+define noundef i32 @wave_reduce_umin(i32 noundef %x) {
+entry:
+ ; CHECK: Function wave_reduce_umin : [[WAVE_FLAG]]
+ %ret = call i32 @llvm.dx.wave.reduce.umin.i32(i32 %x)
+ ret i32 %ret
+}
+
define void @wave_active_countbits(i1 %expr) {
entry:
; CHECK: Function wave_active_countbits : [[WAVE_FLAG]]
diff --git a/llvm/test/CodeGen/DirectX/WaveActiveMin.ll b/llvm/test/CodeGen/DirectX/WaveActiveMin.ll
new file mode 100644
index 0000000..24fde48
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/WaveActiveMin.ll
@@ -0,0 +1,143 @@
+; RUN: opt -S -scalarizer -dxil-op-lower -mtriple=dxil-pc-shadermodel6.3-library < %s | FileCheck %s
+
+; Test that for scalar values, WaveActiveMin maps down to the DirectX op
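+; (i32 119 is the WaveActiveOp opcode; the trailing immediates select the
+; reduction: i8 2 is the Min kind, and the final i8 is 0 for the signed and
+; float variants and 1 for the unsigned umin variants)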
+
+define noundef half @wave_active_min_half(half noundef %expr) {
+entry:
+; CHECK: call half @dx.op.waveActiveOp.f16(i32 119, half %expr, i8 2, i8 0){{$}}
+ %ret = call half @llvm.dx.wave.reduce.min.f16(half %expr)
+ ret half %ret
+}
+
+define noundef float @wave_active_min_float(float noundef %expr) {
+entry:
+; CHECK: call float @dx.op.waveActiveOp.f32(i32 119, float %expr, i8 2, i8 0){{$}}
+ %ret = call float @llvm.dx.wave.reduce.min.f32(float %expr)
+ ret float %ret
+}
+
+define noundef double @wave_active_min_double(double noundef %expr) {
+entry:
+; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr, i8 2, i8 0){{$}}
+ %ret = call double @llvm.dx.wave.reduce.min.f64(double %expr)
+ ret double %ret
+}
+
+define noundef i16 @wave_active_min_i16(i16 noundef %expr) {
+entry:
+; CHECK: call i16 @dx.op.waveActiveOp.i16(i32 119, i16 %expr, i8 2, i8 0){{$}}
+ %ret = call i16 @llvm.dx.wave.reduce.min.i16(i16 %expr)
+ ret i16 %ret
+}
+
+define noundef i32 @wave_active_min_i32(i32 noundef %expr) {
+entry:
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr, i8 2, i8 0){{$}}
+ %ret = call i32 @llvm.dx.wave.reduce.min.i32(i32 %expr)
+ ret i32 %ret
+}
+
+define noundef i64 @wave_active_min_i64(i64 noundef %expr) {
+entry:
+; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr, i8 2, i8 0){{$}}
+ %ret = call i64 @llvm.dx.wave.reduce.min.i64(i64 %expr)
+ ret i64 %ret
+}
+
+define noundef i16 @wave_active_umin_i16(i16 noundef %expr) {
+entry:
+; CHECK: call i16 @dx.op.waveActiveOp.i16(i32 119, i16 %expr, i8 2, i8 1){{$}}
+ %ret = call i16 @llvm.dx.wave.reduce.umin.i16(i16 %expr)
+ ret i16 %ret
+}
+
+define noundef i32 @wave_active_umin_i32(i32 noundef %expr) {
+entry:
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr, i8 2, i8 1){{$}}
+ %ret = call i32 @llvm.dx.wave.reduce.umin.i32(i32 %expr)
+ ret i32 %ret
+}
+
+define noundef i64 @wave_active_umin_i64(i64 noundef %expr) {
+entry:
+; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr, i8 2, i8 1){{$}}
+ %ret = call i64 @llvm.dx.wave.reduce.umin.i64(i64 %expr)
+ ret i64 %ret
+}
+
+declare half @llvm.dx.wave.reduce.min.f16(half)
+declare float @llvm.dx.wave.reduce.min.f32(float)
+declare double @llvm.dx.wave.reduce.min.f64(double)
+
+declare i16 @llvm.dx.wave.reduce.min.i16(i16)
+declare i32 @llvm.dx.wave.reduce.min.i32(i32)
+declare i64 @llvm.dx.wave.reduce.min.i64(i64)
+
+declare i16 @llvm.dx.wave.reduce.umin.i16(i16)
+declare i32 @llvm.dx.wave.reduce.umin.i32(i32)
+declare i64 @llvm.dx.wave.reduce.umin.i64(i64)
+
+; Test that for vector values, WaveActiveMin scalarizes and maps down to the
+; DirectX op
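+; (only the per-lane calls are checked here; the scalarizer rebuilds the
+; vector result from them)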
+
+define noundef <2 x half> @wave_active_min_v2half(<2 x half> noundef %expr) {
+entry:
+; CHECK: call half @dx.op.waveActiveOp.f16(i32 119, half %expr.i0, i8 2, i8 0){{$}}
+; CHECK: call half @dx.op.waveActiveOp.f16(i32 119, half %expr.i1, i8 2, i8 0){{$}}
+ %ret = call <2 x half> @llvm.dx.wave.reduce.min.v2f16(<2 x half> %expr)
+ ret <2 x half> %ret
+}
+
+define noundef <3 x i32> @wave_active_min_v3i32(<3 x i32> noundef %expr) {
+entry:
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i0, i8 2, i8 0){{$}}
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i1, i8 2, i8 0){{$}}
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i2, i8 2, i8 0){{$}}
+ %ret = call <3 x i32> @llvm.dx.wave.reduce.min.v3i32(<3 x i32> %expr)
+ ret <3 x i32> %ret
+}
+
+define noundef <4 x double> @wave_active_min_v4f64(<4 x double> noundef %expr) {
+entry:
+; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr.i0, i8 2, i8 0){{$}}
+; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr.i1, i8 2, i8 0){{$}}
+; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr.i2, i8 2, i8 0){{$}}
+; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr.i3, i8 2, i8 0){{$}}
+ %ret = call <4 x double> @llvm.dx.wave.reduce.min.v4f64(<4 x double> %expr)
+ ret <4 x double> %ret
+}
+
+declare <2 x half> @llvm.dx.wave.reduce.min.v2f16(<2 x half>)
+declare <3 x i32> @llvm.dx.wave.reduce.min.v3i32(<3 x i32>)
+declare <4 x double> @llvm.dx.wave.reduce.min.v4f64(<4 x double>)
+
+define noundef <2 x i16> @wave_active_umin_v2i16(<2 x i16> noundef %expr) {
+entry:
+; CHECK: call i16 @dx.op.waveActiveOp.i16(i32 119, i16 %expr.i0, i8 2, i8 1){{$}}
+; CHECK: call i16 @dx.op.waveActiveOp.i16(i32 119, i16 %expr.i1, i8 2, i8 1){{$}}
+ %ret = call <2 x i16> @llvm.dx.wave.reduce.umin.v2i16(<2 x i16> %expr)
+ ret <2 x i16> %ret
+}
+
+define noundef <3 x i32> @wave_active_umin_v3i32(<3 x i32> noundef %expr) {
+entry:
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i0, i8 2, i8 1){{$}}
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i1, i8 2, i8 1){{$}}
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i2, i8 2, i8 1){{$}}
+ %ret = call <3 x i32> @llvm.dx.wave.reduce.umin.v3i32(<3 x i32> %expr)
+ ret <3 x i32> %ret
+}
+
+define noundef <4 x i64> @wave_active_umin_v4i64(<4 x i64> noundef %expr) {
+entry:
+; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr.i0, i8 2, i8 1){{$}}
+; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr.i1, i8 2, i8 1){{$}}
+; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr.i2, i8 2, i8 1){{$}}
+; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr.i3, i8 2, i8 1){{$}}
+ %ret = call <4 x i64> @llvm.dx.wave.reduce.umin.v4i64(<4 x i64> %expr)
+ ret <4 x i64> %ret
+}
+
+declare <2 x i16> @llvm.dx.wave.reduce.umin.v2i16(<2 x i16>)
+declare <3 x i32> @llvm.dx.wave.reduce.umin.v3i32(<3 x i32>)
+declare <4 x i64> @llvm.dx.wave.reduce.umin.v4i64(<4 x i64>)
diff --git a/llvm/test/CodeGen/Hexagon/inst_masked_store_bug1.ll b/llvm/test/CodeGen/Hexagon/inst_masked_store_bug1.ll
new file mode 100644
index 0000000..fcf1246
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/inst_masked_store_bug1.ll
@@ -0,0 +1,94 @@
+;; REQUIRES: asserts
+;; RUN: llc --mtriple=hexagon -mattr=+hvxv79,+hvx-length128b %s -o - | FileCheck %s
+;; Sanity check for lowering masked scatter without assertion errors.
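+;; The CHECK lines at the end of the file spot-check the lowered sequence:
+;; the lane mask is materialized with vand, applied with vmux, and the
+;; result is stored with vmem.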
+
+define void @outer_product(ptr %aptr, ptr %bptr, ptr %cptr, i32 %T, i32 %W) {
+entry:
+ %W.ripple.bcast.splatinsert = insertelement <8 x i32> poison, i32 %W, i64 0
+ %W.ripple.bcast.splat = shufflevector <8 x i32> %W.ripple.bcast.splatinsert, <8 x i32> poison, <8 x i32> zeroinitializer
+ %div1194 = lshr i32 %T, 3
+ %cmp84.not = icmp ult i32 %T, 8
+ br i1 %cmp84.not, label %for.end49, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry
+ %div10195 = lshr i32 %W, 3
+ %cmp1782.not = icmp ult i32 %W, 8
+ %arrayidx27.ripple.LS.dim.slope = mul <8 x i32> %W.ripple.bcast.splat, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %arrayidx27.ripple.LS.dim.slope.ripple.bcast = shufflevector <8 x i32> %arrayidx27.ripple.LS.dim.slope, <8 x i32> poison, <64 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ %arrayidx27.ripple.LS.slope = add <64 x i32> %arrayidx27.ripple.LS.dim.slope.ripple.bcast, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %invariant.gep196 = getelementptr i8, ptr %cptr, <64 x i32> %arrayidx27.ripple.LS.slope
+ br label %for.body
+
+for.body: ; preds = %for.end, %for.body.preheader
+ %ripple.par.iv.085 = phi i32 [ %add48, %for.end ], [ 0, %for.body.preheader ]
+ %mul2 = shl i32 %ripple.par.iv.085, 3
+ br i1 %cmp1782.not, label %for.end, label %for.body18.lr.ph
+
+for.body18.lr.ph: ; preds = %for.body
+ %arrayidx = getelementptr inbounds nuw i8, ptr %aptr, i32 %mul2
+ %mul25 = mul i32 %mul2, %W
+ %gep197 = getelementptr i8, <64 x ptr> %invariant.gep196, i32 %mul25
+ br label %for.body18
+
+for.body18: ; preds = %for.body18, %for.body18.lr.ph
+ %ripple.par.iv15.083 = phi i32 [ 0, %for.body18.lr.ph ], [ %add28, %for.body18 ]
+ %mul19 = shl i32 %ripple.par.iv15.083, 3
+ %.ripple.LS.instance184 = load <8 x i8>, ptr %arrayidx, align 1
+ %.ripple.LS.instance184.ripple.bcast = shufflevector <8 x i8> %.ripple.LS.instance184, <8 x i8> poison, <64 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ %arrayidx21 = getelementptr inbounds nuw i8, ptr %bptr, i32 %mul19
+ %.ripple.LS.instance = load <8 x i8>, ptr %arrayidx21, align 1
+ %.ripple.LS.instance.ripple.bcast = shufflevector <8 x i8> %.ripple.LS.instance, <8 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %mul23.ripple.LS.instance = mul <64 x i8> %.ripple.LS.instance.ripple.bcast, %.ripple.LS.instance184.ripple.bcast
+ %gep = getelementptr i8, <64 x ptr> %gep197, i32 %mul19
+ tail call void @llvm.masked.scatter.v64i8.v64p0(<64 x i8> %mul23.ripple.LS.instance, <64 x ptr> %gep, i32 1, <64 x i1> splat (i1 true))
+ %add28 = add nuw i32 %ripple.par.iv15.083, 1
+ %cmp17 = icmp ult i32 %add28, %div10195
+ br i1 %cmp17, label %for.body18, label %for.end.loopexit
+
+for.end.loopexit: ; preds = %for.body18
+ %0 = shl i32 %add28, 3
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %for.body
+ %ripple.par.iv15.0.lcssa = phi i32 [ 0, %for.body ], [ %0, %for.end.loopexit ]
+ %add30.ripple.bcast.splatinsert = insertelement <8 x i32> poison, i32 %ripple.par.iv15.0.lcssa, i64 0
+ %add30.ripple.bcast.splat = shufflevector <8 x i32> %add30.ripple.bcast.splatinsert, <8 x i32> poison, <8 x i32> zeroinitializer
+ %add30.ripple.LS.instance = or disjoint <8 x i32> %add30.ripple.bcast.splat, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %cmp32.ripple.LS.instance = icmp ne i32 %ripple.par.iv15.0.lcssa, %W
+ %cmp32.ripple.LS.instance.ripple.bcast.splatinsert = insertelement <8 x i1> poison, i1 %cmp32.ripple.LS.instance, i64 0
+ %cmp32.ripple.LS.instance.ripple.bcast.splat = shufflevector <8 x i1> %cmp32.ripple.LS.instance.ripple.bcast.splatinsert, <8 x i1> poison, <8 x i32> zeroinitializer
+ %cmp33.ripple.vectorized = icmp ult <8 x i32> %add30.ripple.LS.instance, %W.ripple.bcast.splat
+ %or.cond.ripple.LS.instance = select <8 x i1> %cmp32.ripple.LS.instance.ripple.bcast.splat, <8 x i1> %cmp33.ripple.vectorized, <8 x i1> zeroinitializer
+ %or.cond.ripple.LS.instance.ripple.bcast = shufflevector <8 x i1> %or.cond.ripple.LS.instance, <8 x i1> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %or.cond.ripple.LS.instance.ripple.reducelog2.shuffle = shufflevector <8 x i1> %or.cond.ripple.LS.instance, <8 x i1> <i1 poison, i1 poison, i1 poison, i1 poison, i1 poison, i1 poison, i1 poison, i1 false>, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 15>
+ %or.cond.ripple.LS.instance.ripple.reducelog2.operator = or <8 x i1> %or.cond.ripple.LS.instance, %or.cond.ripple.LS.instance.ripple.reducelog2.shuffle
+ %or.cond.ripple.LS.instance.ripple.reducelog2.shuffle189 = shufflevector <8 x i1> %or.cond.ripple.LS.instance.ripple.reducelog2.operator, <8 x i1> <i1 poison, i1 poison, i1 poison, i1 poison, i1 poison, i1 poison, i1 false, i1 false>, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 14, i32 15>
+ %or.cond.ripple.LS.instance.ripple.reducelog2.operator190 = or <8 x i1> %or.cond.ripple.LS.instance.ripple.reducelog2.operator, %or.cond.ripple.LS.instance.ripple.reducelog2.shuffle189
+ %or.cond.ripple.LS.instance.ripple.reducelog2.shuffle191 = shufflevector <8 x i1> %or.cond.ripple.LS.instance.ripple.reducelog2.operator190, <8 x i1> poison, <8 x i32> <i32 4, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %or.cond.ripple.LS.instance.ripple.reducelog2.operator192 = or <8 x i1> %or.cond.ripple.LS.instance.ripple.reducelog2.operator190, %or.cond.ripple.LS.instance.ripple.reducelog2.shuffle191
+ %ripple.red.extract.ripple.bcast.splat = shufflevector <8 x i1> %or.cond.ripple.LS.instance.ripple.reducelog2.operator192, <8 x i1> poison, <8 x i32> zeroinitializer
+ %arrayidx34.ripple.branch.clone = getelementptr inbounds nuw i8, ptr %aptr, i32 %mul2
+ %.ripple.LS.instance188.ripple.branch.clone.ripple.masked.load = tail call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %arrayidx34.ripple.branch.clone, i32 1, <8 x i1> %ripple.red.extract.ripple.bcast.splat, <8 x i8> poison)
+ %.ripple.LS.instance188.ripple.bcast.ripple.branch.clone = shufflevector <8 x i8> %.ripple.LS.instance188.ripple.branch.clone.ripple.masked.load, <8 x i8> poison, <64 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ %arrayidx36.ripple.branch.clone = getelementptr inbounds nuw i8, ptr %bptr, i32 %ripple.par.iv15.0.lcssa
+ %.ripple.LS.instance187.ripple.branch.clone.ripple.masked.load = tail call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %arrayidx36.ripple.branch.clone, i32 1, <8 x i1> %or.cond.ripple.LS.instance, <8 x i8> poison)
+ %.ripple.LS.instance187.ripple.bcast.ripple.branch.clone = shufflevector <8 x i8> %.ripple.LS.instance187.ripple.branch.clone.ripple.masked.load, <8 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %mul38.ripple.LS.instance.ripple.branch.clone = mul <64 x i8> %.ripple.LS.instance187.ripple.bcast.ripple.branch.clone, %.ripple.LS.instance188.ripple.bcast.ripple.branch.clone
+ %mul40.ripple.branch.clone = mul i32 %mul2, %W
+ %1 = getelementptr i8, ptr %cptr, i32 %mul40.ripple.branch.clone
+ %arrayidx42.ripple.branch.clone = getelementptr i8, ptr %1, i32 %ripple.par.iv15.0.lcssa
+ %arrayidx42.ripple.LS.instance.ripple.branch.clone = getelementptr i8, ptr %arrayidx42.ripple.branch.clone, <64 x i32> %arrayidx27.ripple.LS.slope
+ tail call void @llvm.masked.scatter.v64i8.v64p0(<64 x i8> %mul38.ripple.LS.instance.ripple.branch.clone, <64 x ptr> %arrayidx42.ripple.LS.instance.ripple.branch.clone, i32 1, <64 x i1> %or.cond.ripple.LS.instance.ripple.bcast)
+ %add48 = add nuw i32 %ripple.par.iv.085, 1
+ %cmp = icmp ult i32 %add48, %div1194
+ br i1 %cmp, label %for.body, label %for.end49
+
+for.end49: ; preds = %for.end, %entry
+ ret void
+}
+
+;; CHECK: outer_product
+;; CHECK: {{r[0-9]+}} = lsr({{r[0-9]+}},#3)
+;; CHECK: {{q[0-9]+}} = vand({{v[0-9]+}},{{r[0-9]+}})
+;; CHECK: {{v[0-9]+}} = vmux(q0,{{v[0-9]+}},{{v[0-9]+}})
+;; CHECK: vmem{{.*}} = {{v[0-9]+}}
diff --git a/llvm/test/CodeGen/LoongArch/lasx/fp-max-min.ll b/llvm/test/CodeGen/LoongArch/lasx/fp-max-min.ll
new file mode 100644
index 0000000..48ec98c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/fp-max-min.ll
@@ -0,0 +1,160 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
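+; The current lowering scalarizes minnum/maxnum: each lane is extracted with
+; xvpickve, reduced with scalar fmin/fmax, and reassembled with vextrins and
+; xvpermi.q.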
+
+define void @minnum_v8f32(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: minnum_v8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a2, 0
+; CHECK-NEXT: xvld $xr1, $a1, 0
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 5
+; CHECK-NEXT: xvpickve.w $xr3, $xr1, 5
+; CHECK-NEXT: fmin.s $fa2, $fa3, $fa2
+; CHECK-NEXT: xvpickve.w $xr3, $xr0, 4
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 4
+; CHECK-NEXT: fmin.s $fa3, $fa4, $fa3
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 16
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 6
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 6
+; CHECK-NEXT: fmin.s $fa2, $fa4, $fa2
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 32
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 7
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 7
+; CHECK-NEXT: fmin.s $fa2, $fa4, $fa2
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 48
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 1
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 1
+; CHECK-NEXT: fmin.s $fa2, $fa4, $fa2
+; CHECK-NEXT: xvpickve.w $xr4, $xr0, 0
+; CHECK-NEXT: xvpickve.w $xr5, $xr1, 0
+; CHECK-NEXT: fmin.s $fa4, $fa5, $fa4
+; CHECK-NEXT: vextrins.w $vr4, $vr2, 16
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 2
+; CHECK-NEXT: xvpickve.w $xr5, $xr1, 2
+; CHECK-NEXT: fmin.s $fa2, $fa5, $fa2
+; CHECK-NEXT: vextrins.w $vr4, $vr2, 32
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 3
+; CHECK-NEXT: xvpickve.w $xr1, $xr1, 3
+; CHECK-NEXT: fmin.s $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.w $vr4, $vr0, 48
+; CHECK-NEXT: xvpermi.q $xr4, $xr3, 2
+; CHECK-NEXT: xvst $xr4, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <8 x float>, ptr %x
+ %v1 = load <8 x float>, ptr %y
+ %r = call <8 x float> @llvm.minnum.v8f32(<8 x float> %v0, <8 x float> %v1)
+ store <8 x float> %r, ptr %res
+ ret void
+}
+
+define void @minnum_v4f64(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: minnum_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a2, 0
+; CHECK-NEXT: xvld $xr1, $a1, 0
+; CHECK-NEXT: xvpickve.d $xr2, $xr0, 3
+; CHECK-NEXT: xvpickve.d $xr3, $xr1, 3
+; CHECK-NEXT: fmin.d $fa2, $fa3, $fa2
+; CHECK-NEXT: xvpickve.d $xr3, $xr0, 2
+; CHECK-NEXT: xvpickve.d $xr4, $xr1, 2
+; CHECK-NEXT: fmin.d $fa3, $fa4, $fa3
+; CHECK-NEXT: vextrins.d $vr3, $vr2, 16
+; CHECK-NEXT: xvpickve.d $xr2, $xr0, 1
+; CHECK-NEXT: xvpickve.d $xr4, $xr1, 1
+; CHECK-NEXT: fmin.d $fa2, $fa4, $fa2
+; CHECK-NEXT: xvpickve.d $xr0, $xr0, 0
+; CHECK-NEXT: xvpickve.d $xr1, $xr1, 0
+; CHECK-NEXT: fmin.d $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.d $vr0, $vr2, 16
+; CHECK-NEXT: xvpermi.q $xr0, $xr3, 2
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <4 x double>, ptr %x
+ %v1 = load <4 x double>, ptr %y
+ %r = call <4 x double> @llvm.minnum.v4f64(<4 x double> %v0, <4 x double> %v1)
+ store <4 x double> %r, ptr %res
+ ret void
+}
+
+define void @maxnum_v8f32(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: maxnum_v8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a2, 0
+; CHECK-NEXT: xvld $xr1, $a1, 0
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 5
+; CHECK-NEXT: xvpickve.w $xr3, $xr1, 5
+; CHECK-NEXT: fmax.s $fa2, $fa3, $fa2
+; CHECK-NEXT: xvpickve.w $xr3, $xr0, 4
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 4
+; CHECK-NEXT: fmax.s $fa3, $fa4, $fa3
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 16
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 6
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 6
+; CHECK-NEXT: fmax.s $fa2, $fa4, $fa2
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 32
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 7
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 7
+; CHECK-NEXT: fmax.s $fa2, $fa4, $fa2
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 48
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 1
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 1
+; CHECK-NEXT: fmax.s $fa2, $fa4, $fa2
+; CHECK-NEXT: xvpickve.w $xr4, $xr0, 0
+; CHECK-NEXT: xvpickve.w $xr5, $xr1, 0
+; CHECK-NEXT: fmax.s $fa4, $fa5, $fa4
+; CHECK-NEXT: vextrins.w $vr4, $vr2, 16
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 2
+; CHECK-NEXT: xvpickve.w $xr5, $xr1, 2
+; CHECK-NEXT: fmax.s $fa2, $fa5, $fa2
+; CHECK-NEXT: vextrins.w $vr4, $vr2, 32
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 3
+; CHECK-NEXT: xvpickve.w $xr1, $xr1, 3
+; CHECK-NEXT: fmax.s $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.w $vr4, $vr0, 48
+; CHECK-NEXT: xvpermi.q $xr4, $xr3, 2
+; CHECK-NEXT: xvst $xr4, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <8 x float>, ptr %x
+ %v1 = load <8 x float>, ptr %y
+ %r = call <8 x float> @llvm.maxnum.v8f32(<8 x float> %v0, <8 x float> %v1)
+ store <8 x float> %r, ptr %res
+ ret void
+}
+
+define void @maxnum_v4f64(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: maxnum_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a2, 0
+; CHECK-NEXT: xvld $xr1, $a1, 0
+; CHECK-NEXT: xvpickve.d $xr2, $xr0, 3
+; CHECK-NEXT: xvpickve.d $xr3, $xr1, 3
+; CHECK-NEXT: fmax.d $fa2, $fa3, $fa2
+; CHECK-NEXT: xvpickve.d $xr3, $xr0, 2
+; CHECK-NEXT: xvpickve.d $xr4, $xr1, 2
+; CHECK-NEXT: fmax.d $fa3, $fa4, $fa3
+; CHECK-NEXT: vextrins.d $vr3, $vr2, 16
+; CHECK-NEXT: xvpickve.d $xr2, $xr0, 1
+; CHECK-NEXT: xvpickve.d $xr4, $xr1, 1
+; CHECK-NEXT: fmax.d $fa2, $fa4, $fa2
+; CHECK-NEXT: xvpickve.d $xr0, $xr0, 0
+; CHECK-NEXT: xvpickve.d $xr1, $xr1, 0
+; CHECK-NEXT: fmax.d $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.d $vr0, $vr2, 16
+; CHECK-NEXT: xvpermi.q $xr0, $xr3, 2
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <4 x double>, ptr %x
+ %v1 = load <4 x double>, ptr %y
+ %r = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %v0, <4 x double> %v1)
+ store <4 x double> %r, ptr %res
+ ret void
+}
+
+declare <8 x float> @llvm.minnum.v8f32(<8 x float>, <8 x float>)
+declare <4 x double> @llvm.minnum.v4f64(<4 x double>, <4 x double>)
+declare <8 x float> @llvm.maxnum.v8f32(<8 x float>, <8 x float>)
+declare <4 x double> @llvm.maxnum.v4f64(<4 x double>, <4 x double>)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/fp-max-min.ll b/llvm/test/CodeGen/LoongArch/lsx/fp-max-min.ll
new file mode 100644
index 0000000..27ecb75
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/fp-max-min.ll
@@ -0,0 +1,112 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @minnum_v4f32(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: minnum_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a2, 0
+; CHECK-NEXT: vld $vr1, $a1, 0
+; CHECK-NEXT: vreplvei.w $vr2, $vr0, 1
+; CHECK-NEXT: vreplvei.w $vr3, $vr1, 1
+; CHECK-NEXT: fmin.s $fa2, $fa3, $fa2
+; CHECK-NEXT: vreplvei.w $vr3, $vr0, 0
+; CHECK-NEXT: vreplvei.w $vr4, $vr1, 0
+; CHECK-NEXT: fmin.s $fa3, $fa4, $fa3
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 16
+; CHECK-NEXT: vreplvei.w $vr2, $vr0, 2
+; CHECK-NEXT: vreplvei.w $vr4, $vr1, 2
+; CHECK-NEXT: fmin.s $fa2, $fa4, $fa2
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 32
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 3
+; CHECK-NEXT: vreplvei.w $vr1, $vr1, 3
+; CHECK-NEXT: fmin.s $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.w $vr3, $vr0, 48
+; CHECK-NEXT: vst $vr3, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <4 x float>, ptr %x
+ %v1 = load <4 x float>, ptr %y
+ %r = call <4 x float> @llvm.minnum.v4f32(<4 x float> %v0, <4 x float> %v1)
+ store <4 x float> %r, ptr %res
+ ret void
+}
+
+define void @minnum_v2f64(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: minnum_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a2, 0
+; CHECK-NEXT: vld $vr1, $a1, 0
+; CHECK-NEXT: vreplvei.d $vr2, $vr0, 1
+; CHECK-NEXT: vreplvei.d $vr3, $vr1, 1
+; CHECK-NEXT: fmin.d $fa2, $fa3, $fa2
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT: vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT: fmin.d $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.d $vr0, $vr2, 16
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <2 x double>, ptr %x
+ %v1 = load <2 x double>, ptr %y
+ %r = call <2 x double> @llvm.minnum.v2f64(<2 x double> %v0, <2 x double> %v1)
+ store <2 x double> %r, ptr %res
+ ret void
+}
+
+define void @maxnum_v4f32(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: maxnum_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a2, 0
+; CHECK-NEXT: vld $vr1, $a1, 0
+; CHECK-NEXT: vreplvei.w $vr2, $vr0, 1
+; CHECK-NEXT: vreplvei.w $vr3, $vr1, 1
+; CHECK-NEXT: fmax.s $fa2, $fa3, $fa2
+; CHECK-NEXT: vreplvei.w $vr3, $vr0, 0
+; CHECK-NEXT: vreplvei.w $vr4, $vr1, 0
+; CHECK-NEXT: fmax.s $fa3, $fa4, $fa3
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 16
+; CHECK-NEXT: vreplvei.w $vr2, $vr0, 2
+; CHECK-NEXT: vreplvei.w $vr4, $vr1, 2
+; CHECK-NEXT: fmax.s $fa2, $fa4, $fa2
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 32
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 3
+; CHECK-NEXT: vreplvei.w $vr1, $vr1, 3
+; CHECK-NEXT: fmax.s $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.w $vr3, $vr0, 48
+; CHECK-NEXT: vst $vr3, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <4 x float>, ptr %x
+ %v1 = load <4 x float>, ptr %y
+ %r = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %v0, <4 x float> %v1)
+ store <4 x float> %r, ptr %res
+ ret void
+}
+
+define void @maxnum_v2f64(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: maxnum_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a2, 0
+; CHECK-NEXT: vld $vr1, $a1, 0
+; CHECK-NEXT: vreplvei.d $vr2, $vr0, 1
+; CHECK-NEXT: vreplvei.d $vr3, $vr1, 1
+; CHECK-NEXT: fmax.d $fa2, $fa3, $fa2
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT: vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT: fmax.d $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.d $vr0, $vr2, 16
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <2 x double>, ptr %x
+ %v1 = load <2 x double>, ptr %y
+ %r = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %v0, <2 x double> %v1)
+ store <2 x double> %r, ptr %res
+ ret void
+}
+
+declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>)
+declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.maxnum.v2f64(<2 x double>, <2 x double>)
diff --git a/llvm/test/CodeGen/PowerPC/bittest.ll b/llvm/test/CodeGen/PowerPC/bittest.ll
new file mode 100644
index 0000000..cba56e3
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/bittest.ll
@@ -0,0 +1,193 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs < %s -O3 -mcpu=ppc -mtriple powerpc-ibm-aix \
+; RUN: -ppc-asm-full-reg-names | FileCheck %s
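+; @foo is lowered to a compare chain, while @goo is lowered with bit tests:
+; 1 is shifted left by the switch value and tested against the masks 5632
+; (bits 9, 10 and 12 -> sw.bb2) and 2304 (bits 8 and 11 -> sw.bb1).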
+
+define i32 @foo(i32 noundef signext %x) {
+; CHECK-LABEL: foo:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: mflr r0
+; CHECK-NEXT: stwu r1, -64(r1)
+; CHECK-NEXT: stw r0, 72(r1)
+; CHECK-NEXT: cmpwi r3, 8
+; CHECK-NEXT: stw r31, 60(r1) # 4-byte Folded Spill
+; CHECK-NEXT: mr r31, r3
+; CHECK-NEXT: li r3, 0
+; CHECK-NEXT: ble cr0, L..BB0_4
+; CHECK-NEXT: # %bb.1: # %entry
+; CHECK-NEXT: cmpwi r31, 11
+; CHECK-NEXT: bge cr0, L..BB0_7
+; CHECK-NEXT: # %bb.2: # %entry
+; CHECK-NEXT: cmplwi r31, 9
+; CHECK-NEXT: beq cr0, L..BB0_9
+; CHECK-NEXT: # %bb.3: # %entry
+; CHECK-NEXT: cmplwi r31, 10
+; CHECK-NEXT: beq cr0, L..BB0_11
+; CHECK-NEXT: b L..BB0_13
+; CHECK-NEXT: L..BB0_4: # %entry
+; CHECK-NEXT: cmplwi r31, 4
+; CHECK-NEXT: beq cr0, L..BB0_12
+; CHECK-NEXT: # %bb.5: # %entry
+; CHECK-NEXT: cmplwi r31, 7
+; CHECK-NEXT: beq cr0, L..BB0_11
+; CHECK-NEXT: # %bb.6: # %entry
+; CHECK-NEXT: cmplwi r31, 8
+; CHECK-NEXT: beq cr0, L..BB0_10
+; CHECK-NEXT: b L..BB0_13
+; CHECK-NEXT: L..BB0_7: # %entry
+; CHECK-NEXT: beq cr0, L..BB0_10
+; CHECK-NEXT: # %bb.8: # %entry
+; CHECK-NEXT: cmplwi r31, 12
+; CHECK-NEXT: bne cr0, L..BB0_13
+; CHECK-NEXT: L..BB0_9: # %sw.bb2
+; CHECK-NEXT: mr r3, r31
+; CHECK-NEXT: bl .foo3[PR]
+; CHECK-NEXT: nop
+; CHECK-NEXT: mr r3, r31
+; CHECK-NEXT: b L..BB0_13
+; CHECK-NEXT: L..BB0_10: # %sw.bb1
+; CHECK-NEXT: mr r3, r31
+; CHECK-NEXT: bl .foo2[PR]
+; CHECK-NEXT: nop
+; CHECK-NEXT: mr r3, r31
+; CHECK-NEXT: b L..BB0_13
+; CHECK-NEXT: L..BB0_11: # %sw.bb
+; CHECK-NEXT: mr r3, r31
+; CHECK-NEXT: bl .foo1[PR]
+; CHECK-NEXT: nop
+; CHECK-NEXT: mr r3, r31
+; CHECK-NEXT: b L..BB0_13
+; CHECK-NEXT: L..BB0_12: # %sw.bb3
+; CHECK-NEXT: li r3, 4
+; CHECK-NEXT: bl .foo4[PR]
+; CHECK-NEXT: nop
+; CHECK-NEXT: li r3, 4
+; CHECK-NEXT: L..BB0_13: # %return
+; CHECK-NEXT: lwz r31, 60(r1) # 4-byte Folded Reload
+; CHECK-NEXT: addi r1, r1, 64
+; CHECK-NEXT: lwz r0, 8(r1)
+; CHECK-NEXT: mtlr r0
+; CHECK-NEXT: blr
+entry:
+ switch i32 %x, label %return [
+ i32 7, label %sw.bb
+ i32 10, label %sw.bb
+ i32 8, label %sw.bb1
+ i32 11, label %sw.bb1
+ i32 9, label %sw.bb2
+ i32 12, label %sw.bb2
+ i32 4, label %sw.bb3
+ ]
+
+sw.bb: ; preds = %entry, %entry
+ tail call void @foo1(i32 noundef signext %x)
+ br label %return
+
+sw.bb1: ; preds = %entry, %entry
+ tail call void @foo2(i32 noundef signext %x)
+ br label %return
+
+sw.bb2: ; preds = %entry, %entry
+ tail call void @foo3(i32 noundef signext %x)
+ br label %return
+
+sw.bb3: ; preds = %entry
+ tail call void @foo4(i32 noundef signext 4)
+ br label %return
+
+return: ; preds = %sw.bb, %sw.bb1, %sw.bb2, %sw.bb3, %entry
+ %retval.0 = phi i32 [ 0, %entry ], [ 4, %sw.bb3 ], [ %x, %sw.bb2 ], [ %x, %sw.bb1 ], [ %x, %sw.bb ]
+ ret i32 %retval.0
+}
+
+define i32 @goo(i32 noundef signext %x) {
+; CHECK-LABEL: goo:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: mflr r0
+; CHECK-NEXT: stwu r1, -64(r1)
+; CHECK-NEXT: stw r0, 72(r1)
+; CHECK-NEXT: cmplwi r3, 12
+; CHECK-NEXT: stw r31, 60(r1) # 4-byte Folded Spill
+; CHECK-NEXT: mr r31, r3
+; CHECK-NEXT: bgt cr0, L..BB1_7
+; CHECK-NEXT: # %bb.1: # %entry
+; CHECK-NEXT: li r3, 1
+; CHECK-NEXT: slw r3, r3, r31
+; CHECK-NEXT: andi. r4, r3, 5632
+; CHECK-NEXT: bne cr0, L..BB1_4
+; CHECK-NEXT: # %bb.2: # %entry
+; CHECK-NEXT: andi. r3, r3, 2304
+; CHECK-NEXT: beq cr0, L..BB1_5
+; CHECK-NEXT: # %bb.3: # %sw.bb1
+; CHECK-NEXT: mr r3, r31
+; CHECK-NEXT: bl .foo2[PR]
+; CHECK-NEXT: nop
+; CHECK-NEXT: b L..BB1_9
+; CHECK-NEXT: L..BB1_4: # %sw.bb2
+; CHECK-NEXT: mr r3, r31
+; CHECK-NEXT: bl .foo3[PR]
+; CHECK-NEXT: nop
+; CHECK-NEXT: b L..BB1_9
+; CHECK-NEXT: L..BB1_5: # %entry
+; CHECK-NEXT: cmplwi r31, 7
+; CHECK-NEXT: bne cr0, L..BB1_7
+; CHECK-NEXT: # %bb.6: # %sw.bb
+; CHECK-NEXT: li r3, 7
+; CHECK-NEXT: li r31, 7
+; CHECK-NEXT: bl .foo1[PR]
+; CHECK-NEXT: nop
+; CHECK-NEXT: b L..BB1_9
+; CHECK-NEXT: L..BB1_7: # %entry
+; CHECK-NEXT: cmplwi r31, 4
+; CHECK-NEXT: li r31, 0
+; CHECK-NEXT: bne cr0, L..BB1_9
+; CHECK-NEXT: # %bb.8: # %sw.bb3
+; CHECK-NEXT: li r3, 4
+; CHECK-NEXT: li r31, 4
+; CHECK-NEXT: bl .foo4[PR]
+; CHECK-NEXT: nop
+; CHECK-NEXT: L..BB1_9: # %return
+; CHECK-NEXT: mr r3, r31
+; CHECK-NEXT: lwz r31, 60(r1) # 4-byte Folded Reload
+; CHECK-NEXT: addi r1, r1, 64
+; CHECK-NEXT: lwz r0, 8(r1)
+; CHECK-NEXT: mtlr r0
+; CHECK-NEXT: blr
+entry:
+ switch i32 %x, label %return [
+ i32 7, label %sw.bb
+ i32 8, label %sw.bb1
+ i32 11, label %sw.bb1
+ i32 9, label %sw.bb2
+ i32 10, label %sw.bb2
+ i32 12, label %sw.bb2
+ i32 4, label %sw.bb3
+ ]
+
+sw.bb: ; preds = %entry
+ tail call void @foo1(i32 noundef signext 7)
+ br label %return
+
+sw.bb1: ; preds = %entry, %entry
+ tail call void @foo2(i32 noundef signext %x)
+ br label %return
+
+sw.bb2: ; preds = %entry, %entry, %entry
+ tail call void @foo3(i32 noundef signext %x)
+ br label %return
+
+sw.bb3: ; preds = %entry
+ tail call void @foo4(i32 noundef signext 4)
+ br label %return
+
+return: ; preds = %sw.bb, %sw.bb1, %sw.bb2, %sw.bb3, %entry
+ %retval.0 = phi i32 [ 0, %entry ], [ 4, %sw.bb3 ], [ %x, %sw.bb2 ], [ %x, %sw.bb1 ], [ 7, %sw.bb ]
+ ret i32 %retval.0
+}
+
+declare void @foo1(i32 noundef signext)
+
+declare void @foo2(i32 noundef signext)
+
+declare void @foo3(i32 noundef signext)
+
+declare void @foo4(i32 noundef signext)
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll b/llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll
new file mode 100644
index 0000000..b43555c6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll
@@ -0,0 +1,642 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+b,+zalrsc -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV32IB-COMMON,RV32IB-ZALRSC %s
+; RUN: llc -mtriple=riscv32 -mattr=+b,+zalrsc,+permissive-zalrsc -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV32IB-COMMON,RV32IB-ZALRSC-PERM %s
+; RUN: llc -mtriple=riscv32 -mattr=+b,+zalrsc,+permissive-zalrsc,+a -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV32IB-COMMON,RV32IAB %s
+;
+; RUN: llc -mtriple=riscv64 -mattr=+b,+zalrsc -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV64IB-ZALRSC %s
+; RUN: llc -mtriple=riscv64 -mattr=+b,+zalrsc,+permissive-zalrsc -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV64IB-ZALRSC-PERM %s
+; RUN: llc -mtriple=riscv64 -mattr=+b,+zalrsc,+permissive-zalrsc,+a -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV64IAB %s
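+; Without +a, the min/max RMWs expand to LR/SC loops: plain Zalrsc keeps a
+; compare-and-branch select inside the loop, +permissive-zalrsc allows the
+; Zbb min/max/minu/maxu instructions there, and +a selects the native AMO
+; instructions instead.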
+
+define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; RV32IB-ZALRSC-LABEL: atomicrmw_max_i32_seq_cst:
+; RV32IB-ZALRSC: # %bb.0:
+; RV32IB-ZALRSC-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-NEXT: mv a3, a2
+; RV32IB-ZALRSC-NEXT: bge a3, a1, .LBB0_3
+; RV32IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB0_1 Depth=1
+; RV32IB-ZALRSC-NEXT: mv a3, a1
+; RV32IB-ZALRSC-NEXT: .LBB0_3: # in Loop: Header=BB0_1 Depth=1
+; RV32IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-NEXT: bnez a3, .LBB0_1
+; RV32IB-ZALRSC-NEXT: # %bb.4:
+; RV32IB-ZALRSC-NEXT: mv a0, a2
+; RV32IB-ZALRSC-NEXT: ret
+;
+; RV32IB-ZALRSC-PERM-LABEL: atomicrmw_max_i32_seq_cst:
+; RV32IB-ZALRSC-PERM: # %bb.0:
+; RV32IB-ZALRSC-PERM-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-PERM-NEXT: lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-PERM-NEXT: max a3, a2, a1
+; RV32IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-PERM-NEXT: bnez a3, .LBB0_1
+; RV32IB-ZALRSC-PERM-NEXT: # %bb.2:
+; RV32IB-ZALRSC-PERM-NEXT: mv a0, a2
+; RV32IB-ZALRSC-PERM-NEXT: ret
+;
+; RV32IAB-LABEL: atomicrmw_max_i32_seq_cst:
+; RV32IAB: # %bb.0:
+; RV32IAB-NEXT: amomax.w.aqrl a0, a1, (a0)
+; RV32IAB-NEXT: ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_max_i32_seq_cst:
+; RV64IB-ZALRSC: # %bb.0:
+; RV64IB-ZALRSC-NEXT: sext.w a2, a1
+; RV64IB-ZALRSC-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT: lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-NEXT: mv a3, a1
+; RV64IB-ZALRSC-NEXT: bge a3, a2, .LBB0_3
+; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB0_1 Depth=1
+; RV64IB-ZALRSC-NEXT: mv a3, a2
+; RV64IB-ZALRSC-NEXT: .LBB0_3: # in Loop: Header=BB0_1 Depth=1
+; RV64IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT: bnez a3, .LBB0_1
+; RV64IB-ZALRSC-NEXT: # %bb.4:
+; RV64IB-ZALRSC-NEXT: mv a0, a1
+; RV64IB-ZALRSC-NEXT: ret
+;
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_max_i32_seq_cst:
+; RV64IB-ZALRSC-PERM: # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT: sext.w a2, a1
+; RV64IB-ZALRSC-PERM-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT: lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: max a3, a1, a2
+; RV64IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB0_1
+; RV64IB-ZALRSC-PERM-NEXT: # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT: mv a0, a1
+; RV64IB-ZALRSC-PERM-NEXT: ret
+;
+; RV64IAB-LABEL: atomicrmw_max_i32_seq_cst:
+; RV64IAB: # %bb.0:
+; RV64IAB-NEXT: amomax.w.aqrl a0, a1, (a0)
+; RV64IAB-NEXT: ret
+ %1 = atomicrmw max ptr %a, i32 %b seq_cst
+ ret i32 %1
+}
+
+define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; RV32IB-ZALRSC-LABEL: atomicrmw_min_i32_seq_cst:
+; RV32IB-ZALRSC: # %bb.0:
+; RV32IB-ZALRSC-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-NEXT: mv a3, a2
+; RV32IB-ZALRSC-NEXT: bge a1, a3, .LBB1_3
+; RV32IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB1_1 Depth=1
+; RV32IB-ZALRSC-NEXT: mv a3, a1
+; RV32IB-ZALRSC-NEXT: .LBB1_3: # in Loop: Header=BB1_1 Depth=1
+; RV32IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-NEXT: bnez a3, .LBB1_1
+; RV32IB-ZALRSC-NEXT: # %bb.4:
+; RV32IB-ZALRSC-NEXT: mv a0, a2
+; RV32IB-ZALRSC-NEXT: ret
+;
+; RV32IB-ZALRSC-PERM-LABEL: atomicrmw_min_i32_seq_cst:
+; RV32IB-ZALRSC-PERM: # %bb.0:
+; RV32IB-ZALRSC-PERM-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-PERM-NEXT: lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-PERM-NEXT: min a3, a2, a1
+; RV32IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-PERM-NEXT: bnez a3, .LBB1_1
+; RV32IB-ZALRSC-PERM-NEXT: # %bb.2:
+; RV32IB-ZALRSC-PERM-NEXT: mv a0, a2
+; RV32IB-ZALRSC-PERM-NEXT: ret
+;
+; RV32IAB-LABEL: atomicrmw_min_i32_seq_cst:
+; RV32IAB: # %bb.0:
+; RV32IAB-NEXT: amomin.w.aqrl a0, a1, (a0)
+; RV32IAB-NEXT: ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_min_i32_seq_cst:
+; RV64IB-ZALRSC: # %bb.0:
+; RV64IB-ZALRSC-NEXT: sext.w a2, a1
+; RV64IB-ZALRSC-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT: lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-NEXT: mv a3, a1
+; RV64IB-ZALRSC-NEXT: bge a2, a3, .LBB1_3
+; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB1_1 Depth=1
+; RV64IB-ZALRSC-NEXT: mv a3, a2
+; RV64IB-ZALRSC-NEXT: .LBB1_3: # in Loop: Header=BB1_1 Depth=1
+; RV64IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT: bnez a3, .LBB1_1
+; RV64IB-ZALRSC-NEXT: # %bb.4:
+; RV64IB-ZALRSC-NEXT: mv a0, a1
+; RV64IB-ZALRSC-NEXT: ret
+;
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_min_i32_seq_cst:
+; RV64IB-ZALRSC-PERM: # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT: sext.w a2, a1
+; RV64IB-ZALRSC-PERM-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT: lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: min a3, a1, a2
+; RV64IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB1_1
+; RV64IB-ZALRSC-PERM-NEXT: # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT: mv a0, a1
+; RV64IB-ZALRSC-PERM-NEXT: ret
+;
+; RV64IAB-LABEL: atomicrmw_min_i32_seq_cst:
+; RV64IAB: # %bb.0:
+; RV64IAB-NEXT: amomin.w.aqrl a0, a1, (a0)
+; RV64IAB-NEXT: ret
+ %1 = atomicrmw min ptr %a, i32 %b seq_cst
+ ret i32 %1
+}
+
+define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; RV32IB-ZALRSC-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV32IB-ZALRSC: # %bb.0:
+; RV32IB-ZALRSC-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-NEXT: mv a3, a2
+; RV32IB-ZALRSC-NEXT: bgeu a3, a1, .LBB2_3
+; RV32IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB2_1 Depth=1
+; RV32IB-ZALRSC-NEXT: mv a3, a1
+; RV32IB-ZALRSC-NEXT: .LBB2_3: # in Loop: Header=BB2_1 Depth=1
+; RV32IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-NEXT: bnez a3, .LBB2_1
+; RV32IB-ZALRSC-NEXT: # %bb.4:
+; RV32IB-ZALRSC-NEXT: mv a0, a2
+; RV32IB-ZALRSC-NEXT: ret
+;
+; RV32IB-ZALRSC-PERM-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV32IB-ZALRSC-PERM: # %bb.0:
+; RV32IB-ZALRSC-PERM-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-PERM-NEXT: lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-PERM-NEXT: maxu a3, a2, a1
+; RV32IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-PERM-NEXT: bnez a3, .LBB2_1
+; RV32IB-ZALRSC-PERM-NEXT: # %bb.2:
+; RV32IB-ZALRSC-PERM-NEXT: mv a0, a2
+; RV32IB-ZALRSC-PERM-NEXT: ret
+;
+; RV32IAB-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV32IAB: # %bb.0:
+; RV32IAB-NEXT: amomaxu.w.aqrl a0, a1, (a0)
+; RV32IAB-NEXT: ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV64IB-ZALRSC: # %bb.0:
+; RV64IB-ZALRSC-NEXT: sext.w a2, a1
+; RV64IB-ZALRSC-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT: lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-NEXT: mv a3, a1
+; RV64IB-ZALRSC-NEXT: bgeu a3, a2, .LBB2_3
+; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB2_1 Depth=1
+; RV64IB-ZALRSC-NEXT: mv a3, a2
+; RV64IB-ZALRSC-NEXT: .LBB2_3: # in Loop: Header=BB2_1 Depth=1
+; RV64IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT: bnez a3, .LBB2_1
+; RV64IB-ZALRSC-NEXT: # %bb.4:
+; RV64IB-ZALRSC-NEXT: mv a0, a1
+; RV64IB-ZALRSC-NEXT: ret
+;
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV64IB-ZALRSC-PERM: # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT: sext.w a2, a1
+; RV64IB-ZALRSC-PERM-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT: lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: maxu a3, a1, a2
+; RV64IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB2_1
+; RV64IB-ZALRSC-PERM-NEXT: # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT: mv a0, a1
+; RV64IB-ZALRSC-PERM-NEXT: ret
+;
+; RV64IAB-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV64IAB: # %bb.0:
+; RV64IAB-NEXT: amomaxu.w.aqrl a0, a1, (a0)
+; RV64IAB-NEXT: ret
+ %1 = atomicrmw umax ptr %a, i32 %b seq_cst
+ ret i32 %1
+}
+
+define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; RV32IB-ZALRSC-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV32IB-ZALRSC: # %bb.0:
+; RV32IB-ZALRSC-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-NEXT: mv a3, a2
+; RV32IB-ZALRSC-NEXT: bgeu a1, a3, .LBB3_3
+; RV32IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB3_1 Depth=1
+; RV32IB-ZALRSC-NEXT: mv a3, a1
+; RV32IB-ZALRSC-NEXT: .LBB3_3: # in Loop: Header=BB3_1 Depth=1
+; RV32IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-NEXT: bnez a3, .LBB3_1
+; RV32IB-ZALRSC-NEXT: # %bb.4:
+; RV32IB-ZALRSC-NEXT: mv a0, a2
+; RV32IB-ZALRSC-NEXT: ret
+;
+; RV32IB-ZALRSC-PERM-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV32IB-ZALRSC-PERM: # %bb.0:
+; RV32IB-ZALRSC-PERM-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-PERM-NEXT: lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-PERM-NEXT: minu a3, a2, a1
+; RV32IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-PERM-NEXT: bnez a3, .LBB3_1
+; RV32IB-ZALRSC-PERM-NEXT: # %bb.2:
+; RV32IB-ZALRSC-PERM-NEXT: mv a0, a2
+; RV32IB-ZALRSC-PERM-NEXT: ret
+;
+; RV32IAB-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV32IAB: # %bb.0:
+; RV32IAB-NEXT: amominu.w.aqrl a0, a1, (a0)
+; RV32IAB-NEXT: ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV64IB-ZALRSC: # %bb.0:
+; RV64IB-ZALRSC-NEXT: sext.w a2, a1
+; RV64IB-ZALRSC-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT: lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-NEXT: mv a3, a1
+; RV64IB-ZALRSC-NEXT: bgeu a2, a3, .LBB3_3
+; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB3_1 Depth=1
+; RV64IB-ZALRSC-NEXT: mv a3, a2
+; RV64IB-ZALRSC-NEXT: .LBB3_3: # in Loop: Header=BB3_1 Depth=1
+; RV64IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT: bnez a3, .LBB3_1
+; RV64IB-ZALRSC-NEXT: # %bb.4:
+; RV64IB-ZALRSC-NEXT: mv a0, a1
+; RV64IB-ZALRSC-NEXT: ret
+;
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV64IB-ZALRSC-PERM: # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT: sext.w a2, a1
+; RV64IB-ZALRSC-PERM-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT: lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: minu a3, a1, a2
+; RV64IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB3_1
+; RV64IB-ZALRSC-PERM-NEXT: # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT: mv a0, a1
+; RV64IB-ZALRSC-PERM-NEXT: ret
+;
+; RV64IAB-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV64IAB: # %bb.0:
+; RV64IAB-NEXT: amominu.w.aqrl a0, a1, (a0)
+; RV64IAB-NEXT: ret
+ %1 = atomicrmw umin ptr %a, i32 %b seq_cst
+ ret i32 %1
+}
+
+define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; RV32IB-COMMON-LABEL: atomicrmw_max_i64_seq_cst:
+; RV32IB-COMMON: # %bb.0:
+; RV32IB-COMMON-NEXT: addi sp, sp, -32
+; RV32IB-COMMON-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: mv s0, a2
+; RV32IB-COMMON-NEXT: mv s1, a0
+; RV32IB-COMMON-NEXT: lw a4, 0(a0)
+; RV32IB-COMMON-NEXT: lw a5, 4(a0)
+; RV32IB-COMMON-NEXT: mv s2, a1
+; RV32IB-COMMON-NEXT: j .LBB4_2
+; RV32IB-COMMON-NEXT: .LBB4_1: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB4_2 Depth=1
+; RV32IB-COMMON-NEXT: sw a4, 8(sp)
+; RV32IB-COMMON-NEXT: sw a5, 12(sp)
+; RV32IB-COMMON-NEXT: addi a1, sp, 8
+; RV32IB-COMMON-NEXT: li a4, 5
+; RV32IB-COMMON-NEXT: li a5, 5
+; RV32IB-COMMON-NEXT: mv a0, s1
+; RV32IB-COMMON-NEXT: call __atomic_compare_exchange_8
+; RV32IB-COMMON-NEXT: lw a4, 8(sp)
+; RV32IB-COMMON-NEXT: lw a5, 12(sp)
+; RV32IB-COMMON-NEXT: bnez a0, .LBB4_7
+; RV32IB-COMMON-NEXT: .LBB4_2: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32IB-COMMON-NEXT: beq a5, s0, .LBB4_4
+; RV32IB-COMMON-NEXT: # %bb.3: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB4_2 Depth=1
+; RV32IB-COMMON-NEXT: slt a0, s0, a5
+; RV32IB-COMMON-NEXT: j .LBB4_5
+; RV32IB-COMMON-NEXT: .LBB4_4: # in Loop: Header=BB4_2 Depth=1
+; RV32IB-COMMON-NEXT: sltu a0, s2, a4
+; RV32IB-COMMON-NEXT: .LBB4_5: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB4_2 Depth=1
+; RV32IB-COMMON-NEXT: mv a2, a4
+; RV32IB-COMMON-NEXT: mv a3, a5
+; RV32IB-COMMON-NEXT: bnez a0, .LBB4_1
+; RV32IB-COMMON-NEXT: # %bb.6: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB4_2 Depth=1
+; RV32IB-COMMON-NEXT: mv a2, s2
+; RV32IB-COMMON-NEXT: mv a3, s0
+; RV32IB-COMMON-NEXT: j .LBB4_1
+; RV32IB-COMMON-NEXT: .LBB4_7: # %atomicrmw.end
+; RV32IB-COMMON-NEXT: mv a0, a4
+; RV32IB-COMMON-NEXT: mv a1, a5
+; RV32IB-COMMON-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: addi sp, sp, 32
+; RV32IB-COMMON-NEXT: ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_max_i64_seq_cst:
+; RV64IB-ZALRSC: # %bb.0:
+; RV64IB-ZALRSC-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-NEXT: mv a3, a2
+; RV64IB-ZALRSC-NEXT: bge a3, a1, .LBB4_3
+; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB4_1 Depth=1
+; RV64IB-ZALRSC-NEXT: mv a3, a1
+; RV64IB-ZALRSC-NEXT: .LBB4_3: # in Loop: Header=BB4_1 Depth=1
+; RV64IB-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT: bnez a3, .LBB4_1
+; RV64IB-ZALRSC-NEXT: # %bb.4:
+; RV64IB-ZALRSC-NEXT: mv a0, a2
+; RV64IB-ZALRSC-NEXT: ret
+;
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_max_i64_seq_cst:
+; RV64IB-ZALRSC-PERM: # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT: lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: max a3, a2, a1
+; RV64IB-ZALRSC-PERM-NEXT: sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB4_1
+; RV64IB-ZALRSC-PERM-NEXT: # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT: mv a0, a2
+; RV64IB-ZALRSC-PERM-NEXT: ret
+;
+; RV64IAB-LABEL: atomicrmw_max_i64_seq_cst:
+; RV64IAB: # %bb.0:
+; RV64IAB-NEXT: amomax.d.aqrl a0, a1, (a0)
+; RV64IAB-NEXT: ret
+ %1 = atomicrmw max ptr %a, i64 %b seq_cst
+ ret i64 %1
+}
+
+define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; RV32IB-COMMON-LABEL: atomicrmw_min_i64_seq_cst:
+; RV32IB-COMMON: # %bb.0:
+; RV32IB-COMMON-NEXT: addi sp, sp, -32
+; RV32IB-COMMON-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: mv s0, a2
+; RV32IB-COMMON-NEXT: mv s1, a0
+; RV32IB-COMMON-NEXT: lw a4, 0(a0)
+; RV32IB-COMMON-NEXT: lw a5, 4(a0)
+; RV32IB-COMMON-NEXT: mv s2, a1
+; RV32IB-COMMON-NEXT: j .LBB5_2
+; RV32IB-COMMON-NEXT: .LBB5_1: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB5_2 Depth=1
+; RV32IB-COMMON-NEXT: sw a4, 8(sp)
+; RV32IB-COMMON-NEXT: sw a5, 12(sp)
+; RV32IB-COMMON-NEXT: addi a1, sp, 8
+; RV32IB-COMMON-NEXT: li a4, 5
+; RV32IB-COMMON-NEXT: li a5, 5
+; RV32IB-COMMON-NEXT: mv a0, s1
+; RV32IB-COMMON-NEXT: call __atomic_compare_exchange_8
+; RV32IB-COMMON-NEXT: lw a4, 8(sp)
+; RV32IB-COMMON-NEXT: lw a5, 12(sp)
+; RV32IB-COMMON-NEXT: bnez a0, .LBB5_7
+; RV32IB-COMMON-NEXT: .LBB5_2: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32IB-COMMON-NEXT: beq a5, s0, .LBB5_4
+; RV32IB-COMMON-NEXT: # %bb.3: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB5_2 Depth=1
+; RV32IB-COMMON-NEXT: slt a0, a5, s0
+; RV32IB-COMMON-NEXT: j .LBB5_5
+; RV32IB-COMMON-NEXT: .LBB5_4: # in Loop: Header=BB5_2 Depth=1
+; RV32IB-COMMON-NEXT: sltu a0, a4, s2
+; RV32IB-COMMON-NEXT: .LBB5_5: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB5_2 Depth=1
+; RV32IB-COMMON-NEXT: mv a2, a4
+; RV32IB-COMMON-NEXT: mv a3, a5
+; RV32IB-COMMON-NEXT: bnez a0, .LBB5_1
+; RV32IB-COMMON-NEXT: # %bb.6: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB5_2 Depth=1
+; RV32IB-COMMON-NEXT: mv a2, s2
+; RV32IB-COMMON-NEXT: mv a3, s0
+; RV32IB-COMMON-NEXT: j .LBB5_1
+; RV32IB-COMMON-NEXT: .LBB5_7: # %atomicrmw.end
+; RV32IB-COMMON-NEXT: mv a0, a4
+; RV32IB-COMMON-NEXT: mv a1, a5
+; RV32IB-COMMON-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: addi sp, sp, 32
+; RV32IB-COMMON-NEXT: ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_min_i64_seq_cst:
+; RV64IB-ZALRSC: # %bb.0:
+; RV64IB-ZALRSC-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-NEXT: mv a3, a2
+; RV64IB-ZALRSC-NEXT: bge a1, a3, .LBB5_3
+; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1
+; RV64IB-ZALRSC-NEXT: mv a3, a1
+; RV64IB-ZALRSC-NEXT: .LBB5_3: # in Loop: Header=BB5_1 Depth=1
+; RV64IB-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT: bnez a3, .LBB5_1
+; RV64IB-ZALRSC-NEXT: # %bb.4:
+; RV64IB-ZALRSC-NEXT: mv a0, a2
+; RV64IB-ZALRSC-NEXT: ret
+;
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_min_i64_seq_cst:
+; RV64IB-ZALRSC-PERM: # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT: lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: min a3, a2, a1
+; RV64IB-ZALRSC-PERM-NEXT: sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB5_1
+; RV64IB-ZALRSC-PERM-NEXT: # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT: mv a0, a2
+; RV64IB-ZALRSC-PERM-NEXT: ret
+;
+; RV64IAB-LABEL: atomicrmw_min_i64_seq_cst:
+; RV64IAB: # %bb.0:
+; RV64IAB-NEXT: amomin.d.aqrl a0, a1, (a0)
+; RV64IAB-NEXT: ret
+ %1 = atomicrmw min ptr %a, i64 %b seq_cst
+ ret i64 %1
+}
+
+define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; RV32IB-COMMON-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV32IB-COMMON: # %bb.0:
+; RV32IB-COMMON-NEXT: addi sp, sp, -32
+; RV32IB-COMMON-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: mv s0, a2
+; RV32IB-COMMON-NEXT: mv s1, a0
+; RV32IB-COMMON-NEXT: lw a4, 0(a0)
+; RV32IB-COMMON-NEXT: lw a5, 4(a0)
+; RV32IB-COMMON-NEXT: mv s2, a1
+; RV32IB-COMMON-NEXT: j .LBB6_2
+; RV32IB-COMMON-NEXT: .LBB6_1: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB6_2 Depth=1
+; RV32IB-COMMON-NEXT: sw a4, 8(sp)
+; RV32IB-COMMON-NEXT: sw a5, 12(sp)
+; RV32IB-COMMON-NEXT: addi a1, sp, 8
+; RV32IB-COMMON-NEXT: li a4, 5
+; RV32IB-COMMON-NEXT: li a5, 5
+; RV32IB-COMMON-NEXT: mv a0, s1
+; RV32IB-COMMON-NEXT: call __atomic_compare_exchange_8
+; RV32IB-COMMON-NEXT: lw a4, 8(sp)
+; RV32IB-COMMON-NEXT: lw a5, 12(sp)
+; RV32IB-COMMON-NEXT: bnez a0, .LBB6_7
+; RV32IB-COMMON-NEXT: .LBB6_2: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32IB-COMMON-NEXT: beq a5, s0, .LBB6_4
+; RV32IB-COMMON-NEXT: # %bb.3: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB6_2 Depth=1
+; RV32IB-COMMON-NEXT: sltu a0, s0, a5
+; RV32IB-COMMON-NEXT: j .LBB6_5
+; RV32IB-COMMON-NEXT: .LBB6_4: # in Loop: Header=BB6_2 Depth=1
+; RV32IB-COMMON-NEXT: sltu a0, s2, a4
+; RV32IB-COMMON-NEXT: .LBB6_5: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB6_2 Depth=1
+; RV32IB-COMMON-NEXT: mv a2, a4
+; RV32IB-COMMON-NEXT: mv a3, a5
+; RV32IB-COMMON-NEXT: bnez a0, .LBB6_1
+; RV32IB-COMMON-NEXT: # %bb.6: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB6_2 Depth=1
+; RV32IB-COMMON-NEXT: mv a2, s2
+; RV32IB-COMMON-NEXT: mv a3, s0
+; RV32IB-COMMON-NEXT: j .LBB6_1
+; RV32IB-COMMON-NEXT: .LBB6_7: # %atomicrmw.end
+; RV32IB-COMMON-NEXT: mv a0, a4
+; RV32IB-COMMON-NEXT: mv a1, a5
+; RV32IB-COMMON-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: addi sp, sp, 32
+; RV32IB-COMMON-NEXT: ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV64IB-ZALRSC: # %bb.0:
+; RV64IB-ZALRSC-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-NEXT: mv a3, a2
+; RV64IB-ZALRSC-NEXT: bgeu a3, a1, .LBB6_3
+; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1
+; RV64IB-ZALRSC-NEXT: mv a3, a1
+; RV64IB-ZALRSC-NEXT: .LBB6_3: # in Loop: Header=BB6_1 Depth=1
+; RV64IB-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT: bnez a3, .LBB6_1
+; RV64IB-ZALRSC-NEXT: # %bb.4:
+; RV64IB-ZALRSC-NEXT: mv a0, a2
+; RV64IB-ZALRSC-NEXT: ret
+;
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV64IB-ZALRSC-PERM: # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT: lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: maxu a3, a2, a1
+; RV64IB-ZALRSC-PERM-NEXT: sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB6_1
+; RV64IB-ZALRSC-PERM-NEXT: # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT: mv a0, a2
+; RV64IB-ZALRSC-PERM-NEXT: ret
+;
+; RV64IAB-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV64IAB: # %bb.0:
+; RV64IAB-NEXT: amomaxu.d.aqrl a0, a1, (a0)
+; RV64IAB-NEXT: ret
+ %1 = atomicrmw umax ptr %a, i64 %b seq_cst
+ ret i64 %1
+}
+
+define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; RV32IB-COMMON-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV32IB-COMMON: # %bb.0:
+; RV32IB-COMMON-NEXT: addi sp, sp, -32
+; RV32IB-COMMON-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT: mv s0, a2
+; RV32IB-COMMON-NEXT: mv s1, a0
+; RV32IB-COMMON-NEXT: lw a4, 0(a0)
+; RV32IB-COMMON-NEXT: lw a5, 4(a0)
+; RV32IB-COMMON-NEXT: mv s2, a1
+; RV32IB-COMMON-NEXT: j .LBB7_2
+; RV32IB-COMMON-NEXT: .LBB7_1: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB7_2 Depth=1
+; RV32IB-COMMON-NEXT: sw a4, 8(sp)
+; RV32IB-COMMON-NEXT: sw a5, 12(sp)
+; RV32IB-COMMON-NEXT: addi a1, sp, 8
+; RV32IB-COMMON-NEXT: li a4, 5
+; RV32IB-COMMON-NEXT: li a5, 5
+; RV32IB-COMMON-NEXT: mv a0, s1
+; RV32IB-COMMON-NEXT: call __atomic_compare_exchange_8
+; RV32IB-COMMON-NEXT: lw a4, 8(sp)
+; RV32IB-COMMON-NEXT: lw a5, 12(sp)
+; RV32IB-COMMON-NEXT: bnez a0, .LBB7_7
+; RV32IB-COMMON-NEXT: .LBB7_2: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32IB-COMMON-NEXT: beq a5, s0, .LBB7_4
+; RV32IB-COMMON-NEXT: # %bb.3: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB7_2 Depth=1
+; RV32IB-COMMON-NEXT: sltu a0, a5, s0
+; RV32IB-COMMON-NEXT: j .LBB7_5
+; RV32IB-COMMON-NEXT: .LBB7_4: # in Loop: Header=BB7_2 Depth=1
+; RV32IB-COMMON-NEXT: sltu a0, a4, s2
+; RV32IB-COMMON-NEXT: .LBB7_5: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB7_2 Depth=1
+; RV32IB-COMMON-NEXT: mv a2, a4
+; RV32IB-COMMON-NEXT: mv a3, a5
+; RV32IB-COMMON-NEXT: bnez a0, .LBB7_1
+; RV32IB-COMMON-NEXT: # %bb.6: # %atomicrmw.start
+; RV32IB-COMMON-NEXT: # in Loop: Header=BB7_2 Depth=1
+; RV32IB-COMMON-NEXT: mv a2, s2
+; RV32IB-COMMON-NEXT: mv a3, s0
+; RV32IB-COMMON-NEXT: j .LBB7_1
+; RV32IB-COMMON-NEXT: .LBB7_7: # %atomicrmw.end
+; RV32IB-COMMON-NEXT: mv a0, a4
+; RV32IB-COMMON-NEXT: mv a1, a5
+; RV32IB-COMMON-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT: addi sp, sp, 32
+; RV32IB-COMMON-NEXT: ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV64IB-ZALRSC: # %bb.0:
+; RV64IB-ZALRSC-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-NEXT: mv a3, a2
+; RV64IB-ZALRSC-NEXT: bgeu a1, a3, .LBB7_3
+; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1
+; RV64IB-ZALRSC-NEXT: mv a3, a1
+; RV64IB-ZALRSC-NEXT: .LBB7_3: # in Loop: Header=BB7_1 Depth=1
+; RV64IB-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT: bnez a3, .LBB7_1
+; RV64IB-ZALRSC-NEXT: # %bb.4:
+; RV64IB-ZALRSC-NEXT: mv a0, a2
+; RV64IB-ZALRSC-NEXT: ret
+;
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV64IB-ZALRSC-PERM: # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT: lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: minu a3, a2, a1
+; RV64IB-ZALRSC-PERM-NEXT: sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB7_1
+; RV64IB-ZALRSC-PERM-NEXT: # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT: mv a0, a2
+; RV64IB-ZALRSC-PERM-NEXT: ret
+;
+; RV64IAB-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV64IAB: # %bb.0:
+; RV64IAB-NEXT: amominu.d.aqrl a0, a1, (a0)
+; RV64IAB-NEXT: ret
+ %1 = atomicrmw umin ptr %a, i64 %b seq_cst
+ ret i64 %1
+}
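The Zalrsc lowerings above expand each seq_cst read-modify-write into an LR/SC retry loop: the default expansion keeps only base-ISA instructions (a compare, branch, and move) between the lr and sc, while permissive-zalrsc lets the Zbb max/min/maxu/minu instruction sit directly inside the loop. At the source level the same operation is the usual compare-exchange retry loop; the C11 sketch below is a hand-written equivalent of atomicrmw max, not output produced by this patch:

    #include <stdatomic.h>

    /* Hand-written C11 equivalent of `atomicrmw max ptr %a, i32 %b seq_cst`:
       keep retrying until the maximum is installed; return the old value. */
    int fetch_max_i32(_Atomic int *a, int b) {
        int old = atomic_load_explicit(a, memory_order_relaxed);
        int desired;
        do {
            desired = old > b ? old : b;  /* the compare/branch (or Zbb max) inside the loop */
        } while (!atomic_compare_exchange_weak_explicit(
                     a, &old, desired, memory_order_seq_cst, memory_order_relaxed));
        return old;
    }

The i64 cases on RV32 instead take the __atomic_compare_exchange_8 libcall path shown in the RV32IB-COMMON bodies, since RV32 has no 64-bit LR/SC.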
diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index 5e5f2b7..37e11db 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -81,6 +81,7 @@
; CHECK-NEXT: optimized-nf7-segment-load-store - vlseg7eN.v and vsseg7eN.v are implemented as a wide memory op and shuffle.
; CHECK-NEXT: optimized-nf8-segment-load-store - vlseg8eN.v and vsseg8eN.v are implemented as a wide memory op and shuffle.
; CHECK-NEXT: optimized-zero-stride-load - Optimized (perform fewer memory operations)zero-stride vector load.
+; CHECK-NEXT: permissive-zalrsc - Implementation permits non-base instructions between LR/SC pairs.
; CHECK-NEXT: predictable-select-expensive - Prefer likely predicted branches over selects.
; CHECK-NEXT: prefer-vsetvli-over-read-vlenb - Prefer vsetvli over read vlenb CSR to calculate VLEN.
; CHECK-NEXT: prefer-w-inst - Prefer instructions with W suffix.
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveActiveMin.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveActiveMin.ll
new file mode 100644
index 0000000..d121c1a
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveActiveMin.ll
@@ -0,0 +1,57 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-vulkan-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan-unknown %s -o - -filetype=obj | spirv-val %}
+
+; Test lowering to the SPIR-V backend for various scalar and vector types.
+
+; CHECK: OpCapability GroupNonUniformArithmetic
+
+; CHECK-DAG: %[[#f16:]] = OpTypeFloat 16
+; CHECK-DAG: %[[#f32:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#v4_half:]] = OpTypeVector %[[#f16]] 4
+; CHECK-DAG: %[[#scope:]] = OpConstant %[[#uint]] 3
+
+; CHECK-LABEL: Begin function test_float
+; CHECK: %[[#fexpr:]] = OpFunctionParameter %[[#f32]]
+define float @test_float(float %fexpr) {
+entry:
+; CHECK: %[[#fret:]] = OpGroupNonUniformFMin %[[#f32]] %[[#scope]] Reduce %[[#fexpr]]
+ %0 = call float @llvm.spv.wave.reduce.min.f32(float %fexpr)
+ ret float %0
+}
+
+; CHECK-LABEL: Begin function test_int_signed
+; CHECK: %[[#iexpr:]] = OpFunctionParameter %[[#uint]]
+define i32 @test_int_signed(i32 %iexpr) {
+entry:
+; CHECK: %[[#iret:]] = OpGroupNonUniformSMin %[[#uint]] %[[#scope]] Reduce %[[#iexpr]]
+ %0 = call i32 @llvm.spv.wave.reduce.min.i32(i32 %iexpr)
+ ret i32 %0
+}
+
+; CHECK-LABEL: Begin function test_int_unsigned
+; CHECK: %[[#iexpr:]] = OpFunctionParameter %[[#uint]]
+define i32 @test_int_unsigned(i32 %iexpr) {
+entry:
+; CHECK: %[[#iret:]] = OpGroupNonUniformUMin %[[#uint]] %[[#scope]] Reduce %[[#iexpr]]
+ %0 = call i32 @llvm.spv.wave.reduce.umin.i32(i32 %iexpr)
+ ret i32 %0
+}
+
+; CHECK-LABEL: Begin function test_vhalf
+; CHECK: %[[#vbexpr:]] = OpFunctionParameter %[[#v4_half]]
+define <4 x half> @test_vhalf(<4 x half> %vbexpr) {
+entry:
+; CHECK: %[[#vhalfret:]] = OpGroupNonUniformFMin %[[#v4_half]] %[[#scope]] Reduce %[[#vbexpr]]
+ %0 = call <4 x half> @llvm.spv.wave.reduce.min.v4half(<4 x half> %vbexpr)
+ ret <4 x half> %0
+}
+
+declare float @llvm.spv.wave.reduce.min.f32(float)
+declare i32 @llvm.spv.wave.reduce.min.i32(i32)
+declare <4 x half> @llvm.spv.wave.reduce.min.v4half(<4 x half>)
+
+declare i32 @llvm.spv.wave.reduce.umin.i32(i32)
+
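OpGroupNonUniform{F,S,U}Min with the Reduce group operation folds the values of all active invocations in the subgroup (the scope constant 3 checked above) into a single minimum. A host-side C model of that semantics, illustrative only (`lane_vals`, `active_mask`, and `wave_size` are hypothetical inputs; real execution is per-lane on the GPU):

    #include <math.h>

    /* Illustrative model of OpGroupNonUniformFMin ... Reduce: fold the
       per-lane values of all active lanes into one minimum. */
    float wave_reduce_fmin(const float *lane_vals,
                           unsigned long long active_mask, int wave_size) {
        float m = INFINITY;
        for (int i = 0; i < wave_size; ++i)
            if (active_mask & (1ULL << i))
                m = fminf(m, lane_vals[i]);
        return m;
    }

The signed, unsigned, and float variants of the intrinsic select OpGroupNonUniformSMin, UMin, and FMin respectively, as the CHECK lines verify.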
diff --git a/llvm/test/CodeGen/X86/bfloat-calling-conv.ll b/llvm/test/CodeGen/X86/bfloat-calling-conv.ll
index ea4d32b..d087491 100644
--- a/llvm/test/CodeGen/X86/bfloat-calling-conv.ll
+++ b/llvm/test/CodeGen/X86/bfloat-calling-conv.ll
@@ -660,8 +660,7 @@ define <3 x bfloat> @call_ret_v3bf16(ptr %ptr) #0 {
; SSE2-LABEL: call_ret_v3bf16:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rax
-; SSE2-NEXT: movl 4(%rdi), %eax
-; SSE2-NEXT: pinsrw $0, %eax, %xmm1
+; SSE2-NEXT: pinsrw $0, 4(%rdi), %xmm1
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: callq returns_v3bf16@PLT
@@ -725,8 +724,7 @@ define <3 x bfloat> @call_ret_v3bf16(ptr %ptr) #0 {
; AVXNECONVERT-LABEL: call_ret_v3bf16:
; AVXNECONVERT: # %bb.0:
; AVXNECONVERT-NEXT: pushq %rax
-; AVXNECONVERT-NEXT: movl 4(%rdi), %eax
-; AVXNECONVERT-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; AVXNECONVERT-NEXT: vpinsrw $0, 4(%rdi), %xmm0, %xmm0
; AVXNECONVERT-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVXNECONVERT-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVXNECONVERT-NEXT: callq returns_v3bf16@PLT
diff --git a/llvm/test/CodeGen/X86/trunc-srl-load.ll b/llvm/test/CodeGen/X86/trunc-srl-load.ll
index 4dae143..d9c21d3 100644
--- a/llvm/test/CodeGen/X86/trunc-srl-load.ll
+++ b/llvm/test/CodeGen/X86/trunc-srl-load.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefixes=X86
-; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=x86-64 | FileCheck %s --check-prefixes=X64,SSE
-; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=X64,SSE
-; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=X64,AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=X64,AVX,AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=x86-64 | FileCheck %s --check-prefixes=X64
+; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=X64
+; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=X64
+; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=X64
; Tests showing the analysis of non-constant shift amounts to improve load address math
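The transform exercised here rewrites a wide load feeding a variable shift-right-then-truncate into a single narrow load at a computed address, whenever the shift amount is aligned to the result width. A hand-written C sketch of the before/after shapes for extractSub64_16 (little-endian assumption, as on x86; not part of the test file):

    #include <stdint.h>
    #include <string.h>

    /* Shape of the IR under test: load the whole word, shift by a
       16-bit-aligned bit index, truncate to 16 bits. */
    uint16_t extract_before(const uint64_t *word, unsigned idx) {
        unsigned bit = idx & 63u & ~15u;  /* aligned bit index: 0, 16, 32, 48 */
        return (uint16_t)(*word >> bit);
    }

    /* What the improved codegen does: turn the bit index into a byte
       offset and load just the addressed halfword. */
    uint16_t extract_after(const void *word, unsigned idx) {
        uint16_t v;
        memcpy(&v, (const char *)word + ((idx & 48u) >> 3), sizeof v);
        return v;
    }

The new `andl $48` / `shrl $3` / `movzwl (%rdi,%rsi)` CHECK sequences below are exactly this byte-offset form, replacing the old funnel-shift expansions.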
@@ -12,42 +12,20 @@
define i16 @extractSub64_16(ptr %word, i32 %idx) nounwind {
; X86-LABEL: extractSub64_16:
; X86: # %bb.0:
-; X86-NEXT: pushl %esi
-; X86-NEXT: movb {{[0-9]+}}(%esp), %ch
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl (%eax), %edx
-; X86-NEXT: movl 4(%eax), %esi
-; X86-NEXT: movb %ch, %cl
-; X86-NEXT: andb $16, %cl
-; X86-NEXT: movl %esi, %eax
-; X86-NEXT: shrl %cl, %eax
-; X86-NEXT: shrdl %cl, %esi, %edx
-; X86-NEXT: testb $32, %ch
-; X86-NEXT: jne .LBB0_2
-; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: .LBB0_2:
-; X86-NEXT: # kill: def $ax killed $ax killed $eax
-; X86-NEXT: popl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $48, %ecx
+; X86-NEXT: shrl $3, %ecx
+; X86-NEXT: movzwl (%eax,%ecx), %eax
; X86-NEXT: retl
;
-; SSE-LABEL: extractSub64_16:
-; SSE: # %bb.0:
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: movq (%rdi), %rax
-; SSE-NEXT: andb $48, %cl
-; SSE-NEXT: # kill: def $cl killed $cl killed $ecx
-; SSE-NEXT: shrq %cl, %rax
-; SSE-NEXT: # kill: def $ax killed $ax killed $rax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: extractSub64_16:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: andb $48, %sil
-; AVX-NEXT: shrxq %rsi, (%rdi), %rax
-; AVX-NEXT: # kill: def $ax killed $ax killed $rax
-; AVX-NEXT: retq
+; X64-LABEL: extractSub64_16:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: andl $48, %esi
+; X64-NEXT: shrl $3, %esi
+; X64-NEXT: movzwl (%rdi,%rsi), %eax
+; X64-NEXT: retq
%idx_bounds = and i32 %idx, 63
%idx_align = and i32 %idx_bounds, -16
%sh = zext nneg i32 %idx_align to i64
@@ -60,67 +38,20 @@ define i16 @extractSub64_16(ptr %word, i32 %idx) nounwind {
define i16 @extractSub128_16(ptr %word, i32 %idx) nounwind {
; X86-LABEL: extractSub128_16:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $32, %esp
-; X86-NEXT: movzbl 12(%ebp), %eax
-; X86-NEXT: movl 8(%ebp), %ecx
-; X86-NEXT: movl (%ecx), %edx
-; X86-NEXT: movl 4(%ecx), %esi
-; X86-NEXT: movl 8(%ecx), %edi
-; X86-NEXT: movl 12(%ecx), %ecx
-; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edx, (%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: andb $16, %cl
-; X86-NEXT: shrb $3, %al
-; X86-NEXT: andb $12, %al
-; X86-NEXT: movzbl %al, %edx
-; X86-NEXT: movl (%esp,%edx), %eax
-; X86-NEXT: movl 4(%esp,%edx), %edx
-; X86-NEXT: shrdl %cl, %edx, %eax
-; X86-NEXT: # kill: def $ax killed $ax killed $eax
-; X86-NEXT: leal -8(%ebp), %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $112, %ecx
+; X86-NEXT: shrl $3, %ecx
+; X86-NEXT: movzwl (%eax,%ecx), %eax
; X86-NEXT: retl
;
-; SSE-LABEL: extractSub128_16:
-; SSE: # %bb.0:
-; SSE-NEXT: movq (%rdi), %rax
-; SSE-NEXT: movq 8(%rdi), %rdx
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: andb $48, %cl
-; SSE-NEXT: movq %rdx, %rdi
-; SSE-NEXT: shrq %cl, %rdi
-; SSE-NEXT: shrdq %cl, %rdx, %rax
-; SSE-NEXT: testb $64, %sil
-; SSE-NEXT: cmovneq %rdi, %rax
-; SSE-NEXT: # kill: def $ax killed $ax killed $rax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: extractSub128_16:
-; AVX: # %bb.0:
-; AVX-NEXT: movq (%rdi), %rdx
-; AVX-NEXT: movq 8(%rdi), %rax
-; AVX-NEXT: movl %esi, %ecx
-; AVX-NEXT: andb $48, %cl
-; AVX-NEXT: shrdq %cl, %rax, %rdx
-; AVX-NEXT: shrxq %rcx, %rax, %rax
-; AVX-NEXT: testb $64, %sil
-; AVX-NEXT: cmoveq %rdx, %rax
-; AVX-NEXT: # kill: def $ax killed $ax killed $rax
-; AVX-NEXT: retq
+; X64-LABEL: extractSub128_16:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: andl $112, %esi
+; X64-NEXT: shrl $3, %esi
+; X64-NEXT: movzwl (%rdi,%rsi), %eax
+; X64-NEXT: retq
%idx_bounds = and i32 %idx, 127
%idx_align = and i32 %idx_bounds, -16
%sh = zext nneg i32 %idx_align to i128
@@ -133,62 +64,20 @@ define i16 @extractSub128_16(ptr %word, i32 %idx) nounwind {
define i32 @extractSub128_32(ptr %word, i32 %idx) nounwind {
; X86-LABEL: extractSub128_32:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $32, %esp
-; X86-NEXT: movzbl 12(%ebp), %eax
-; X86-NEXT: movl 8(%ebp), %ecx
-; X86-NEXT: movl (%ecx), %edx
-; X86-NEXT: movl 4(%ecx), %esi
-; X86-NEXT: movl 8(%ecx), %edi
-; X86-NEXT: movl 12(%ecx), %ecx
-; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edx, (%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: andb $96, %al
-; X86-NEXT: shrb $3, %al
-; X86-NEXT: movzbl %al, %eax
-; X86-NEXT: movl (%esp,%eax), %eax
-; X86-NEXT: leal -8(%ebp), %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $96, %ecx
+; X86-NEXT: shrl $3, %ecx
+; X86-NEXT: movl (%eax,%ecx), %eax
; X86-NEXT: retl
;
-; SSE-LABEL: extractSub128_32:
-; SSE: # %bb.0:
-; SSE-NEXT: movq (%rdi), %rax
-; SSE-NEXT: movq 8(%rdi), %rdx
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: andb $32, %cl
-; SSE-NEXT: movq %rdx, %rdi
-; SSE-NEXT: shrq %cl, %rdi
-; SSE-NEXT: shrdq %cl, %rdx, %rax
-; SSE-NEXT: testb $64, %sil
-; SSE-NEXT: cmovneq %rdi, %rax
-; SSE-NEXT: # kill: def $eax killed $eax killed $rax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: extractSub128_32:
-; AVX: # %bb.0:
-; AVX-NEXT: movq (%rdi), %rdx
-; AVX-NEXT: movq 8(%rdi), %rax
-; AVX-NEXT: movl %esi, %ecx
-; AVX-NEXT: andb $32, %cl
-; AVX-NEXT: shrdq %cl, %rax, %rdx
-; AVX-NEXT: shrxq %rcx, %rax, %rax
-; AVX-NEXT: testb $64, %sil
-; AVX-NEXT: cmoveq %rdx, %rax
-; AVX-NEXT: # kill: def $eax killed $eax killed $rax
-; AVX-NEXT: retq
+; X64-LABEL: extractSub128_32:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: andl $96, %esi
+; X64-NEXT: shrl $3, %esi
+; X64-NEXT: movl (%rdi,%rsi), %eax
+; X64-NEXT: retq
%idx_bounds = and i32 %idx, 127
%idx_align = and i32 %idx_bounds, -32
%sh = zext nneg i32 %idx_align to i128
@@ -201,46 +90,20 @@ define i32 @extractSub128_32(ptr %word, i32 %idx) nounwind {
define i64 @extractSub128_64(ptr %word, i32 %idx) nounwind {
; X86-LABEL: extractSub128_64:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $32, %esp
-; X86-NEXT: movzbl 12(%ebp), %eax
-; X86-NEXT: movl 8(%ebp), %ecx
-; X86-NEXT: movl (%ecx), %edx
-; X86-NEXT: movl 4(%ecx), %esi
-; X86-NEXT: movl 8(%ecx), %edi
-; X86-NEXT: movl 12(%ecx), %ecx
-; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edx, (%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: andb $64, %al
-; X86-NEXT: shrb $3, %al
-; X86-NEXT: movzbl %al, %ecx
-; X86-NEXT: movl (%esp,%ecx), %eax
-; X86-NEXT: movl 4(%esp,%ecx), %edx
-; X86-NEXT: leal -8(%ebp), %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: andl $64, %edx
+; X86-NEXT: shrl $3, %edx
+; X86-NEXT: movl (%ecx,%edx), %eax
+; X86-NEXT: movl 4(%ecx,%edx), %edx
; X86-NEXT: retl
;
; X64-LABEL: extractSub128_64:
; X64: # %bb.0:
-; X64-NEXT: testb $64, %sil
-; X64-NEXT: je .LBB3_1
-; X64-NEXT: # %bb.2:
-; X64-NEXT: movq 8(%rdi), %rax
-; X64-NEXT: retq
-; X64-NEXT: .LBB3_1:
-; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: andl $64, %esi
+; X64-NEXT: shrl $3, %esi
+; X64-NEXT: movq (%rdi,%rsi), %rax
; X64-NEXT: retq
%idx_bounds = and i32 %idx, 127
%idx_align = and i32 %idx_bounds, -64
@@ -254,185 +117,20 @@ define i64 @extractSub128_64(ptr %word, i32 %idx) nounwind {
define i8 @extractSub512_8(ptr %word, i32 %idx) nounwind {
; X86-LABEL: extractSub512_8:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $192, %esp
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: movl (%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 4(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 24(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 32(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%eax), %ebx
-; X86-NEXT: movl 44(%eax), %edi
-; X86-NEXT: movl 48(%eax), %esi
-; X86-NEXT: movl 52(%eax), %edx
-; X86-NEXT: movl 56(%eax), %ecx
-; X86-NEXT: movl 60(%eax), %eax
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 12(%ebp), %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edx, %ecx
-; X86-NEXT: andl $24, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrl $3, %edx
-; X86-NEXT: andl $60, %edx
-; X86-NEXT: movl 48(%esp,%edx), %eax
-; X86-NEXT: movl 52(%esp,%edx), %edx
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: shrdl %cl, %edx, %eax
-; X86-NEXT: # kill: def $al killed $al killed $eax
-; X86-NEXT: leal -12(%ebp), %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shrl $3, %ecx
+; X86-NEXT: andl $63, %ecx
+; X86-NEXT: movzbl (%eax,%ecx), %eax
; X86-NEXT: retl
;
-; SSE-LABEL: extractSub512_8:
-; SSE: # %bb.0:
-; SSE-NEXT: pushq %rax
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
-; SSE-NEXT: movups (%rdi), %xmm0
-; SSE-NEXT: movups 16(%rdi), %xmm1
-; SSE-NEXT: movups 32(%rdi), %xmm2
-; SSE-NEXT: movups 48(%rdi), %xmm3
-; SSE-NEXT: xorps %xmm4, %xmm4
-; SSE-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: andl $56, %ecx
-; SSE-NEXT: shrl $3, %esi
-; SSE-NEXT: andl $56, %esi
-; SSE-NEXT: movq -128(%rsp,%rsi), %rdx
-; SSE-NEXT: shrq %cl, %rdx
-; SSE-NEXT: movl -120(%rsp,%rsi), %eax
-; SSE-NEXT: addl %eax, %eax
-; SSE-NEXT: notl %ecx
-; SSE-NEXT: # kill: def $cl killed $cl killed $ecx
-; SSE-NEXT: shlq %cl, %rax
-; SSE-NEXT: orl %edx, %eax
-; SSE-NEXT: # kill: def $al killed $al killed $rax
-; SSE-NEXT: popq %rcx
-; SSE-NEXT: retq
-;
-; AVX2-LABEL: extractSub512_8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rax
-; AVX2-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX2-NEXT: vmovups (%rdi), %ymm0
-; AVX2-NEXT: vmovups 32(%rdi), %ymm1
-; AVX2-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movl %esi, %ecx
-; AVX2-NEXT: andl $56, %ecx
-; AVX2-NEXT: shrl $3, %esi
-; AVX2-NEXT: andl $56, %esi
-; AVX2-NEXT: shrxq %rcx, -128(%rsp,%rsi), %rax
-; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
-; AVX2-NEXT: notl %ecx
-; AVX2-NEXT: movl -120(%rsp,%rsi), %edx
-; AVX2-NEXT: addl %edx, %edx
-; AVX2-NEXT: shlxq %rcx, %rdx, %rcx
-; AVX2-NEXT: orl %ecx, %eax
-; AVX2-NEXT: # kill: def $al killed $al killed $rax
-; AVX2-NEXT: popq %rcx
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: extractSub512_8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rax
-; AVX512-NEXT: vmovups (%rdi), %ymm0
-; AVX512-NEXT: vmovups 32(%rdi), %ymm1
-; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX512-NEXT: movl %esi, %ecx
-; AVX512-NEXT: andl $56, %ecx
-; AVX512-NEXT: shrl $3, %esi
-; AVX512-NEXT: andl $56, %esi
-; AVX512-NEXT: shrxq %rcx, -128(%rsp,%rsi), %rax
-; AVX512-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
-; AVX512-NEXT: notl %ecx
-; AVX512-NEXT: movl -120(%rsp,%rsi), %edx
-; AVX512-NEXT: addl %edx, %edx
-; AVX512-NEXT: shlxq %rcx, %rdx, %rcx
-; AVX512-NEXT: orl %ecx, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $rax
-; AVX512-NEXT: popq %rcx
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; X64-LABEL: extractSub512_8:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: shrl $3, %esi
+; X64-NEXT: andl $63, %esi
+; X64-NEXT: movzbl (%rdi,%rsi), %eax
+; X64-NEXT: retq
%idx_bounds = and i32 %idx, 511
%idx_align = and i32 %idx_bounds, -8
%ld = load i512, ptr %word, align 8
@@ -445,152 +143,21 @@ define i8 @extractSub512_8(ptr %word, i32 %idx) nounwind {
define i64 @extractSub512_64(ptr %word, i32 %idx) nounwind {
; X86-LABEL: extractSub512_64:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $192, %esp
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: movl (%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 4(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 24(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 32(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%eax), %ebx
-; X86-NEXT: movl 44(%eax), %edi
-; X86-NEXT: movl 48(%eax), %esi
-; X86-NEXT: movl 52(%eax), %edx
-; X86-NEXT: movl 56(%eax), %ecx
-; X86-NEXT: movl 60(%eax), %eax
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 12(%ebp), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrl $3, %ecx
-; X86-NEXT: andl $56, %ecx
-; X86-NEXT: movl 48(%esp,%ecx), %eax
-; X86-NEXT: movl 52(%esp,%ecx), %edx
-; X86-NEXT: leal -12(%ebp), %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shrl $3, %edx
+; X86-NEXT: andl $56, %edx
+; X86-NEXT: movl (%ecx,%edx), %eax
+; X86-NEXT: movl 4(%ecx,%edx), %edx
; X86-NEXT: retl
;
-; SSE-LABEL: extractSub512_64:
-; SSE: # %bb.0:
-; SSE-NEXT: pushq %rax
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
-; SSE-NEXT: movups (%rdi), %xmm0
-; SSE-NEXT: movups 16(%rdi), %xmm1
-; SSE-NEXT: movups 32(%rdi), %xmm2
-; SSE-NEXT: movups 48(%rdi), %xmm3
-; SSE-NEXT: xorps %xmm4, %xmm4
-; SSE-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: shrl $3, %esi
-; SSE-NEXT: andl $56, %esi
-; SSE-NEXT: movq -128(%rsp,%rsi), %rax
-; SSE-NEXT: popq %rcx
-; SSE-NEXT: retq
-;
-; AVX2-LABEL: extractSub512_64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rax
-; AVX2-NEXT: vmovups (%rdi), %ymm0
-; AVX2-NEXT: vmovups 32(%rdi), %ymm1
-; AVX2-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX2-NEXT: shrl $3, %esi
-; AVX2-NEXT: andl $56, %esi
-; AVX2-NEXT: movq -128(%rsp,%rsi), %rax
-; AVX2-NEXT: popq %rcx
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: extractSub512_64:
-; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rax
-; AVX512-NEXT: vmovups (%rdi), %ymm0
-; AVX512-NEXT: vmovups 32(%rdi), %ymm1
-; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: shrl $3, %esi
-; AVX512-NEXT: andl $56, %esi
-; AVX512-NEXT: movq -128(%rsp,%rsi), %rax
-; AVX512-NEXT: popq %rcx
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; X64-LABEL: extractSub512_64:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: shrl $3, %esi
+; X64-NEXT: andl $56, %esi
+; X64-NEXT: movq (%rdi,%rsi), %rax
+; X64-NEXT: retq
%idx_bounds = and i32 %idx, 511
%idx_align = and i32 %idx_bounds, -64
%sh = zext nneg i32 %idx_align to i512
@@ -603,143 +170,35 @@ define i64 @extractSub512_64(ptr %word, i32 %idx) nounwind {
define i128 @extractSub512_128(ptr %word, i32 %idx) nounwind {
; X86-LABEL: extractSub512_128:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $192, %esp
-; X86-NEXT: movl 12(%ebp), %eax
-; X86-NEXT: movl (%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 4(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 24(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 32(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%eax), %ebx
-; X86-NEXT: movl 44(%eax), %edi
-; X86-NEXT: movl 48(%eax), %esi
-; X86-NEXT: movl 52(%eax), %edx
-; X86-NEXT: movl 56(%eax), %ecx
-; X86-NEXT: movl 60(%eax), %eax
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 16(%ebp), %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrl $3, %edi
-; X86-NEXT: andl $48, %edi
-; X86-NEXT: movl 48(%esp,%edi), %ecx
-; X86-NEXT: movl 52(%esp,%edi), %edx
-; X86-NEXT: movl 56(%esp,%edi), %esi
-; X86-NEXT: movl 60(%esp,%edi), %edi
-; X86-NEXT: movl %edi, 12(%eax)
-; X86-NEXT: movl %esi, 8(%eax)
-; X86-NEXT: movl %edx, 4(%eax)
-; X86-NEXT: movl %ecx, (%eax)
-; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shrl $3, %edx
+; X86-NEXT: andl $48, %edx
+; X86-NEXT: movl (%ecx,%edx), %esi
+; X86-NEXT: movl 4(%ecx,%edx), %edi
+; X86-NEXT: movl 8(%ecx,%edx), %ebx
+; X86-NEXT: movl 12(%ecx,%edx), %ecx
+; X86-NEXT: movl %ecx, 12(%eax)
+; X86-NEXT: movl %ebx, 8(%eax)
+; X86-NEXT: movl %edi, 4(%eax)
+; X86-NEXT: movl %esi, (%eax)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
;
-; SSE-LABEL: extractSub512_128:
-; SSE: # %bb.0:
-; SSE-NEXT: pushq %rax
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
-; SSE-NEXT: movups (%rdi), %xmm0
-; SSE-NEXT: movups 16(%rdi), %xmm1
-; SSE-NEXT: movups 32(%rdi), %xmm2
-; SSE-NEXT: movups 48(%rdi), %xmm3
-; SSE-NEXT: xorps %xmm4, %xmm4
-; SSE-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: shrl $3, %esi
-; SSE-NEXT: andl $48, %esi
-; SSE-NEXT: movq -128(%rsp,%rsi), %rax
-; SSE-NEXT: movq -120(%rsp,%rsi), %rdx
-; SSE-NEXT: popq %rcx
-; SSE-NEXT: retq
-;
-; AVX-LABEL: extractSub512_128:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rax
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovups (%rdi), %ymm0
-; AVX-NEXT: vmovups 32(%rdi), %ymm1
-; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: shrl $3, %esi
-; AVX-NEXT: andl $48, %esi
-; AVX-NEXT: movq -128(%rsp,%rsi), %rax
-; AVX-NEXT: movq -120(%rsp,%rsi), %rdx
-; AVX-NEXT: popq %rcx
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; X64-LABEL: extractSub512_128:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: shrl $3, %esi
+; X64-NEXT: andl $48, %esi
+; X64-NEXT: movq (%rdi,%rsi), %rax
+; X64-NEXT: movq 8(%rdi,%rsi), %rdx
+; X64-NEXT: retq
%idx_bounds = and i32 %idx, 511
%idx_align = and i32 %idx_bounds, -128
%sh = zext nneg i32 %idx_align to i512
@@ -752,916 +211,21 @@ define i128 @extractSub512_128(ptr %word, i32 %idx) nounwind {
define i64 @extractSub4096_64(ptr %word, i32 %idx) nounwind {
; X86-LABEL: extractSub4096_64:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $1536, %esp # imm = 0x600
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: movl 4(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 24(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 32(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 44(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 48(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 52(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 56(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 60(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 64(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 68(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 72(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 76(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 80(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 84(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 88(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 92(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 96(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 100(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 104(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 108(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 112(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 116(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 120(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 124(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 128(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 132(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 136(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 140(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 144(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 148(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 152(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 156(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 160(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 164(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 168(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 172(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 176(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 180(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 184(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 188(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 192(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 196(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 200(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 204(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 208(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 212(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 216(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 220(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 224(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 228(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 232(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 236(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 240(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 244(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 248(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 252(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 256(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 260(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 264(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 268(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 272(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 276(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 280(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 284(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 288(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 292(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 296(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 300(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 304(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 308(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 312(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 316(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 320(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 324(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 328(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 332(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 336(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 340(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 344(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 348(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 352(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 356(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 360(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 364(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 368(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 372(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 376(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 380(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl (%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 384(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 388(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 392(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 396(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 400(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 404(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 408(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 412(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 416(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 420(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 424(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 428(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 432(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 436(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 440(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 444(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 448(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 452(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 456(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 460(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 464(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 468(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 472(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 476(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 480(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 484(%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 488(%eax), %ebx
-; X86-NEXT: movl 492(%eax), %edi
-; X86-NEXT: movl 496(%eax), %esi
-; X86-NEXT: movl 500(%eax), %edx
-; X86-NEXT: movl 504(%eax), %ecx
-; X86-NEXT: movl 508(%eax), %eax
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $4032, %ecx # imm = 0xFC0
-; X86-NEXT: andl 12(%ebp), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrl $3, %ecx
-; X86-NEXT: movl 496(%esp,%ecx), %eax
-; X86-NEXT: movl 500(%esp,%ecx), %edx
-; X86-NEXT: leal -12(%ebp), %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4032, %edx # imm = 0xFC0
+; X86-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shrl $3, %edx
+; X86-NEXT: movl (%ecx,%edx), %eax
+; X86-NEXT: movl 4(%ecx,%edx), %edx
; X86-NEXT: retl
;
-; SSE-LABEL: extractSub4096_64:
-; SSE: # %bb.0:
-; SSE-NEXT: subq $1176, %rsp # imm = 0x498
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
-; SSE-NEXT: movups (%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 16(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 32(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 48(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 64(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 80(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 96(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 112(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 128(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT: movups 144(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 160(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 176(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 192(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 208(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 224(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 240(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 256(%rdi), %xmm0
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movups 272(%rdi), %xmm15
-; SSE-NEXT: movups 288(%rdi), %xmm14
-; SSE-NEXT: movups 304(%rdi), %xmm13
-; SSE-NEXT: movups 320(%rdi), %xmm12
-; SSE-NEXT: movups 336(%rdi), %xmm11
-; SSE-NEXT: movups 352(%rdi), %xmm10
-; SSE-NEXT: movups 368(%rdi), %xmm9
-; SSE-NEXT: movups 384(%rdi), %xmm8
-; SSE-NEXT: movups 400(%rdi), %xmm7
-; SSE-NEXT: movups 416(%rdi), %xmm6
-; SSE-NEXT: movups 432(%rdi), %xmm5
-; SSE-NEXT: movups 448(%rdi), %xmm4
-; SSE-NEXT: movups 464(%rdi), %xmm3
-; SSE-NEXT: movups 480(%rdi), %xmm2
-; SSE-NEXT: movups 496(%rdi), %xmm1
-; SSE-NEXT: xorps %xmm0, %xmm0
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm2, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm3, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm4, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm5, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm6, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm7, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm8, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm9, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm10, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm11, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm12, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm13, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm14, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm15, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: andl $4032, %esi # imm = 0xFC0
-; SSE-NEXT: shrl $3, %esi
-; SSE-NEXT: movq 144(%rsp,%rsi), %rax
-; SSE-NEXT: addq $1176, %rsp # imm = 0x498
-; SSE-NEXT: retq
-;
-; AVX2-LABEL: extractSub4096_64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: subq $936, %rsp # imm = 0x3A8
-; AVX2-NEXT: vmovups (%rdi), %ymm0
-; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vmovups 32(%rdi), %ymm1
-; AVX2-NEXT: vmovups 64(%rdi), %ymm2
-; AVX2-NEXT: vmovups 96(%rdi), %ymm3
-; AVX2-NEXT: vmovups 128(%rdi), %ymm4
-; AVX2-NEXT: vmovups 160(%rdi), %ymm5
-; AVX2-NEXT: vmovups 192(%rdi), %ymm6
-; AVX2-NEXT: vmovups 224(%rdi), %ymm7
-; AVX2-NEXT: vmovups 256(%rdi), %ymm8
-; AVX2-NEXT: vmovups 288(%rdi), %ymm9
-; AVX2-NEXT: vmovups 320(%rdi), %ymm10
-; AVX2-NEXT: vmovups 352(%rdi), %ymm11
-; AVX2-NEXT: vmovups 384(%rdi), %ymm12
-; AVX2-NEXT: vmovups 416(%rdi), %ymm13
-; AVX2-NEXT: vmovups 448(%rdi), %ymm14
-; AVX2-NEXT: vmovups 480(%rdi), %ymm15
-; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm15, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm14, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm13, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm12, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm11, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm10, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm9, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm8, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm7, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm6, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm5, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm4, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm3, (%rsp)
-; AVX2-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX2-NEXT: andl $4032, %esi # imm = 0xFC0
-; AVX2-NEXT: shrl $3, %esi
-; AVX2-NEXT: movq -96(%rsp,%rsi), %rax
-; AVX2-NEXT: addq $936, %rsp # imm = 0x3A8
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: extractSub4096_64:
-; AVX512: # %bb.0:
-; AVX512-NEXT: subq $904, %rsp # imm = 0x388
-; AVX512-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX512-NEXT: vmovups (%rdi), %ymm0
-; AVX512-NEXT: vmovups 32(%rdi), %ymm1
-; AVX512-NEXT: vmovups 64(%rdi), %ymm2
-; AVX512-NEXT: vmovups 96(%rdi), %ymm3
-; AVX512-NEXT: vmovups 128(%rdi), %ymm4
-; AVX512-NEXT: vmovups 160(%rdi), %ymm5
-; AVX512-NEXT: vmovups 192(%rdi), %ymm6
-; AVX512-NEXT: vmovups 224(%rdi), %ymm7
-; AVX512-NEXT: vmovups 256(%rdi), %ymm8
-; AVX512-NEXT: vmovups 288(%rdi), %ymm9
-; AVX512-NEXT: vmovups 320(%rdi), %ymm10
-; AVX512-NEXT: vmovups 352(%rdi), %ymm11
-; AVX512-NEXT: vmovups 384(%rdi), %ymm12
-; AVX512-NEXT: vmovups 416(%rdi), %ymm13
-; AVX512-NEXT: andl $4032, %esi # imm = 0xFC0
-; AVX512-NEXT: vmovups 448(%rdi), %ymm14
-; AVX512-NEXT: vmovups 480(%rdi), %ymm15
-; AVX512-NEXT: vxorps %xmm16, %xmm16, %xmm16
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm16, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm15, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm14, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm13, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm12, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm11, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm10, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm9, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm8, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm7, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm6, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm5, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm4, (%rsp)
-; AVX512-NEXT: vmovups %ymm3, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: shrl $3, %esi
-; AVX512-NEXT: movq -128(%rsp,%rsi), %rax
-; AVX512-NEXT: addq $904, %rsp # imm = 0x388
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; X64-LABEL: extractSub4096_64:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: andl $4032, %esi # imm = 0xFC0
+; X64-NEXT: shrl $3, %esi
+; X64-NEXT: movq (%rdi,%rsi), %rax
+; X64-NEXT: retq
%idx_bounds = and i32 %idx, 4095
%idx_align = and i32 %idx_bounds, -64
%sh = zext nneg i32 %idx_align to i4096
diff --git a/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll b/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
index 81c4d5d..c3054a3 100644
--- a/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
+++ b/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
@@ -962,39 +962,22 @@ define void @load_8byte_chunk_of_16byte_alloca_with_zero_upper_half(ptr %src, i6
}
define void @load_1byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-NO-BMI2-LABEL: load_1byte_chunk_of_32byte_alloca_with_zero_upper_half:
-; X64-NO-BMI2: # %bb.0:
-; X64-NO-BMI2-NEXT: movups (%rdi), %xmm0
-; X64-NO-BMI2-NEXT: xorps %xmm1, %xmm1
-; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
-; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NEXT: movl %ecx, %eax
-; X64-NO-BMI2-NEXT: shrb $6, %al
-; X64-NO-BMI2-NEXT: movzbl %al, %eax
-; X64-NO-BMI2-NEXT: movq -72(%rsp,%rax,8), %rax
-; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-NO-BMI2-NEXT: shrq %cl, %rax
-; X64-NO-BMI2-NEXT: movb %al, (%rdx)
-; X64-NO-BMI2-NEXT: retq
-;
-; X64-BMI2-LABEL: load_1byte_chunk_of_32byte_alloca_with_zero_upper_half:
-; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: movups (%rdi), %xmm0
-; X64-BMI2-NEXT: xorps %xmm1, %xmm1
-; X64-BMI2-NEXT: shll $3, %esi
-; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
-; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
-; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
-; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; X64-BMI2-NEXT: movl %esi, %eax
-; X64-BMI2-NEXT: shrb $6, %al
-; X64-BMI2-NEXT: movzbl %al, %eax
-; X64-BMI2-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rax
-; X64-BMI2-NEXT: movb %al, (%rdx)
-; X64-BMI2-NEXT: retq
+; X64-LABEL: load_1byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X64: # %bb.0:
+; X64-NEXT: movups (%rdi), %xmm0
+; X64-NEXT: xorps %xmm1, %xmm1
+; X64-NEXT: leal (,%rsi,8), %eax
+; X64-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT: shrb $6, %al
+; X64-NEXT: movzbl %al, %eax
+; X64-NEXT: leaq -72(%rsp,%rax,8), %rax
+; X64-NEXT: andl $7, %esi
+; X64-NEXT: movzbl (%rsi,%rax), %eax
+; X64-NEXT: movb %al, (%rdx)
+; X64-NEXT: retq
;
; X86-NO-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_32byte_alloca_with_zero_upper_half:
; X86-NO-BMI2-NO-SHLD: # %bb.0:
@@ -3417,7 +3400,6 @@ define void @load_32byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; ALL: {{.*}}
-; X64: {{.*}}
; X64-NO-SHLD: {{.*}}
; X86: {{.*}}
; X86-HAVE-BMI2-HAVE-SHLD: {{.*}}
diff --git a/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll b/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
index 8d36eef..84c2cc6 100644
--- a/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
+++ b/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
@@ -1220,41 +1220,23 @@ define void @load_8byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
; no @load_16byte_chunk_of_16byte_alloca
define void @load_1byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-NO-BMI2-LABEL: load_1byte_chunk_of_32byte_alloca:
-; X64-NO-BMI2: # %bb.0:
-; X64-NO-BMI2-NEXT: movups (%rdi), %xmm0
-; X64-NO-BMI2-NEXT: movups 16(%rdi), %xmm1
-; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
-; X64-NO-BMI2-NEXT: xorps %xmm2, %xmm2
-; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NEXT: movl %ecx, %eax
-; X64-NO-BMI2-NEXT: shrb $6, %al
-; X64-NO-BMI2-NEXT: movzbl %al, %eax
-; X64-NO-BMI2-NEXT: movq -72(%rsp,%rax,8), %rax
-; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-NO-BMI2-NEXT: shrq %cl, %rax
-; X64-NO-BMI2-NEXT: movb %al, (%rdx)
-; X64-NO-BMI2-NEXT: retq
-;
-; X64-BMI2-LABEL: load_1byte_chunk_of_32byte_alloca:
-; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: movups (%rdi), %xmm0
-; X64-BMI2-NEXT: movups 16(%rdi), %xmm1
-; X64-BMI2-NEXT: shll $3, %esi
-; X64-BMI2-NEXT: xorps %xmm2, %xmm2
-; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
-; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
-; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
-; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; X64-BMI2-NEXT: movl %esi, %eax
-; X64-BMI2-NEXT: shrb $6, %al
-; X64-BMI2-NEXT: movzbl %al, %eax
-; X64-BMI2-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rax
-; X64-BMI2-NEXT: movb %al, (%rdx)
-; X64-BMI2-NEXT: retq
+; X64-LABEL: load_1byte_chunk_of_32byte_alloca:
+; X64: # %bb.0:
+; X64-NEXT: movups (%rdi), %xmm0
+; X64-NEXT: movups 16(%rdi), %xmm1
+; X64-NEXT: leal (,%rsi,8), %eax
+; X64-NEXT: xorps %xmm2, %xmm2
+; X64-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT: shrb $6, %al
+; X64-NEXT: movzbl %al, %eax
+; X64-NEXT: leaq -72(%rsp,%rax,8), %rax
+; X64-NEXT: andl $7, %esi
+; X64-NEXT: movzbl (%rsi,%rax), %eax
+; X64-NEXT: movb %al, (%rdx)
+; X64-NEXT: retq
;
; X86-NO-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_32byte_alloca:
; X86-NO-BMI2-NO-SHLD: # %bb.0:
@@ -2156,7 +2138,6 @@ define void @load_16byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst
; no @load_32byte_chunk_of_32byte_alloca
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; ALL: {{.*}}
-; X64: {{.*}}
; X64-NO-SHLD: {{.*}}
; X86: {{.*}}
; X86-NO-SHLD: {{.*}}
diff --git a/llvm/test/MC/AMDGPU/buffer-op-swz-operand.s b/llvm/test/MC/AMDGPU/buffer-op-swz-operand.s
index 8bd9148..4542027 100644
--- a/llvm/test/MC/AMDGPU/buffer-op-swz-operand.s
+++ b/llvm/test/MC/AMDGPU/buffer-op-swz-operand.s
@@ -2,7 +2,7 @@
// CHECK: .amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
buffer_load_dwordx4 v[0:3], v0, s[0:3], 0, offen offset:4092 slc
-// CHECK: buffer_load_b128 v[0:3], v0, s[0:3], 0 offen offset:4092 slc ; <MCInst #13135 BUFFER_LOAD_DWORDX4_OFFEN_gfx11
+// CHECK: buffer_load_b128 v[0:3], v0, s[0:3], 0 offen offset:4092 slc ; <MCInst #{{[0-9]+}} BUFFER_LOAD_DWORDX4_OFFEN_gfx11
// CHECK-NEXT: ; <MCOperand Reg:10104>
// CHECK-NEXT: ; <MCOperand Reg:486>
// CHECK-NEXT: ; <MCOperand Reg:7754>
@@ -11,7 +11,7 @@ buffer_load_dwordx4 v[0:3], v0, s[0:3], 0, offen offset:4092 slc
// CHECK-NEXT: ; <MCOperand Imm:2>
// CHECK-NEXT: ; <MCOperand Imm:0>>
buffer_store_dword v0, v1, s[0:3], 0 offen slc
-// CHECK: buffer_store_b32 v0, v1, s[0:3], 0 offen slc ; <MCInst #14553 BUFFER_STORE_DWORD_OFFEN_gfx11
+// CHECK: buffer_store_b32 v0, v1, s[0:3], 0 offen slc ; <MCInst #{{[0-9]+}} BUFFER_STORE_DWORD_OFFEN_gfx11
// CHECK-NEXT: ; <MCOperand Reg:486>
// CHECK-NEXT: ; <MCOperand Reg:487>
// CHECK-NEXT: ; <MCOperand Reg:7754>
@@ -22,7 +22,7 @@ buffer_store_dword v0, v1, s[0:3], 0 offen slc
; tbuffer ops use autogenerated asm parsers
tbuffer_load_format_xyzw v[0:3], v0, s[0:3], 0 format:[BUF_FMT_32_32_SINT] offen offset:4092 slc
-// CHECK: tbuffer_load_format_xyzw v[0:3], v0, s[0:3], 0 format:[BUF_FMT_32_32_SINT] offen offset:4092 slc ; <MCInst #34095 TBUFFER_LOAD_FORMAT_XYZW_OFFEN_gfx11
+// CHECK: tbuffer_load_format_xyzw v[0:3], v0, s[0:3], 0 format:[BUF_FMT_32_32_SINT] offen offset:4092 slc ; <MCInst #{{[0-9]+}} TBUFFER_LOAD_FORMAT_XYZW_OFFEN_gfx11
// CHECK-NEXT: ; <MCOperand Reg:10104>
// CHECK-NEXT: ; <MCOperand Reg:486>
// CHECK-NEXT: ; <MCOperand Reg:7754>
@@ -32,7 +32,7 @@ tbuffer_load_format_xyzw v[0:3], v0, s[0:3], 0 format:[BUF_FMT_32_32_SINT] offen
// CHECK-NEXT: ; <MCOperand Imm:2>
// CHECK-NEXT: ; <MCOperand Imm:0>>
tbuffer_store_d16_format_x v0, v1, s[0:3], 0 format:[BUF_FMT_10_10_10_2_SNORM] offen slc
-// CHECK: tbuffer_store_d16_format_x v0, v1, s[0:3], 0 format:[BUF_FMT_10_10_10_2_SNORM] offen slc ; <MCInst #34264 TBUFFER_STORE_FORMAT_D16_X_OFFEN_gfx11
+// CHECK: tbuffer_store_d16_format_x v0, v1, s[0:3], 0 format:[BUF_FMT_10_10_10_2_SNORM] offen slc ; <MCInst #{{[0-9]+}} TBUFFER_STORE_FORMAT_D16_X_OFFEN_gfx11
// CHECK-NEXT: ; <MCOperand Reg:486>
// CHECK-NEXT: ; <MCOperand Reg:487>
// CHECK-NEXT: ; <MCOperand Reg:7754>
diff --git a/llvm/test/MC/Disassembler/PowerPC/ppc-encoding-ISAFuture.txt b/llvm/test/MC/Disassembler/PowerPC/ppc-encoding-ISAFuture.txt
index 054489c..f5cb4b7 100644
--- a/llvm/test/MC/Disassembler/PowerPC/ppc-encoding-ISAFuture.txt
+++ b/llvm/test/MC/Disassembler/PowerPC/ppc-encoding-ISAFuture.txt
@@ -286,6 +286,9 @@
#CHECK: xvmulhuh 4, 5, 7
0xf0,0x85,0x3b,0xd0
+#CHECK: mtlpl 3, 4
+0x7c,0x80,0x1a,0x26
+
#CHECK: xxmulmul 8, 3, 4, 2
0xed,0x03,0x22,0x08
diff --git a/llvm/test/MC/Disassembler/PowerPC/ppc64le-encoding-ISAFuture.txt b/llvm/test/MC/Disassembler/PowerPC/ppc64le-encoding-ISAFuture.txt
index 17d1413..f0df8ce 100644
--- a/llvm/test/MC/Disassembler/PowerPC/ppc64le-encoding-ISAFuture.txt
+++ b/llvm/test/MC/Disassembler/PowerPC/ppc64le-encoding-ISAFuture.txt
@@ -280,6 +280,9 @@
#CHECK: xvmulhuh 4, 5, 7
0xd0,0x3b,0x85,0xf0
+#CHECK: mtlpl 3, 4
+0x26,0x1a,0x80,0x7c
+
#CHECK: xxmulmul 8, 3, 4, 2
0x08,0x22,0x03,0xed
diff --git a/llvm/test/MC/PowerPC/ppc-encoding-ISAFuture.s b/llvm/test/MC/PowerPC/ppc-encoding-ISAFuture.s
index e5bc1f4..bc0683e 100644
--- a/llvm/test/MC/PowerPC/ppc-encoding-ISAFuture.s
+++ b/llvm/test/MC/PowerPC/ppc-encoding-ISAFuture.s
@@ -403,6 +403,10 @@
#CHECK-BE: xvmulhuh 4, 5, 7 # encoding: [0xf0,0x85,0x3b,0xd0]
#CHECK-LE: xvmulhuh 4, 5, 7 # encoding: [0xd0,0x3b,0x85,0xf0]
+ mtlpl 3, 4
+#CHECK-BE: mtlpl 3, 4 # encoding: [0x7c,0x80,0x1a,0x26]
+#CHECK-LE: mtlpl 3, 4 # encoding: [0x26,0x1a,0x80,0x7c]
+
xxmulmul 8, 3, 4, 2
#CHECK-BE: xxmulmul 8, 3, 4, 2 # encoding: [0xed,0x03,0x22,0x08]
#CHECK-LE: xxmulmul 8, 3, 4, 2 # encoding: [0x08,0x22,0x03,0xed]
diff --git a/llvm/test/MC/RISCV/xqcili-linker-relaxation.s b/llvm/test/MC/RISCV/xqcili-linker-relaxation.s
new file mode 100644
index 0000000..ace6779
--- /dev/null
+++ b/llvm/test/MC/RISCV/xqcili-linker-relaxation.s
@@ -0,0 +1,37 @@
+# RUN: llvm-mc --triple=riscv32 -mattr=+relax,+experimental-xqcili \
+# RUN: %s -filetype=obj -o - -riscv-add-build-attributes \
+# RUN: | llvm-objdump -dr -M no-aliases - \
+# RUN: | FileCheck %s
+
+## This tests that we correctly emit relocations for linker relaxation when
+## emitting `QC.E.LI` and `QC.LI`.
+
+ .section .text.ex1, "ax", @progbits
+# CHECK-LABEL: <.text.ex1>:
+ blez a1, .L1
+# CHECK-NEXT: bge zero, a1, 0x0 <.text.ex1>
+# CHECK-NEXT: R_RISCV_BRANCH .L1{{$}}
+ qc.e.li a0, sym
+# CHECK-NEXT: qc.e.li a0, 0x0
+# CHECK-NEXT: R_RISCV_VENDOR QUALCOMM{{$}}
+# CHECK-NEXT: R_RISCV_CUSTOM194 sym{{$}}
+# CHECK-NEXT: R_RISCV_RELAX *ABS*{{$}}
+.L1:
+# CHECK: <.L1>:
+ ret
+# CHECK-NEXT: c.jr ra
+
+ .section .text.ex2, "ax", @progbits
+# CHECK-LABEL: <.text.ex2>:
+ blez a1, .L2
+# CHECK-NEXT: bge zero, a1, 0x0 <.text.ex2>
+# CHECK-NEXT: R_RISCV_BRANCH .L2{{$}}
+ qc.li a0, %qc.abs20(sym)
+# CHECK-NEXT: qc.li a0, 0x0
+# CHECK-NEXT: R_RISCV_VENDOR QUALCOMM{{$}}
+# CHECK-NEXT: R_RISCV_CUSTOM192 sym{{$}}
+# CHECK-NEXT: R_RISCV_RELAX *ABS*{{$}}
+.L2:
+# CHECK: <.L2>:
+ ret
+# CHECK-NEXT: c.jr ra
diff --git a/llvm/test/Transforms/InstCombine/assume.ll b/llvm/test/Transforms/InstCombine/assume.ll
index 7b0b871..cc87d65 100644
--- a/llvm/test/Transforms/InstCombine/assume.ll
+++ b/llvm/test/Transforms/InstCombine/assume.ll
@@ -10,8 +10,8 @@ declare void @llvm.assume(i1) #1
; Check that the assume has not been removed:
-define i32 @foo1(ptr %a) #0 {
-; DEFAULT-LABEL: @foo1(
+define i32 @align_to_bundle(ptr %a) #0 {
+; DEFAULT-LABEL: @align_to_bundle(
; DEFAULT-NEXT: [[T0:%.*]] = load i32, ptr [[A:%.*]], align 4
; DEFAULT-NEXT: [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
; DEFAULT-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
@@ -19,7 +19,7 @@ define i32 @foo1(ptr %a) #0 {
; DEFAULT-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; DEFAULT-NEXT: ret i32 [[T0]]
;
-; BUNDLES-LABEL: @foo1(
+; BUNDLES-LABEL: @align_to_bundle(
; BUNDLES-NEXT: [[T0:%.*]] = load i32, ptr [[A:%.*]], align 4
; BUNDLES-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 32) ]
; BUNDLES-NEXT: ret i32 [[T0]]
@@ -32,6 +32,28 @@ define i32 @foo1(ptr %a) #0 {
ret i32 %t0
}
+define i32 @align_to_bundle_ptrtoaddr(ptr %a) #0 {
+; DEFAULT-LABEL: @align_to_bundle_ptrtoaddr(
+; DEFAULT-NEXT: [[T0:%.*]] = load i32, ptr [[A:%.*]], align 4
+; DEFAULT-NEXT: [[PTRINT:%.*]] = ptrtoaddr ptr [[A]] to i64
+; DEFAULT-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; DEFAULT-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; DEFAULT-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
+; DEFAULT-NEXT: ret i32 [[T0]]
+;
+; BUNDLES-LABEL: @align_to_bundle_ptrtoaddr(
+; BUNDLES-NEXT: [[T0:%.*]] = load i32, ptr [[A:%.*]], align 4
+; BUNDLES-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 32) ]
+; BUNDLES-NEXT: ret i32 [[T0]]
+;
+ %t0 = load i32, ptr %a, align 4
+ %ptrint = ptrtoaddr ptr %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
+ ret i32 %t0
+}
+
define i32 @align_assume_trunc_cond(ptr %a) #0 {
; DEFAULT-LABEL: @align_assume_trunc_cond(
; DEFAULT-NEXT: [[T0:%.*]] = load i32, ptr [[A:%.*]], align 4
diff --git a/llvm/test/Transforms/InstCombine/ptrtoaddr.ll b/llvm/test/Transforms/InstCombine/ptrtoaddr.ll
index a7434a2..adf3aa1 100644
--- a/llvm/test/Transforms/InstCombine/ptrtoaddr.ll
+++ b/llvm/test/Transforms/InstCombine/ptrtoaddr.ll
@@ -237,3 +237,75 @@ define ptr addrspace(1) @gep_sub_ptrtoaddr_different_obj_addrsize(ptr addrspace(
call void @use.i32(i32 %addr)
ret ptr addrspace(1) %gep
}
+
+define i64 @ptrtoaddr_of_ptrmask(ptr %p, i64 %mask) {
+; CHECK-LABEL: define i64 @ptrtoaddr_of_ptrmask(
+; CHECK-SAME: ptr [[P:%.*]], i64 [[MASK:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoaddr ptr [[P]] to i64
+; CHECK-NEXT: [[ADDR:%.*]] = and i64 [[MASK]], [[TMP1]]
+; CHECK-NEXT: ret i64 [[ADDR]]
+;
+ %masked = call ptr @llvm.ptrmask(ptr %p, i64 %mask)
+ %addr = ptrtoaddr ptr %masked to i64
+ ret i64 %addr
+}
+
+define i32 @ptrtoaddr_of_ptrmask_addrsize(ptr addrspace(1) %p, i32 %mask) {
+; CHECK-LABEL: define i32 @ptrtoaddr_of_ptrmask_addrsize(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]], i32 [[MASK:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoaddr ptr addrspace(1) [[P]] to i32
+; CHECK-NEXT: [[ADDR:%.*]] = and i32 [[MASK]], [[TMP1]]
+; CHECK-NEXT: ret i32 [[ADDR]]
+;
+ %masked = call ptr addrspace(1) @llvm.ptrmask(ptr addrspace(1) %p, i32 %mask)
+ %addr = ptrtoaddr ptr addrspace(1) %masked to i32
+ ret i32 %addr
+}
+
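+; ptrtoaddr of a GEP based on an inttoptr source folds to int + offset when
+; the address width matches the pointer width.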
+define i64 @ptrtoaddr_of_gep_of_inttoptr(i64 %int, i64 %offset) {
+; CHECK-LABEL: define i64 @ptrtoaddr_of_gep_of_inttoptr(
+; CHECK-SAME: i64 [[INT:%.*]], i64 [[OFFSET:%.*]]) {
+; CHECK-NEXT: [[ADDR:%.*]] = add i64 [[INT]], [[OFFSET]]
+; CHECK-NEXT: ret i64 [[ADDR]]
+;
+ %ptr = inttoptr i64 %int to ptr
+ %gep = getelementptr i8, ptr %ptr, i64 %offset
+ %addr = ptrtoaddr ptr %gep to i64
+ ret i64 %addr
+}
+
+; FIXME: This could be supported by truncating %int before performing the
+; arithmetic.
+define i32 @ptrtoaddr_of_gep_of_inttoptr_addrsize(i64 %int, i32 %offset) {
+; CHECK-LABEL: define i32 @ptrtoaddr_of_gep_of_inttoptr_addrsize(
+; CHECK-SAME: i64 [[INT:%.*]], i32 [[OFFSET:%.*]]) {
+; CHECK-NEXT: [[PTR:%.*]] = inttoptr i64 [[INT]] to ptr addrspace(1)
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr addrspace(1) [[PTR]], i32 [[OFFSET]]
+; CHECK-NEXT: [[ADDR:%.*]] = ptrtoaddr ptr addrspace(1) [[GEP]] to i32
+; CHECK-NEXT: ret i32 [[ADDR]]
+;
+ %ptr = inttoptr i64 %int to ptr addrspace(1)
+ %gep = getelementptr i8, ptr addrspace(1) %ptr, i32 %offset
+ %addr = ptrtoaddr ptr addrspace(1) %gep to i32
+ ret i32 %addr
+}
+
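+; The address of a GEP on a null base is just the offset.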
+define i64 @ptrtoaddr_of_gep_of_null(i64 %offset) {
+; CHECK-LABEL: define i64 @ptrtoaddr_of_gep_of_null(
+; CHECK-SAME: i64 [[OFFSET:%.*]]) {
+; CHECK-NEXT: ret i64 [[OFFSET]]
+;
+ %gep = getelementptr i8, ptr null, i64 %offset
+ %addr = ptrtoaddr ptr %gep to i64
+ ret i64 %addr
+}
+
+define i32 @ptrtoaddr_of_gep_of_null_addrsize(i32 %offset) {
+; CHECK-LABEL: define i32 @ptrtoaddr_of_gep_of_null_addrsize(
+; CHECK-SAME: i32 [[OFFSET:%.*]]) {
+; CHECK-NEXT: ret i32 [[OFFSET]]
+;
+ %gep = getelementptr i8, ptr addrspace(1) null, i32 %offset
+ %addr = ptrtoaddr ptr addrspace(1) %gep to i32
+ ret i32 %addr
+}
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll
index 9f9e3f9..77a7f0d 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll
@@ -1,26 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
-; RUN: opt < %s -passes=instsimplify -use-constant-int-for-fixed-length-splat -S | FileCheck %s
-
-declare i32 @llvm.vector.reduce.add.v1i32(<1 x i32> %a)
-declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %a)
-declare i32 @llvm.vector.reduce.mul.v1i32(<1 x i32> %a)
-declare i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %a)
-declare i32 @llvm.vector.reduce.and.v1i32(<1 x i32> %a)
-declare i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %a)
-declare i32 @llvm.vector.reduce.or.v1i32(<1 x i32> %a)
-declare i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %a)
-declare i32 @llvm.vector.reduce.xor.v1i32(<1 x i32> %a)
-declare i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %a)
-declare i32 @llvm.vector.reduce.smin.v1i32(<1 x i32> %a)
-declare i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %a)
-declare i32 @llvm.vector.reduce.smax.v1i32(<1 x i32> %a)
-declare i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %a)
-declare i32 @llvm.vector.reduce.umin.v1i32(<1 x i32> %a)
-declare i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %a)
-declare i32 @llvm.vector.reduce.umax.v1i32(<1 x i32> %a)
-declare i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %a)
-
+; RUN: opt < %s -passes=instsimplify -use-constant-int-for-fixed-length-splat -use-constant-int-for-scalable-splat -S | FileCheck %s
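+
+; Reductions of scalable-vector constant operands are not constant folded
+; yet; the calls in the *_scalable_vector tests below are expected to remain.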
define i32 @add_0() {
; CHECK-LABEL: @add_0(
@@ -30,6 +10,15 @@ define i32 @add_0() {
ret i32 %x
}
+define i32 @add_0_scalable_vector() {
+; CHECK-LABEL: @add_0_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+ ret i32 %x
+}
+
define i32 @add_1() {
; CHECK-LABEL: @add_1(
; CHECK-NEXT: ret i32 8
@@ -38,6 +27,15 @@ define i32 @add_1() {
ret i32 %x
}
+define i32 @add_1_scalable_vector() {
+; CHECK-LABEL: @add_1_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+ ret i32 %x
+}
+
define i32 @add_inc() {
; CHECK-LABEL: @add_inc(
; CHECK-NEXT: ret i32 18
@@ -63,8 +61,17 @@ define i32 @add_undef() {
ret i32 %x
}
-define i32 @add_undef1() {
-; CHECK-LABEL: @add_undef1(
+define i32 @add_undef_scalable_vector() {
+; CHECK-LABEL: @add_undef_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> undef)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> undef)
+ ret i32 %x
+}
+
+define i32 @add_undef_elt() {
+; CHECK-LABEL: @add_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
@@ -80,8 +87,17 @@ define i32 @add_poison() {
ret i32 %x
}
-define i32 @add_poison1() {
-; CHECK-LABEL: @add_poison1(
+define i32 @add_poison_scalable_vector() {
+; CHECK-LABEL: @add_poison_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> poison)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> poison)
+ ret i32 %x
+}
+
+define i32 @add_poison_elt() {
+; CHECK-LABEL: @add_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> <i32 1, i32 1, i32 poison, i32 1, i32 1, i32 42, i32 1, i32 1>)
@@ -105,6 +121,15 @@ define i32 @mul_0() {
ret i32 %x
}
+define i32 @mul_0_scalable_vector() {
+; CHECK-LABEL: @mul_0_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+ ret i32 %x
+}
+
define i32 @mul_1() {
; CHECK-LABEL: @mul_1(
; CHECK-NEXT: ret i32 1
@@ -113,6 +138,15 @@ define i32 @mul_1() {
ret i32 %x
}
+define i32 @mul_1_scalable_vector() {
+; CHECK-LABEL: @mul_1_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+ ret i32 %x
+}
+
define i32 @mul_inc() {
; CHECK-LABEL: @mul_inc(
; CHECK-NEXT: ret i32 40320
@@ -138,8 +172,17 @@ define i32 @mul_undef() {
ret i32 %x
}
-define i32 @mul_undef1() {
-; CHECK-LABEL: @mul_undef1(
+define i32 @mul_undef_scalable_vector() {
+; CHECK-LABEL: @mul_undef_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> undef)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> undef)
+ ret i32 %x
+}
+
+define i32 @mul_undef_elt() {
+; CHECK-LABEL: @mul_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
@@ -155,8 +198,17 @@ define i32 @mul_poison() {
ret i32 %x
}
-define i32 @mul_poison1() {
-; CHECK-LABEL: @mul_poison1(
+define i32 @mul_poison_scalable_vector() {
+; CHECK-LABEL: @mul_poison_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> poison)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> poison)
+ ret i32 %x
+}
+
+define i32 @mul_poison_elt() {
+; CHECK-LABEL: @mul_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> <i32 0, i32 1, i32 poison, i32 1, i32 1, i32 1, i32 1, i32 1>)
@@ -171,6 +223,15 @@ define i32 @and_0() {
ret i32 %x
}
+define i32 @and_0_scalable_vector() {
+; CHECK-LABEL: @and_0_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+ ret i32 %x
+}
+
define i32 @and_1() {
; CHECK-LABEL: @and_1(
; CHECK-NEXT: ret i32 1
@@ -179,6 +240,15 @@ define i32 @and_1() {
ret i32 %x
}
+define i32 @and_1_scalable_vector() {
+; CHECK-LABEL: @and_1_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+ ret i32 %x
+}
+
define i32 @and_inc() {
; CHECK-LABEL: @and_inc(
; CHECK-NEXT: ret i32 0
@@ -204,8 +274,17 @@ define i32 @and_undef() {
ret i32 %x
}
-define i32 @and_undef1() {
-; CHECK-LABEL: @and_undef1(
+define i32 @and_undef_scalable_vector() {
+; CHECK-LABEL: @and_undef_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> undef)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> undef)
+ ret i32 %x
+}
+
+define i32 @and_undef_elt() {
+; CHECK-LABEL: @and_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
@@ -221,8 +300,17 @@ define i32 @and_poison() {
ret i32 %x
}
-define i32 @and_poison1() {
-; CHECK-LABEL: @and_poison1(
+define i32 @and_poison_scalable_vector() {
+; CHECK-LABEL: @and_poison_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> poison)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> poison)
+ ret i32 %x
+}
+
+define i32 @and_poison_elt() {
+; CHECK-LABEL: @and_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> <i32 -1, i32 1, i32 poison, i32 1, i32 1, i32 1, i32 1, i32 1>)
@@ -237,6 +325,15 @@ define i32 @or_0() {
ret i32 %x
}
+define i32 @or_0_scalable_vector() {
+; CHECK-LABEL: @or_0_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+ ret i32 %x
+}
+
define i32 @or_1() {
; CHECK-LABEL: @or_1(
; CHECK-NEXT: ret i32 1
@@ -245,6 +342,15 @@ define i32 @or_1() {
ret i32 %x
}
+define i32 @or_1_scalable_vector() {
+; CHECK-LABEL: @or_1_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+ ret i32 %x
+}
+
define i32 @or_inc() {
; CHECK-LABEL: @or_inc(
; CHECK-NEXT: ret i32 -1
@@ -270,8 +376,17 @@ define i32 @or_undef() {
ret i32 %x
}
-define i32 @or_undef1() {
-; CHECK-LABEL: @or_undef1(
+define i32 @or_undef_scalable_vector() {
+; CHECK-LABEL: @or_undef_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> undef)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> undef)
+ ret i32 %x
+}
+
+define i32 @or_undef_elt() {
+; CHECK-LABEL: @or_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
@@ -287,8 +402,17 @@ define i32 @or_poison() {
ret i32 %x
}
-define i32 @or_poison1() {
-; CHECK-LABEL: @or_poison1(
+define i32 @or_poison_scalable_vector() {
+; CHECK-LABEL: @or_poison_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> poison)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> poison)
+ ret i32 %x
+}
+
+define i32 @or_poison_elt() {
+; CHECK-LABEL: @or_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> <i32 1, i32 0, i32 poison, i32 1, i32 1, i32 1, i32 1, i32 1>)
@@ -303,6 +427,15 @@ define i32 @xor_0() {
ret i32 %x
}
+define i32 @xor_0_scalable_vector() {
+; CHECK-LABEL: @xor_0_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+ ret i32 %x
+}
+
define i32 @xor_1() {
; CHECK-LABEL: @xor_1(
; CHECK-NEXT: ret i32 0
@@ -311,6 +444,15 @@ define i32 @xor_1() {
ret i32 %x
}
+define i32 @xor_1_scalable_vector() {
+; CHECK-LABEL: @xor_1_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+ ret i32 %x
+}
+
define i32 @xor_inc() {
; CHECK-LABEL: @xor_inc(
; CHECK-NEXT: ret i32 10
@@ -336,8 +478,17 @@ define i32 @xor_undef() {
ret i32 %x
}
-define i32 @xor_undef1() {
-; CHECK-LABEL: @xor_undef1(
+define i32 @xor_undef_scalable_vector() {
+; CHECK-LABEL: @xor_undef_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> undef)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> undef)
+ ret i32 %x
+}
+
+define i32 @xor_undef_elt() {
+; CHECK-LABEL: @xor_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
@@ -353,8 +504,17 @@ define i32 @xor_poison() {
ret i32 %x
}
-define i32 @xor_poison1() {
-; CHECK-LABEL: @xor_poison1(
+define i32 @xor_poison_scalable_vector() {
+; CHECK-LABEL: @xor_poison_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> poison)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> poison)
+ ret i32 %x
+}
+
+define i32 @xor_poison_elt() {
+; CHECK-LABEL: @xor_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> <i32 poison, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
@@ -369,6 +529,15 @@ define i32 @smin_0() {
ret i32 %x
}
+define i32 @smin_0_scalable_vector() {
+; CHECK-LABEL: @smin_0_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+ ret i32 %x
+}
+
define i32 @smin_1() {
; CHECK-LABEL: @smin_1(
; CHECK-NEXT: ret i32 1
@@ -377,6 +546,15 @@ define i32 @smin_1() {
ret i32 %x
}
+define i32 @smin_1_scalable_vector() {
+; CHECK-LABEL: @smin_1_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+ ret i32 %x
+}
+
define i32 @smin_inc() {
; CHECK-LABEL: @smin_inc(
; CHECK-NEXT: ret i32 -6
@@ -402,8 +580,17 @@ define i32 @smin_undef() {
ret i32 %x
}
-define i32 @smin_undef1() {
-; CHECK-LABEL: @smin_undef1(
+define i32 @smin_undef_scalable_vector() {
+; CHECK-LABEL: @smin_undef_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> undef)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> undef)
+ ret i32 %x
+}
+
+define i32 @smin_undef_elt() {
+; CHECK-LABEL: @smin_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
@@ -419,8 +606,17 @@ define i32 @smin_poison() {
ret i32 %x
}
-define i32 @smin_poison1() {
-; CHECK-LABEL: @smin_poison1(
+define i32 @smin_poison_scalable_vector() {
+; CHECK-LABEL: @smin_poison_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> poison)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> poison)
+ ret i32 %x
+}
+
+define i32 @smin_poison_elt() {
+; CHECK-LABEL: @smin_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 poison, i32 1, i32 1, i32 1>)
@@ -435,6 +631,15 @@ define i32 @smax_0() {
ret i32 %x
}
+define i32 @smax_0_scalable_vector() {
+; CHECK-LABEL: @smax_0_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smax.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.smax.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+ ret i32 %x
+}
+
define i32 @smax_1() {
; CHECK-LABEL: @smax_1(
; CHECK-NEXT: ret i32 1
@@ -443,6 +648,15 @@ define i32 @smax_1() {
ret i32 %x
}
+define i32 @smax_1_scalable_vector() {
+; CHECK-LABEL: @smax_1_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smax.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.smax.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+ ret i32 %x
+}
+
define i32 @smax_inc() {
; CHECK-LABEL: @smax_inc(
; CHECK-NEXT: ret i32 8
@@ -468,8 +682,17 @@ define i32 @smax_undef() {
ret i32 %x
}
-define i32 @smax_undef1() {
-; CHECK-LABEL: @smax_undef1(
+define i32 @smax_undef_scalable_vector() {
+; CHECK-LABEL: @smax_undef_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smax.nxv8i32(<vscale x 8 x i32> undef)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.smax.nxv8i32(<vscale x 8 x i32> undef)
+ ret i32 %x
+}
+
+define i32 @smax_undef_elt() {
+; CHECK-LABEL: @smax_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
@@ -485,8 +708,17 @@ define i32 @smax_poison() {
ret i32 %x
}
-define i32 @smax_poison1() {
-; CHECK-LABEL: @smax_poison1(
+define i32 @smax_poison_scalable_vector() {
+; CHECK-LABEL: @smax_poison_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smax.nxv8i32(<vscale x 8 x i32> poison)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.smax.nxv8i32(<vscale x 8 x i32> poison)
+ ret i32 %x
+}
+
+define i32 @smax_poison_elt() {
+; CHECK-LABEL: @smax_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> <i32 1, i32 1, i32 0, i32 1, i32 1, i32 1, i32 1, i32 poison>)
@@ -501,6 +733,15 @@ define i32 @umin_0() {
ret i32 %x
}
+define i32 @umin_0_scalable_vector() {
+; CHECK-LABEL: @umin_0_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umin.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.umin.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+ ret i32 %x
+}
+
define i32 @umin_1() {
; CHECK-LABEL: @umin_1(
; CHECK-NEXT: ret i32 1
@@ -509,6 +750,15 @@ define i32 @umin_1() {
ret i32 %x
}
+define i32 @umin_1_scalable_vector() {
+; CHECK-LABEL: @umin_1_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umin.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.umin.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+ ret i32 %x
+}
+
define i32 @umin_inc() {
; CHECK-LABEL: @umin_inc(
; CHECK-NEXT: ret i32 1
@@ -534,8 +784,17 @@ define i32 @umin_undef() {
ret i32 %x
}
-define i32 @umin_undef1() {
-; CHECK-LABEL: @umin_undef1(
+define i32 @umin_undef_scalable_vector() {
+; CHECK-LABEL: @umin_undef_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umin.nxv8i32(<vscale x 8 x i32> undef)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.umin.nxv8i32(<vscale x 8 x i32> undef)
+ ret i32 %x
+}
+
+define i32 @umin_undef_elt() {
+; CHECK-LABEL: @umin_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
@@ -551,8 +810,17 @@ define i32 @umin_poison() {
ret i32 %x
}
-define i32 @umin_poison1() {
-; CHECK-LABEL: @umin_poison1(
+define i32 @umin_poison_scalable_vector() {
+; CHECK-LABEL: @umin_poison_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umin.nxv8i32(<vscale x 8 x i32> poison)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.umin.nxv8i32(<vscale x 8 x i32> poison)
+ ret i32 %x
+}
+
+define i32 @umin_poison_elt() {
+; CHECK-LABEL: @umin_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> <i32 1, i32 1, i32 -1, i32 poison, i32 1, i32 1, i32 1, i32 1>)
@@ -567,6 +835,15 @@ define i32 @umax_0() {
ret i32 %x
}
+define i32 @umax_0_scalable_vector() {
+; CHECK-LABEL: @umax_0_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> zeroinitializer)
+ ret i32 %x
+}
+
define i32 @umax_1() {
; CHECK-LABEL: @umax_1(
; CHECK-NEXT: ret i32 1
@@ -575,6 +852,15 @@ define i32 @umax_1() {
ret i32 %x
}
+define i32 @umax_1_scalable_vector() {
+; CHECK-LABEL: @umax_1_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> splat (i32 1))
+ ret i32 %x
+}
+
define i32 @umax_inc() {
; CHECK-LABEL: @umax_inc(
; CHECK-NEXT: ret i32 -3
@@ -600,8 +886,17 @@ define i32 @umax_undef() {
ret i32 %x
}
-define i32 @umax_undef1() {
-; CHECK-LABEL: @umax_undef1(
+define i32 @umax_undef_scalable_vector() {
+; CHECK-LABEL: @umax_undef_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> undef)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> undef)
+ ret i32 %x
+}
+
+define i32 @umax_undef_elt() {
+; CHECK-LABEL: @umax_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
@@ -617,8 +912,17 @@ define i32 @umax_poison() {
ret i32 %x
}
-define i32 @umax_poison1() {
-; CHECK-LABEL: @umax_poison1(
+define i32 @umax_poison_scalable_vector() {
+; CHECK-LABEL: @umax_poison_scalable_vector(
+; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> poison)
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %x = call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> poison)
+ ret i32 %x
+}
+
+define i32 @umax_poison_elt() {
+; CHECK-LABEL: @umax_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> <i32 1, i32 1, i32 poison, i32 1, i32 1, i32 poison, i32 1, i32 1>)
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
index 964a257..fafa82c 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
@@ -2800,6 +2800,88 @@ exit:
ret i64 %r.0.lcssa
}
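+
+; Mul-accumulate reduction where one operand of the multiply (%c) is
+; loop-invariant (a live-in value).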
+define i32 @reduction_expression_ext_mulacc_livein(ptr %a, i16 %c) {
+; CHECK-LABEL: define i32 @reduction_expression_ext_mulacc_livein(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[C:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[C]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i16> [[BROADCAST_SPLATINSERT]], <4 x i16> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP0]], align 1
+; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[WIDE_LOAD]] to <4 x i16>
+; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i16> [[BROADCAST_SPLAT]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[TMP2]] to <4 x i32>
+; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]])
+; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[FOR_EXIT:.*]]
+; CHECK: [[FOR_EXIT]]:
+; CHECK-NEXT: ret i32 [[TMP5]]
+;
+; CHECK-INTERLEAVED-LABEL: define i32 @reduction_expression_ext_mulacc_livein(
+; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], i16 [[C:%.*]]) {
+; CHECK-INTERLEAVED-NEXT: [[ENTRY:.*:]]
+; CHECK-INTERLEAVED-NEXT: br label %[[VECTOR_PH:.*]]
+; CHECK-INTERLEAVED: [[VECTOR_PH]]:
+; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[C]], i64 0
+; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i16> [[BROADCAST_SPLATINSERT]], <4 x i16> poison, <4 x i32> zeroinitializer
+; CHECK-INTERLEAVED-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK-INTERLEAVED: [[VECTOR_BODY]]:
+; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
+; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 4
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP0]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <4 x i8> [[WIDE_LOAD]] to <4 x i16>
+; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[WIDE_LOAD2]] to <4 x i16>
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul <4 x i16> [[BROADCAST_SPLAT]], [[TMP2]]
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul <4 x i16> [[BROADCAST_SPLAT]], [[TMP3]]
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <4 x i16> [[TMP4]] to <4 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP6]])
+; CHECK-INTERLEAVED-NEXT: [[TMP8]] = add i32 [[VEC_PHI]], [[TMP7]]
+; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <4 x i16> [[TMP5]] to <4 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP9]])
+; CHECK-INTERLEAVED-NEXT: [[TMP11]] = add i32 [[VEC_PHI1]], [[TMP10]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP11]], [[TMP8]]
+; CHECK-INTERLEAVED-NEXT: br label %[[FOR_EXIT:.*]]
+; CHECK-INTERLEAVED: [[FOR_EXIT]]:
+; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %accum = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %gep.a = getelementptr i8, ptr %a, i64 %iv
+ %load.a = load i8, ptr %gep.a, align 1
+ %ext.a = zext i8 %load.a to i16
+ %mul = mul i16 %c, %ext.a
+ %mul.ext = zext i16 %mul to i32
+ %add = add i32 %mul.ext, %accum
+ %iv.next = add i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 1024
+ br i1 %exitcond.not, label %for.exit, label %for.body
+
+for.exit: ; preds = %for.body
+ ret i32 %add
+}
+
declare float @llvm.fmuladd.f32(float, float, float)
!6 = distinct !{!6, !7, !8}
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll
index 06b0448..291ada8 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll
@@ -800,3 +800,545 @@ exit:
%r.0.lcssa = phi i64 [ %rdx.next, %loop ]
ret i64 %r.0.lcssa
}
+
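+; The constant 63 can be extended from i8, so the mulacc expression extends
+; both the load and the constant.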
+define i32 @print_mulacc_extended_const(ptr %start, ptr %end) {
+; CHECK-LABEL: 'print_mulacc_extended_const'
+; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' {
+; CHECK-NEXT: Live-in vp<%0> = VF
+; CHECK-NEXT: Live-in vp<%1> = VF * UF
+; CHECK-NEXT: Live-in vp<%2> = vector-trip-count
+; CHECK-NEXT: vp<%3> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: EMIT vp<%3> = EXPAND SCEV (1 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64))
+; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%start> + vp<%2> * ir<1>
+; CHECK-NEXT: EMIT vp<%5> = reduction-start-vector ir<0>, ir<0>, ir<1>
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<%6> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<%5>, vp<%9>
+; CHECK-NEXT: vp<%7> = SCALAR-STEPS vp<%6>, ir<1>, vp<%0>
+; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%7>
+; CHECK-NEXT: vp<%8> = vector-pointer vp<%next.gep>
+; CHECK-NEXT: WIDEN ir<%l> = load vp<%8>
+; CHECK-NEXT: EXPRESSION vp<%9> = ir<%red> + reduce.add (mul (ir<%l> zext to i32), (ir<63> zext to i32))
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%6>, vp<%1>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%2>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%11> = compute-reduction-result ir<%red>, vp<%9>
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<%3>, vp<%2>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: IR %red.next.lcssa = phi i32 [ %red.next, %loop ] (extra operand: vp<%11> from middle.block)
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: scalar.ph:
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%4>, middle.block ], [ ir<%start>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%11>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<loop>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<loop>:
+; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT: IR %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph)
+; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1
+; CHECK-NEXT: IR %l.ext = zext i8 %l to i32
+; CHECK-NEXT: IR %mul = mul i32 %l.ext, 63
+; CHECK-NEXT: IR %red.next = add i32 %red, %mul
+; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1
+; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK: VPlan 'Final VPlan for VF={4},UF={1}' {
+; CHECK-NEXT: Live-in ir<%1> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: IR %start2 = ptrtoint ptr %start to i64
+; CHECK-NEXT: IR %end1 = ptrtoint ptr %end to i64
+; CHECK-NEXT: IR %0 = add i64 %end1, 1
+; CHECK-NEXT: IR %1 = sub i64 %0, %start2
+; CHECK-NEXT: EMIT vp<%min.iters.check> = icmp ult ir<%1>, ir<4>
+; CHECK-NEXT: EMIT branch-on-cond vp<%min.iters.check>
+; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<%1>, ir<4>
+; CHECK-NEXT: EMIT vp<%n.vec> = sub ir<%1>, vp<%n.mod.vf>
+; CHECK-NEXT: vp<%3> = DERIVED-IV ir<%start> + vp<%n.vec> * ir<1>
+; CHECK-NEXT: Successor(s): vector.body
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ]
+; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi ir<0>, ir<%red.next>
+; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%index>
+; CHECK-NEXT: WIDEN ir<%l> = load vp<%next.gep>
+; CHECK-NEXT: WIDEN-CAST ir<%l.ext> = zext ir<%l> to i32
+; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%l.ext>, ir<63>
+; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + reduce.add (ir<%mul>)
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<4>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%n.vec>
+; CHECK-NEXT: Successor(s): middle.block, vector.body
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%5> = compute-reduction-result ir<%red>, ir<%red.next>
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%1>, vp<%n.vec>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, ir-bb<scalar.ph>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: IR %red.next.lcssa = phi i32 [ %red.next, %loop ] (extra operand: vp<%5> from middle.block)
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<scalar.ph>:
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%3>, middle.block ], [ ir<%start>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%5>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<loop>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<loop>:
+; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %scalar.ph ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from ir-bb<scalar.ph>)
+; CHECK-NEXT: IR %red = phi i32 [ 0, %scalar.ph ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from ir-bb<scalar.ph>)
+; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1
+; CHECK-NEXT: IR %l.ext = zext i8 %l to i32
+; CHECK-NEXT: IR %mul = mul i32 %l.ext, 63
+; CHECK-NEXT: IR %red.next = add i32 %red, %mul
+; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1
+; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+entry:
+ br label %loop
+
+loop:
+ %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ]
+ %red = phi i32 [ 0, %entry ], [ %red.next, %loop ]
+ %l = load i8, ptr %ptr.iv, align 1
+ %l.ext = zext i8 %l to i32
+ %mul = mul i32 %l.ext, 63
+ %red.next = add i32 %red, %mul
+ %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1
+ %ec = icmp eq ptr %ptr.iv, %end
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret i32 %red.next
+}
+
+; Constants >= 128 cannot be treated as sign-extended, so the expression shouldn't extend 128
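+; (A value sign-extended from i8 lies in [-128, 127], which excludes 128.)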
+define i32 @print_mulacc_not_extended_const(ptr %start, ptr %end) {
+; CHECK-LABEL: 'print_mulacc_not_extended_const'
+; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' {
+; CHECK-NEXT: Live-in vp<%0> = VF
+; CHECK-NEXT: Live-in vp<%1> = VF * UF
+; CHECK-NEXT: Live-in vp<%2> = vector-trip-count
+; CHECK-NEXT: vp<%3> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: EMIT vp<%3> = EXPAND SCEV (1 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64))
+; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%start> + vp<%2> * ir<1>
+; CHECK-NEXT: EMIT vp<%5> = reduction-start-vector ir<0>, ir<0>, ir<1>
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<%6> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<%5>, vp<%9>
+; CHECK-NEXT: vp<%7> = SCALAR-STEPS vp<%6>, ir<1>, vp<%0>
+; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%7>
+; CHECK-NEXT: vp<%8> = vector-pointer vp<%next.gep>
+; CHECK-NEXT: WIDEN ir<%l> = load vp<%8>
+; CHECK-NEXT: WIDEN-CAST ir<%l.ext> = sext ir<%l> to i32
+; CHECK-NEXT: EXPRESSION vp<%9> = ir<%red> + reduce.add (mul ir<%l.ext>, ir<128>)
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%6>, vp<%1>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%2>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%11> = compute-reduction-result ir<%red>, vp<%9>
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<%3>, vp<%2>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: IR %red.next.lcssa = phi i32 [ %red.next, %loop ] (extra operand: vp<%11> from middle.block)
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: scalar.ph:
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%4>, middle.block ], [ ir<%start>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%11>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<loop>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<loop>:
+; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT: IR %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph)
+; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1
+; CHECK-NEXT: IR %l.ext = sext i8 %l to i32
+; CHECK-NEXT: IR %mul = mul i32 %l.ext, 128
+; CHECK-NEXT: IR %red.next = add i32 %red, %mul
+; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1
+; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK: VPlan 'Final VPlan for VF={4},UF={1}' {
+; CHECK-NEXT: Live-in ir<%1> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: IR %start2 = ptrtoint ptr %start to i64
+; CHECK-NEXT: IR %end1 = ptrtoint ptr %end to i64
+; CHECK-NEXT: IR %0 = add i64 %end1, 1
+; CHECK-NEXT: IR %1 = sub i64 %0, %start2
+; CHECK-NEXT: EMIT vp<%min.iters.check> = icmp ult ir<%1>, ir<4>
+; CHECK-NEXT: EMIT branch-on-cond vp<%min.iters.check>
+; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<%1>, ir<4>
+; CHECK-NEXT: EMIT vp<%n.vec> = sub ir<%1>, vp<%n.mod.vf>
+; CHECK-NEXT: vp<%3> = DERIVED-IV ir<%start> + vp<%n.vec> * ir<1>
+; CHECK-NEXT: Successor(s): vector.body
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ]
+; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi ir<0>, ir<%red.next>
+; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%index>
+; CHECK-NEXT: WIDEN ir<%l> = load vp<%next.gep>
+; CHECK-NEXT: WIDEN-CAST ir<%l.ext> = sext ir<%l> to i32
+; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%l.ext>, ir<128>
+; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + reduce.add (ir<%mul>)
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<4>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%n.vec>
+; CHECK-NEXT: Successor(s): middle.block, vector.body
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%5> = compute-reduction-result ir<%red>, ir<%red.next>
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%1>, vp<%n.vec>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, ir-bb<scalar.ph>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: IR %red.next.lcssa = phi i32 [ %red.next, %loop ] (extra operand: vp<%5> from middle.block)
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<scalar.ph>:
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%3>, middle.block ], [ ir<%start>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%5>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<loop>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<loop>:
+; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %scalar.ph ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from ir-bb<scalar.ph>)
+; CHECK-NEXT: IR %red = phi i32 [ 0, %scalar.ph ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from ir-bb<scalar.ph>)
+; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1
+; CHECK-NEXT: IR %l.ext = sext i8 %l to i32
+; CHECK-NEXT: IR %mul = mul i32 %l.ext, 128
+; CHECK-NEXT: IR %red.next = add i32 %red, %mul
+; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1
+; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+entry:
+ br label %loop
+
+loop:
+ %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ]
+ %red = phi i32 [ 0, %entry ], [ %red.next, %loop ]
+ %l = load i8, ptr %ptr.iv, align 1
+ %l.ext = sext i8 %l to i32
+ %mul = mul i32 %l.ext, 128
+ %red.next = add i32 %red, %mul
+ %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1
+ %ec = icmp eq ptr %ptr.iv, %end
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ %red.next.lcssa = phi i32 [ %red.next, %loop ]
+ ret i32 %red.next.lcssa
+}
+
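+; Like @print_mulacc_extended_const, but the multiply is further widened to
+; i64; the constant 63 can still be extended together with the load.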
+define i64 @print_ext_mulacc_extended_const(ptr %start, ptr %end) {
+; CHECK-LABEL: 'print_ext_mulacc_extended_const'
+; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' {
+; CHECK-NEXT: Live-in vp<%0> = VF
+; CHECK-NEXT: Live-in vp<%1> = VF * UF
+; CHECK-NEXT: Live-in vp<%2> = vector-trip-count
+; CHECK-NEXT: vp<%3> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: EMIT vp<%3> = EXPAND SCEV (1 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64))
+; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%start> + vp<%2> * ir<1>
+; CHECK-NEXT: EMIT vp<%5> = reduction-start-vector ir<0>, ir<0>, ir<1>
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<%6> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<%5>, vp<%9>
+; CHECK-NEXT: vp<%7> = SCALAR-STEPS vp<%6>, ir<1>, vp<%0>
+; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%7>
+; CHECK-NEXT: vp<%8> = vector-pointer vp<%next.gep>
+; CHECK-NEXT: WIDEN ir<%l> = load vp<%8>
+; CHECK-NEXT: EXPRESSION vp<%9> = ir<%red> + reduce.add (mul (ir<%l> zext to i64), (ir<63> zext to i64))
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%6>, vp<%1>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%2>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%11> = compute-reduction-result ir<%red>, vp<%9>
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<%3>, vp<%2>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: IR %red.next.lcssa = phi i64 [ %red.next, %loop ] (extra operand: vp<%11> from middle.block)
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: scalar.ph:
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%4>, middle.block ], [ ir<%start>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%11>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<loop>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<loop>:
+; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT: IR %red = phi i64 [ 0, %entry ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph)
+; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1
+; CHECK-NEXT: IR %l.ext = zext i8 %l to i32
+; CHECK-NEXT: IR %mul = mul i32 %l.ext, 63
+; CHECK-NEXT: IR %mul.ext = zext i32 %mul to i64
+; CHECK-NEXT: IR %red.next = add i64 %red, %mul.ext
+; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1
+; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK: VPlan 'Final VPlan for VF={4},UF={1}' {
+; CHECK-NEXT: Live-in ir<%1> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: IR %start2 = ptrtoint ptr %start to i64
+; CHECK-NEXT: IR %end1 = ptrtoint ptr %end to i64
+; CHECK-NEXT: IR %0 = add i64 %end1, 1
+; CHECK-NEXT: IR %1 = sub i64 %0, %start2
+; CHECK-NEXT: EMIT vp<%min.iters.check> = icmp ult ir<%1>, ir<4>
+; CHECK-NEXT: EMIT branch-on-cond vp<%min.iters.check>
+; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<%1>, ir<4>
+; CHECK-NEXT: EMIT vp<%n.vec> = sub ir<%1>, vp<%n.mod.vf>
+; CHECK-NEXT: vp<%3> = DERIVED-IV ir<%start> + vp<%n.vec> * ir<1>
+; CHECK-NEXT: Successor(s): vector.body
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ]
+; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi ir<0>, ir<%red.next>
+; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%index>
+; CHECK-NEXT: WIDEN ir<%l> = load vp<%next.gep>
+; CHECK-NEXT: WIDEN-CAST vp<%4> = zext ir<%l> to i64
+; CHECK-NEXT: WIDEN ir<%mul> = mul vp<%4>, ir<63>
+; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + reduce.add (ir<%mul>)
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<4>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%n.vec>
+; CHECK-NEXT: Successor(s): middle.block, vector.body
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%6> = compute-reduction-result ir<%red>, ir<%red.next>
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%1>, vp<%n.vec>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, ir-bb<scalar.ph>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: IR %red.next.lcssa = phi i64 [ %red.next, %loop ] (extra operand: vp<%6> from middle.block)
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<scalar.ph>:
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%3>, middle.block ], [ ir<%start>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%6>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<loop>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<loop>:
+; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %scalar.ph ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from ir-bb<scalar.ph>)
+; CHECK-NEXT: IR %red = phi i64 [ 0, %scalar.ph ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from ir-bb<scalar.ph>)
+; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1
+; CHECK-NEXT: IR %l.ext = zext i8 %l to i32
+; CHECK-NEXT: IR %mul = mul i32 %l.ext, 63
+; CHECK-NEXT: IR %mul.ext = zext i32 %mul to i64
+; CHECK-NEXT: IR %red.next = add i64 %red, %mul.ext
+; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1
+; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+entry:
+ br label %loop
+
+loop:
+ %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ]
+ %red = phi i64 [ 0, %entry ], [ %red.next, %loop ]
+ %l = load i8, ptr %ptr.iv, align 1
+ %l.ext = zext i8 %l to i32
+ %mul = mul i32 %l.ext, 63
+ %mul.ext = zext i32 %mul to i64
+ %red.next = add i64 %red, %mul.ext
+ %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1
+ %ec = icmp eq ptr %ptr.iv, %end
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret i64 %red.next
+}
+
+; Constants >= 128 cannot be treated as sign-extended, so the expression shouldn't extend 128
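+; (A value sign-extended from i8 lies in [-128, 127], which excludes 128.)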
+define i64 @print_ext_mulacc_not_extended_const(ptr %start, ptr %end) {
+; CHECK-LABEL: 'print_ext_mulacc_not_extended_const'
+; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' {
+; CHECK-NEXT: Live-in vp<%0> = VF
+; CHECK-NEXT: Live-in vp<%1> = VF * UF
+; CHECK-NEXT: Live-in vp<%2> = vector-trip-count
+; CHECK-NEXT: vp<%3> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: EMIT vp<%3> = EXPAND SCEV (1 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64))
+; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%start> + vp<%2> * ir<1>
+; CHECK-NEXT: EMIT vp<%5> = reduction-start-vector ir<0>, ir<0>, ir<1>
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<%6> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<%5>, vp<%9>
+; CHECK-NEXT: vp<%7> = SCALAR-STEPS vp<%6>, ir<1>, vp<%0>
+; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%7>
+; CHECK-NEXT: vp<%8> = vector-pointer vp<%next.gep>
+; CHECK-NEXT: WIDEN ir<%l> = load vp<%8>
+; CHECK-NEXT: WIDEN-CAST ir<%l.ext> = sext ir<%l> to i32
+; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%l.ext>, ir<128>
+; CHECK-NEXT: EXPRESSION vp<%9> = ir<%red> + reduce.add (ir<%mul> sext to i64)
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%6>, vp<%1>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%2>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%11> = compute-reduction-result ir<%red>, vp<%9>
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<%3>, vp<%2>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: IR %red.next.lcssa = phi i64 [ %red.next, %loop ] (extra operand: vp<%11> from middle.block)
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: scalar.ph:
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%4>, middle.block ], [ ir<%start>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%11>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<loop>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<loop>:
+; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT: IR %red = phi i64 [ 0, %entry ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph)
+; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1
+; CHECK-NEXT: IR %l.ext = sext i8 %l to i32
+; CHECK-NEXT: IR %mul = mul i32 %l.ext, 128
+; CHECK-NEXT: IR %mul.ext = sext i32 %mul to i64
+; CHECK-NEXT: IR %red.next = add i64 %red, %mul.ext
+; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1
+; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK: VPlan 'Final VPlan for VF={4},UF={1}' {
+; CHECK-NEXT: Live-in ir<%1> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: IR %start2 = ptrtoint ptr %start to i64
+; CHECK-NEXT: IR %end1 = ptrtoint ptr %end to i64
+; CHECK-NEXT: IR %0 = add i64 %end1, 1
+; CHECK-NEXT: IR %1 = sub i64 %0, %start2
+; CHECK-NEXT: EMIT vp<%min.iters.check> = icmp ult ir<%1>, ir<4>
+; CHECK-NEXT: EMIT branch-on-cond vp<%min.iters.check>
+; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<%1>, ir<4>
+; CHECK-NEXT: EMIT vp<%n.vec> = sub ir<%1>, vp<%n.mod.vf>
+; CHECK-NEXT: vp<%3> = DERIVED-IV ir<%start> + vp<%n.vec> * ir<1>
+; CHECK-NEXT: Successor(s): vector.body
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ]
+; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi ir<0>, ir<%red.next>
+; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%index>
+; CHECK-NEXT: WIDEN ir<%l> = load vp<%next.gep>
+; CHECK-NEXT: WIDEN-CAST ir<%l.ext> = sext ir<%l> to i32
+; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%l.ext>, ir<128>
+; CHECK-NEXT: WIDEN-CAST ir<%mul.ext> = sext ir<%mul> to i64
+; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + reduce.add (ir<%mul.ext>)
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<4>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%n.vec>
+; CHECK-NEXT: Successor(s): middle.block, vector.body
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%5> = compute-reduction-result ir<%red>, ir<%red.next>
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%1>, vp<%n.vec>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, ir-bb<scalar.ph>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: IR %red.next.lcssa = phi i64 [ %red.next, %loop ] (extra operand: vp<%5> from middle.block)
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<scalar.ph>:
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%3>, middle.block ], [ ir<%start>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%5>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<loop>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<loop>:
+; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %scalar.ph ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from ir-bb<scalar.ph>)
+; CHECK-NEXT: IR %red = phi i64 [ 0, %scalar.ph ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from ir-bb<scalar.ph>)
+; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1
+; CHECK-NEXT: IR %l.ext = sext i8 %l to i32
+; CHECK-NEXT: IR %mul = mul i32 %l.ext, 128
+; CHECK-NEXT: IR %mul.ext = sext i32 %mul to i64
+; CHECK-NEXT: IR %red.next = add i64 %red, %mul.ext
+; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1
+; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+entry:
+ br label %loop
+
+loop:
+ %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ]
+ %red = phi i64 [ 0, %entry ], [ %red.next, %loop ]
+ %l = load i8, ptr %ptr.iv, align 1
+ %l.ext = sext i8 %l to i32
+ %mul = mul i32 %l.ext, 128
+ %mul.ext = sext i32 %mul to i64
+ %red.next = add i64 %red, %mul.ext
+ %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1
+ %ec = icmp eq ptr %ptr.iv, %end
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ %red.next.lcssa = phi i64 [ %red.next, %loop ]
+ ret i64 %red.next.lcssa
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/parent-node-schedulable-with-multi-copyables.ll b/llvm/test/Transforms/SLPVectorizer/X86/parent-node-schedulable-with-multi-copyables.ll
new file mode 100644
index 0000000..9e96e93
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/parent-node-schedulable-with-multi-copyables.ll
@@ -0,0 +1,170 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -passes=slp-vectorizer -S -slp-threshold=-100 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+define i64 @test(ptr %arg1, i64 %alloca.promoted344, i8 %load.311.i, i1 %load1.i) {
+; CHECK-LABEL: define i64 @test(
+; CHECK-SAME: ptr [[ARG1:%.*]], i64 [[ALLOCA_PROMOTED344:%.*]], i8 [[LOAD_311_I:%.*]], i1 [[LOAD1_I:%.*]]) {
+; CHECK-NEXT: [[BB:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i8> <i8 0, i8 0, i8 0, i8 poison>, i8 [[LOAD_311_I]], i32 3
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i8> <i8 poison, i8 poison, i8 0, i8 0>, i8 [[LOAD_311_I]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i64> poison, i64 [[ALLOCA_PROMOTED344]], i32 0
+; CHECK-NEXT: br label %[[BB2:.*]]
+; CHECK: [[BB2]]:
+; CHECK-NEXT: [[TMP3:%.*]] = phi <2 x i64> [ zeroinitializer, %[[BB]] ], [ [[TMP28:%.*]], %[[BB12_8_I:.*]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = phi <8 x i8> [ zeroinitializer, %[[BB]] ], [ [[TMP29:%.*]], %[[BB12_8_I]] ]
+; CHECK-NEXT: br i1 [[LOAD1_I]], label %[[SPAM_EXIT:.*]], label %[[BB4_LR_PH_I:.*]]
+; CHECK: [[BB4_LR_PH_I]]:
+; CHECK-NEXT: br i1 true, label %[[BB3_I_I_PEEL:.*]], label %[[EGGS_EXIT_I_PEEL:.*]]
+; CHECK: [[BB3_I_I_PEEL]]:
+; CHECK-NEXT: [[TMP5:%.*]] = and <2 x i64> [[TMP3]], splat (i64 1)
+; CHECK-NEXT: [[LOAD4_I_I_PEEL:%.*]] = load i64, ptr [[ARG1]], align 8
+; CHECK-NEXT: [[SHL_I_I_PEEL:%.*]] = shl i64 [[LOAD4_I_I_PEEL]], 1
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> poison, <2 x i32> <i32 poison, i32 0>
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x i64> [[TMP6]], i64 [[SHL_I_I_PEEL]], i32 0
+; CHECK-NEXT: [[TMP8:%.*]] = or <2 x i64> [[TMP5]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = xor <2 x i64> [[TMP5]], [[TMP7]]
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP8]], <2 x i64> [[TMP9]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: br label %[[EGGS_EXIT_I_PEEL]]
+; CHECK: [[EGGS_EXIT_I_PEEL]]:
+; CHECK-NEXT: [[TMP11:%.*]] = phi <2 x i64> [ [[TMP10]], %[[BB3_I_I_PEEL]] ], [ zeroinitializer, %[[BB4_LR_PH_I]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <2 x i64> [[TMP11]], <2 x i64> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 0>
+; CHECK-NEXT: [[TMP13:%.*]] = trunc <4 x i64> [[TMP12]] to <4 x i8>
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i64> [[TMP12]], i32 1
+; CHECK-NEXT: br label %[[SPAM_EXIT]]
+; CHECK: [[SPAM_EXIT]]:
+; CHECK-NEXT: [[GETELEMENTPTR_I_I_PROMOTED346:%.*]] = phi i64 [ [[TMP14]], %[[EGGS_EXIT_I_PEEL]] ], [ 0, %[[BB2]] ]
+; CHECK-NEXT: [[LOAD_8_I:%.*]] = phi i8 [ 0, %[[EGGS_EXIT_I_PEEL]] ], [ 1, %[[BB2]] ]
+; CHECK-NEXT: [[TMP15:%.*]] = phi <4 x i8> [ [[TMP13]], %[[EGGS_EXIT_I_PEEL]] ], [ zeroinitializer, %[[BB2]] ]
+; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <4 x i8> [[TMP15]], <4 x i8> poison, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT: br i1 [[LOAD1_I]], label %[[BB12_8_I]], label %[[BB12_1_THREAD_I:.*]]
+; CHECK: [[BB12_1_THREAD_I]]:
+; CHECK-NEXT: [[TMP17:%.*]] = extractelement <8 x i8> [[TMP4]], i32 0
+; CHECK-NEXT: [[ICMP5_3_I:%.*]] = icmp eq i8 [[TMP17]], 0
+; CHECK-NEXT: br i1 [[ICMP5_3_I]], label %[[BB12_3_I:.*]], label %[[BB8_3_I:.*]]
+; CHECK: [[BB8_3_I]]:
+; CHECK-NEXT: br label %[[BB12_3_I]]
+; CHECK: [[BB12_3_I]]:
+; CHECK-NEXT: [[TMP18:%.*]] = extractelement <8 x i8> [[TMP4]], i32 1
+; CHECK-NEXT: [[ICMP5_4_I:%.*]] = icmp eq i8 [[TMP18]], 0
+; CHECK-NEXT: br i1 [[ICMP5_4_I]], label %[[BB12_4_I:.*]], label %[[BB8_4_I:.*]]
+; CHECK: [[BB8_4_I]]:
+; CHECK-NEXT: br label %[[BB12_4_I]]
+; CHECK: [[BB12_4_I]]:
+; CHECK-NEXT: [[TMP19:%.*]] = extractelement <8 x i8> [[TMP4]], i32 2
+; CHECK-NEXT: [[ICMP5_5_I:%.*]] = icmp eq i8 [[TMP19]], 0
+; CHECK-NEXT: br i1 [[ICMP5_5_I]], label %[[BB12_5_I:.*]], label %[[BB8_5_I:.*]]
+; CHECK: [[BB8_5_I]]:
+; CHECK-NEXT: br label %[[BB12_5_I]]
+; CHECK: [[BB12_5_I]]:
+; CHECK-NEXT: [[TMP20:%.*]] = extractelement <8 x i8> [[TMP4]], i32 3
+; CHECK-NEXT: [[ICMP5_7_I:%.*]] = icmp eq i8 [[TMP20]], 0
+; CHECK-NEXT: br i1 [[ICMP5_7_I]], label %[[BB12_7_I:.*]], label %[[BB8_7_I:.*]]
+; CHECK: [[BB8_7_I]]:
+; CHECK-NEXT: br label %[[BB12_7_I]]
+; CHECK: [[BB12_7_I]]:
+; CHECK-NEXT: [[TMP21:%.*]] = extractelement <8 x i8> [[TMP4]], i32 4
+; CHECK-NEXT: [[ICMP5_8_I:%.*]] = icmp eq i8 [[TMP21]], 0
+; CHECK-NEXT: br i1 [[ICMP5_8_I]], label %[[BB12_8_I]], label %[[BB8_8_I:.*]]
+; CHECK: [[BB8_8_I]]:
+; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i8> [[TMP1]], i8 [[LOAD_8_I]], i32 1
+; CHECK-NEXT: [[TMP23:%.*]] = insertelement <4 x i8> poison, i8 [[LOAD_8_I]], i32 0
+; CHECK-NEXT: [[TMP24:%.*]] = shufflevector <8 x i8> [[TMP4]], <8 x i8> poison, <4 x i32> <i32 poison, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP25:%.*]] = shufflevector <4 x i8> [[TMP23]], <4 x i8> [[TMP24]], <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+; CHECK-NEXT: br label %[[BB12_8_I]]
+; CHECK: [[BB12_8_I]]:
+; CHECK-NEXT: [[TMP26:%.*]] = phi <4 x i8> [ [[TMP0]], %[[BB12_7_I]] ], [ [[TMP22]], %[[BB8_8_I]] ], [ [[TMP15]], %[[SPAM_EXIT]] ]
+; CHECK-NEXT: [[TMP27:%.*]] = phi <4 x i8> [ zeroinitializer, %[[BB12_7_I]] ], [ [[TMP25]], %[[BB8_8_I]] ], [ [[TMP16]], %[[SPAM_EXIT]] ]
+; CHECK-NEXT: [[TMP28]] = insertelement <2 x i64> [[TMP2]], i64 [[GETELEMENTPTR_I_I_PROMOTED346]], i32 1
+; CHECK-NEXT: [[TMP29]] = shufflevector <4 x i8> [[TMP26]], <4 x i8> [[TMP27]], <8 x i32> <i32 2, i32 7, i32 5, i32 0, i32 1, i32 3, i32 4, i32 6>
+; CHECK-NEXT: br label %[[BB2]]
+;
+bb:
+ br label %bb2
+
+bb2:
+ %getelementptr.i.i.promoted = phi i64 [ 0, %bb ], [ %getelementptr.i.i.promoted346, %bb12.8.i ]
+ %alloca.promoted = phi i64 [ 0, %bb ], [ %alloca.promoted344, %bb12.8.i ]
+ %load.8.i231 = phi i8 [ 0, %bb ], [ %load.8.i239, %bb12.8.i ]
+ %load.7.i217 = phi i8 [ 0, %bb ], [ %load.7.i225, %bb12.8.i ]
+ %load.626.i200 = phi i8 [ 0, %bb ], [ %load.626.i208, %bb12.8.i ]
+ %load.6.i183 = phi i8 [ 0, %bb ], [ %load.6.i191, %bb12.8.i ]
+ %load.5.i167 = phi i8 [ 0, %bb ], [ %load.5.i175, %bb12.8.i ]
+ %load.418.i148 = phi i8 [ 0, %bb ], [ %load.418.i156, %bb12.8.i ]
+ %load.4.i129 = phi i8 [ 0, %bb ], [ %load.4.i137, %bb12.8.i ]
+ %load.3.i111 = phi i8 [ 0, %bb ], [ %load.3.i119, %bb12.8.i ]
+ br i1 %load1.i, label %spam.exit, label %bb4.lr.ph.i
+
+bb4.lr.ph.i:
+ br i1 true, label %bb3.i.i.peel, label %eggs.exit.i.peel
+
+bb3.i.i.peel:
+ %and.i.i.peel = and i64 %alloca.promoted, 1
+ %load4.i.i.peel = load i64, ptr %arg1, align 8
+ %shl.i.i.peel = shl i64 %load4.i.i.peel, 1
+ %or.i.i.peel = or i64 %shl.i.i.peel, %and.i.i.peel
+ %and6.i.i.peel = and i64 %getelementptr.i.i.promoted, 1
+ %xor.i.i.peel = xor i64 %and6.i.i.peel, %alloca.promoted
+ br label %eggs.exit.i.peel
+
+eggs.exit.i.peel:
+ %load5.i.i93.peel = phi i64 [ %xor.i.i.peel, %bb3.i.i.peel ], [ 0, %bb4.lr.ph.i ]
+ %or.i.i91.peel = phi i64 [ %or.i.i.peel, %bb3.i.i.peel ], [ 0, %bb4.lr.ph.i ]
+ %0 = trunc i64 %or.i.i91.peel to i8
+ %1 = trunc nuw i64 %or.i.i91.peel to i8
+ %2 = trunc i64 %load5.i.i93.peel to i8
+ br label %spam.exit
+
+spam.exit:
+ %getelementptr.i.i.promoted346 = phi i64 [ %load5.i.i93.peel, %eggs.exit.i.peel ], [ 0, %bb2 ]
+ %load.834.i = phi i8 [ %2, %eggs.exit.i.peel ], [ 0, %bb2 ]
+ %load.7.i25 = phi i8 [ %1, %eggs.exit.i.peel ], [ 0, %bb2 ]
+ %load.8.i = phi i8 [ 0, %eggs.exit.i.peel ], [ 1, %bb2 ]
+ %load.6.i18 = phi i8 [ %0, %eggs.exit.i.peel ], [ 0, %bb2 ]
+ br i1 %load1.i, label %bb12.8.i, label %bb12.1.thread.i
+
+bb12.1.thread.i:
+ %icmp5.3.i = icmp eq i8 %load.3.i111, 0
+ br i1 %icmp5.3.i, label %bb12.3.i, label %bb8.3.i
+
+bb8.3.i:
+ br label %bb12.3.i
+
+bb12.3.i:
+ %icmp5.4.i = icmp eq i8 %load.4.i129, 0
+ br i1 %icmp5.4.i, label %bb12.4.i, label %bb8.4.i
+
+bb8.4.i:
+ br label %bb12.4.i
+
+bb12.4.i:
+ %icmp5.5.i = icmp eq i8 %load.5.i167, 0
+ br i1 %icmp5.5.i, label %bb12.5.i, label %bb8.5.i
+
+bb8.5.i:
+ br label %bb12.5.i
+
+bb12.5.i:
+ %icmp5.7.i = icmp eq i8 %load.7.i217, 0
+ br i1 %icmp5.7.i, label %bb12.7.i, label %bb8.7.i
+
+bb8.7.i:
+ br label %bb12.7.i
+
+bb12.7.i:
+ %icmp5.8.i = icmp eq i8 %load.8.i231, 0
+ br i1 %icmp5.8.i, label %bb12.8.i, label %bb8.8.i
+
+bb8.8.i:
+ br label %bb12.8.i
+
+bb12.8.i:
+ %load.8.i239 = phi i8 [ 0, %bb12.7.i ], [ %load.8.i, %bb8.8.i ], [ %load.834.i, %spam.exit ]
+ %load.7.i225 = phi i8 [ 0, %bb12.7.i ], [ %load.311.i, %bb8.8.i ], [ %load.7.i25, %spam.exit ]
+ %load.626.i208 = phi i8 [ 0, %bb12.7.i ], [ %load.8.i, %bb8.8.i ], [ %load.6.i18, %spam.exit ]
+ %load.6.i191 = phi i8 [ %load.311.i, %bb12.7.i ], [ 0, %bb8.8.i ], [ %load.6.i18, %spam.exit ]
+ %load.5.i175 = phi i8 [ 0, %bb12.7.i ], [ %load.6.i183, %bb8.8.i ], [ %load.6.i18, %spam.exit ]
+ %load.418.i156 = phi i8 [ 0, %bb12.7.i ], [ %load.626.i200, %bb8.8.i ], [ %load.6.i18, %spam.exit ]
+ %load.4.i137 = phi i8 [ 0, %bb12.7.i ], [ %load.418.i148, %bb8.8.i ], [ %load.6.i18, %spam.exit ]
+ %load.3.i119 = phi i8 [ 0, %bb12.7.i ], [ 0, %bb8.8.i ], [ %load.6.i18, %spam.exit ]
+ br label %bb2
+}
diff --git a/llvm/test/Transforms/SimplifyCFG/pr165088.ll b/llvm/test/Transforms/SimplifyCFG/pr165088.ll
new file mode 100644
index 0000000..4514a19
--- /dev/null
+++ b/llvm/test/Transforms/SimplifyCFG/pr165088.ll
@@ -0,0 +1,186 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -passes="simplifycfg<switch-range-to-icmp>" < %s | FileCheck %s
+
+; Make sure SimplifyCFG does not get stuck in an infinite folding cycle in
+; pr165088_cycle_[1-4].
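+;
+; (Summary inferred from the checks below: switch-range-to-icmp rewrites the
+; entry switch into an unsigned range compare such as `icmp ult i8 %x, 2`;
+; previously, later folds could recreate a form that re-triggered that
+; rewrite, so the pass never reached a fixed point.)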
+
+define void @pr165088_cycle_1(i8 %x) {
+; CHECK-LABEL: define void @pr165088_cycle_1(
+; CHECK-SAME: i8 [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i8 [[X]], 2
+; CHECK-NEXT: br i1 [[TMP0]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; CHECK: [[BLOCK1:.*]]:
+; CHECK-NEXT: [[COND2:%.*]] = icmp ugt i8 [[X]], 1
+; CHECK-NEXT: br i1 [[COND2]], label %[[BLOCK3]], label %[[BLOCK2]]
+; CHECK: [[BLOCK2]]:
+; CHECK-NEXT: br label %[[BLOCK3]]
+; CHECK: [[BLOCK3]]:
+; CHECK-NEXT: [[COND3:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT: br i1 [[COND3]], label %[[EXIT:.*]], label %[[BLOCK1]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ %switch = icmp uge i8 %x, 2
+ %cond1 = icmp ugt i8 %x, 1
+ %or.cond = and i1 %switch, %cond1
+ br i1 %or.cond, label %block3, label %block2
+
+block1:
+ %cond2 = icmp ugt i8 %x, 1
+ br i1 %cond2, label %block3, label %block2
+
+block2:
+ br label %block3
+
+block3:
+ %cond3 = icmp eq i8 %x, 0
+ br i1 %cond3, label %exit, label %block1
+
+exit:
+ ret void
+}
+
+define void @pr165088_cycle_2(i8 %x) {
+; CHECK-LABEL: define void @pr165088_cycle_2(
+; CHECK-SAME: i8 [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[X]], 2
+; CHECK-NEXT: br i1 [[SWITCH]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; CHECK: [[BLOCK1:.*]]:
+; CHECK-NEXT: [[COND2:%.*]] = icmp ugt i8 [[X]], 1
+; CHECK-NEXT: br i1 [[COND2]], label %[[BLOCK3]], label %[[BLOCK2]]
+; CHECK: [[BLOCK2]]:
+; CHECK-NEXT: br label %[[BLOCK3]]
+; CHECK: [[BLOCK3]]:
+; CHECK-NEXT: [[COND3:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT: br i1 [[COND3]], label %[[EXIT:.*]], label %[[BLOCK1]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ switch i8 %x, label %block3 [
+ i8 1, label %block2
+ i8 0, label %block2
+ ]
+
+block1: ; preds = %block3
+ %cond2 = icmp ugt i8 %x, 1
+ br i1 %cond2, label %block3, label %block2
+
+block2: ; preds = %entry, %entry, %block1
+ br label %block3
+
+block3: ; preds = %entry, %block2, %block1
+ %cond3 = icmp eq i8 %x, 0
+ br i1 %cond3, label %exit, label %block1
+
+exit: ; preds = %block3
+ ret void
+}
+
+define void @pr165088_cycle_3(i8 %x) {
+; CHECK-LABEL: define void @pr165088_cycle_3(
+; CHECK-SAME: i8 [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br label %[[BLOCK3:.*]]
+; CHECK: [[BLOCK3]]:
+; CHECK-NEXT: [[COND3:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT: br i1 [[COND3]], label %[[EXIT:.*]], label %[[BLOCK3]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ switch i8 %x, label %block1 [
+ i8 1, label %block2
+ i8 0, label %block2
+ ]
+
+block1: ; preds = %entry, %block3
+ %cond2 = icmp ugt i8 %x, 1
+ br i1 %cond2, label %block3, label %block2
+
+block2: ; preds = %entry, %entry, %block1
+ br label %block3
+
+block3: ; preds = %block2, %block1
+ %cond3 = icmp eq i8 %x, 0
+ br i1 %cond3, label %exit, label %block1
+
+exit: ; preds = %block3
+ ret void
+}
+
+define void @pr165088_cycle_4(i8 %x) {
+; CHECK-LABEL: define void @pr165088_cycle_4(
+; CHECK-SAME: i8 [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i8 [[X]], 2
+; CHECK-NEXT: br i1 [[TMP0]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; CHECK: [[BLOCK1:.*]]:
+; CHECK-NEXT: [[COND2_OLD:%.*]] = icmp ugt i8 [[X]], 1
+; CHECK-NEXT: br i1 [[COND2_OLD]], label %[[BLOCK3]], label %[[BLOCK2]]
+; CHECK: [[BLOCK2]]:
+; CHECK-NEXT: br label %[[BLOCK3]]
+; CHECK: [[BLOCK3]]:
+; CHECK-NEXT: [[COND3:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT: br i1 [[COND3]], label %[[EXIT:.*]], label %[[BLOCK1]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ %switch = icmp ult i8 %x, 2
+ br i1 %switch, label %block2, label %block1
+
+block1: ; preds = %entry, %block3
+ %cond2 = icmp ugt i8 %x, 1
+ br i1 %cond2, label %block3, label %block2
+
+block2: ; preds = %entry, %block1
+ br label %block3
+
+block3: ; preds = %block2, %block1
+ %cond3 = icmp eq i8 %x, 0
+ br i1 %cond3, label %exit, label %block1
+
+exit: ; preds = %block3
+ ret void
+}
+
+define void @pr165088_original(i8 %x) {
+; CHECK-LABEL: define void @pr165088_original(
+; CHECK-SAME: i8 [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i8 [[X]], 2
+; CHECK-NEXT: br i1 [[TMP0]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; CHECK: [[BLOCK1:.*]]:
+; CHECK-NEXT: [[COND3_OLD_OLD:%.*]] = icmp ugt i8 [[X]], 1
+; CHECK-NEXT: br i1 [[COND3_OLD_OLD]], label %[[BLOCK3]], label %[[BLOCK2]]
+; CHECK: [[BLOCK2]]:
+; CHECK-NEXT: br label %[[BLOCK3]]
+; CHECK: [[BLOCK3]]:
+; CHECK-NEXT: [[COND4:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT: br i1 [[COND4]], label %[[EXIT:.*]], label %[[BLOCK1]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ %cond = icmp ne i8 %x, 0
+ %cond3 = icmp ne i8 %x, 0
+ %or.cond = and i1 %cond, %cond3
+ br i1 %or.cond, label %block3, label %block2
+
+block1: ; preds = %block3
+ %cond3.old = icmp ugt i8 %x, 1
+ br i1 %cond3.old, label %block3, label %block2
+
+block2: ; preds = %block1, %entry
+ br label %block3
+
+block3: ; preds = %block2, %block1, %entry
+ %cond4 = icmp eq i8 %x, 0
+ br i1 %cond4, label %exit, label %block1
+
+exit: ; preds = %block3
+ ret void
+}
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/switch_case.ll b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/switch_case.ll
new file mode 100644
index 0000000..a804225
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/switch_case.ll
@@ -0,0 +1,54 @@
+; RUN: opt < %s -S | FileCheck %s
+
+; Test whether UTC (update_test_checks.py) formats switch cases correctly; case lines require TWO extra spaces of indentation.
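+; For example, a case line in the generated checks reads
+;   i8 0, label %[[CASE1:.*]]
+; i.e. indented two spaces beyond an ordinary instruction line.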
+
+define i8 @testi8(i8 %x) {
+ switch i8 %x, label %default [
+ i8 0, label %case1
+ i8 1, label %case2
+ i8 2, label %case3
+ i8 3, label %case3
+ ]
+default:
+ ret i8 0
+case1:
+ ret i8 1
+case2:
+ ret i8 2
+case3:
+ ret i8 3
+}
+
+define i32 @testi32(i32 %x) {
+ switch i32 %x, label %default [
+ i32 0, label %case1
+ i32 1, label %case2
+ i32 2, label %case3
+ i32 3, label %case3
+ ]
+default:
+ ret i32 0
+case1:
+ ret i32 1
+case2:
+ ret i32 2
+case3:
+ ret i32 3
+}
+
+define i128 @testi128(i128 %x) {
+ switch i128 %x, label %default [
+ i128 0, label %case1
+ i128 1, label %case2
+ i128 2, label %case3
+ i128 3, label %case3
+ ]
+default:
+ ret i128 0
+case1:
+ ret i128 1
+case2:
+ ret i128 2
+case3:
+ ret i128 3
+}
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/switch_case.ll.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/switch_case.ll.expected
new file mode 100644
index 0000000..b1977e7
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/switch_case.ll.expected
@@ -0,0 +1,106 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 7
+; RUN: opt < %s -S | FileCheck %s
+
+; Test whether UTC (update_test_checks.py) formats switch cases correctly; case lines require TWO extra spaces of indentation.
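+; For example, a case line in the generated checks reads
+;   i8 0, label %[[CASE1:.*]]
+; i.e. indented two spaces beyond an ordinary instruction line.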
+
+define i8 @testi8(i8 %x) {
+; CHECK-LABEL: define i8 @testi8(
+; CHECK-SAME: i8 [[X:%.*]]) {
+; CHECK-NEXT: switch i8 [[X]], label %[[DEFAULT:.*]] [
+; CHECK-NEXT: i8 0, label %[[CASE1:.*]]
+; CHECK-NEXT: i8 1, label %[[CASE2:.*]]
+; CHECK-NEXT: i8 2, label %[[CASE3:.*]]
+; CHECK-NEXT: i8 3, label %[[CASE3]]
+; CHECK-NEXT: ]
+; CHECK: [[DEFAULT]]:
+; CHECK-NEXT: ret i8 0
+; CHECK: [[CASE1]]:
+; CHECK-NEXT: ret i8 1
+; CHECK: [[CASE2]]:
+; CHECK-NEXT: ret i8 2
+; CHECK: [[CASE3]]:
+; CHECK-NEXT: ret i8 3
+;
+ switch i8 %x, label %default [
+ i8 0, label %case1
+ i8 1, label %case2
+ i8 2, label %case3
+ i8 3, label %case3
+ ]
+default:
+ ret i8 0
+case1:
+ ret i8 1
+case2:
+ ret i8 2
+case3:
+ ret i8 3
+}
+
+define i32 @testi32(i32 %x) {
+; CHECK-LABEL: define i32 @testi32(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: switch i32 [[X]], label %[[DEFAULT:.*]] [
+; CHECK-NEXT: i32 0, label %[[CASE1:.*]]
+; CHECK-NEXT: i32 1, label %[[CASE2:.*]]
+; CHECK-NEXT: i32 2, label %[[CASE3:.*]]
+; CHECK-NEXT: i32 3, label %[[CASE3]]
+; CHECK-NEXT: ]
+; CHECK: [[DEFAULT]]:
+; CHECK-NEXT: ret i32 0
+; CHECK: [[CASE1]]:
+; CHECK-NEXT: ret i32 1
+; CHECK: [[CASE2]]:
+; CHECK-NEXT: ret i32 2
+; CHECK: [[CASE3]]:
+; CHECK-NEXT: ret i32 3
+;
+ switch i32 %x, label %default [
+ i32 0, label %case1
+ i32 1, label %case2
+ i32 2, label %case3
+ i32 3, label %case3
+ ]
+default:
+ ret i32 0
+case1:
+ ret i32 1
+case2:
+ ret i32 2
+case3:
+ ret i32 3
+}
+
+define i128 @testi128(i128 %x) {
+; CHECK-LABEL: define i128 @testi128(
+; CHECK-SAME: i128 [[X:%.*]]) {
+; CHECK-NEXT: switch i128 [[X]], label %[[DEFAULT:.*]] [
+; CHECK-NEXT: i128 0, label %[[CASE1:.*]]
+; CHECK-NEXT: i128 1, label %[[CASE2:.*]]
+; CHECK-NEXT: i128 2, label %[[CASE3:.*]]
+; CHECK-NEXT: i128 3, label %[[CASE3]]
+; CHECK-NEXT: ]
+; CHECK: [[DEFAULT]]:
+; CHECK-NEXT: ret i128 0
+; CHECK: [[CASE1]]:
+; CHECK-NEXT: ret i128 1
+; CHECK: [[CASE2]]:
+; CHECK-NEXT: ret i128 2
+; CHECK: [[CASE3]]:
+; CHECK-NEXT: ret i128 3
+;
+ switch i128 %x, label %default [
+ i128 0, label %case1
+ i128 1, label %case2
+ i128 2, label %case3
+ i128 3, label %case3
+ ]
+default:
+ ret i128 0
+case1:
+ ret i128 1
+case2:
+ ret i128 2
+case3:
+ ret i128 3
+}
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/switch_case.test b/llvm/test/tools/UpdateTestChecks/update_test_checks/switch_case.test
new file mode 100644
index 0000000..891dbe0
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/switch_case.test
@@ -0,0 +1,3 @@
+## switch_case test: check that update_test_checks.py formats switch cases correctly.
+# RUN: cp -f %S/Inputs/switch_case.ll %t.ll && %update_test_checks %t.ll --version 7
+# RUN: diff -u %t.ll %S/Inputs/switch_case.ll.expected
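+## For reference (hypothetical expansion, not run by lit): %update_test_checks
+## is a lit substitution for invoking the updater directly, roughly
+##   python llvm/utils/update_test_checks.py --opt-binary=<build>/bin/opt
+## The two RUN lines above regenerate the checks and then require the result
+## to match the .expected file exactly.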
diff --git a/llvm/test/tools/llvm-dwarfdump/X86/type_units_split_dwp_v4.s b/llvm/test/tools/llvm-dwarfdump/X86/type_units_split_dwp_v4.s
index becd9d1..519edf04 100644
--- a/llvm/test/tools/llvm-dwarfdump/X86/type_units_split_dwp_v4.s
+++ b/llvm/test/tools/llvm-dwarfdump/X86/type_units_split_dwp_v4.s
@@ -1,6 +1,12 @@
## This test uses the TU index for type parsing in the dwp and makes sure the DWARF4 type is
## successfully retrieved.
+## cd to a unique dir so we can refer to the file as just "test.dwo" in the
+## assembly test input below.
+# RUN: rm -rf %t
+# RUN: mkdir %t
+# RUN: cd %t
+
# RUN: llvm-mc %s --split-dwarf-file=test.dwo -filetype obj -triple x86_64 -o test.o
# RUN: llvm-dwp -e test.o -o test.dwp
# RUN: llvm-dwarfdump test.dwp | FileCheck %s