Diffstat (limited to 'llvm/test')
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll | 4
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-sextinreg.mir | 3
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir | 9
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir | 3
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/select-vector-shift.mir | 6
-rw-r--r-- llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll | 6
-rw-r--r-- llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll | 204
-rw-r--r-- llvm/test/CodeGen/AArch64/arm64-subvector-extend.ll | 12
-rw-r--r-- llvm/test/CodeGen/AArch64/arm64-vabs.ll | 24
-rw-r--r-- llvm/test/CodeGen/AArch64/arm64-vadd.ll | 341
-rw-r--r-- llvm/test/CodeGen/AArch64/combine-sdiv.ll | 137
-rw-r--r-- llvm/test/CodeGen/AArch64/extract-vector-elt.ll | 4
-rw-r--r-- llvm/test/CodeGen/AArch64/fcmp.ll | 18
-rw-r--r-- llvm/test/CodeGen/AArch64/fpclamptosat.ll | 55
-rw-r--r-- llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll | 101
-rw-r--r-- llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll | 12
-rw-r--r-- llvm/test/CodeGen/AArch64/neon-compare-instructions.ll | 113
-rw-r--r-- llvm/test/CodeGen/AArch64/neon-shift-left-long.ll | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/select_cc.ll | 4
-rw-r--r-- llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll | 8
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgpu-attributor-flat-scratch-init-asan.ll | 24
-rw-r--r-- llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll | 4
-rw-r--r-- llvm/test/CodeGen/AMDGPU/true16-fold.mir | 25
-rw-r--r-- llvm/test/CodeGen/ARM/fpclamptosat.ll | 48
-rw-r--r-- llvm/test/CodeGen/ARM/fpclamptosat_vec.ll | 107
-rw-r--r-- llvm/test/CodeGen/Hexagon/inst_setcc_uno_uo.ll | 93
-rw-r--r-- llvm/test/CodeGen/Hexagon/isel-fold-shl-zext.ll | 12
-rw-r--r-- llvm/test/CodeGen/RISCV/fpclamptosat.ll | 58
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll | 202
-rw-r--r-- llvm/test/CodeGen/SPIRV/pointers/ptrcast-bitcast.ll | 28
-rw-r--r-- llvm/test/CodeGen/WebAssembly/fpclamptosat.ll | 89
-rw-r--r-- llvm/test/CodeGen/WebAssembly/fpclamptosat_vec.ll | 78
-rw-r--r-- llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll | 6
-rw-r--r-- llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll | 6
-rw-r--r-- llvm/test/CodeGen/X86/fpclamptosat.ll | 45
-rw-r--r-- llvm/test/CodeGen/X86/fpclamptosat_vec.ll | 105
-rw-r--r-- llvm/test/CodeGen/X86/shuffle-of-splat-multiuses.ll | 8
-rw-r--r-- llvm/test/CodeGen/X86/vec-strict-cmp-128.ll | 32
-rw-r--r-- llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll | 10
-rw-r--r-- llvm/test/CodeGen/X86/vector-fshl-sub128.ll | 10
-rw-r--r-- llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll | 10
-rw-r--r-- llvm/test/CodeGen/X86/vector-fshr-sub128.ll | 10
-rw-r--r-- llvm/test/CodeGen/X86/vector-sext.ll | 4
-rw-r--r-- llvm/test/CodeGen/X86/vector-zext.ll | 6
-rw-r--r-- llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll | 8
-rw-r--r-- llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll | 8
-rw-r--r-- llvm/test/ThinLTO/X86/memprof-dups.ll | 138
-rw-r--r-- llvm/test/ThinLTO/X86/memprof_imported_internal.ll | 10
-rw-r--r-- llvm/test/Transforms/InstCombine/masked_intrinsics.ll | 58
-rw-r--r-- llvm/test/Transforms/InstCombine/pr83947.ll | 2
-rw-r--r-- llvm/test/Transforms/InstCombine/select-and-cmp.ll | 44
-rw-r--r-- llvm/test/Transforms/InstCombine/select-or-cmp.ll | 50
-rw-r--r-- llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll | 126
-rw-r--r-- llvm/test/Transforms/NewGVN/pr159918.ll | 21
-rw-r--r-- llvm/test/Transforms/SLPVectorizer/X86/no_alternate_divrem.ll | 101
55 files changed, 1769 insertions(+), 883 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
index 7872c02..461a7ef 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
@@ -177,7 +177,7 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) {
; GISEL-NEXT: neg v2.16b, v3.16b
; GISEL-NEXT: shl v3.16b, v4.16b, #7
; GISEL-NEXT: ushl v1.16b, v1.16b, v2.16b
-; GISEL-NEXT: sshr v2.16b, v3.16b, #7
+; GISEL-NEXT: cmlt v2.16b, v3.16b, #0
; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
; GISEL-NEXT: ret
%div = udiv <16 x i8> %x, <i8 -64, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -229,7 +229,7 @@ define <8 x i16> @pr38477(<8 x i16> %a0) {
; GISEL-NEXT: add v1.8h, v2.8h, v1.8h
; GISEL-NEXT: neg v2.8h, v4.8h
; GISEL-NEXT: ushl v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: sshr v2.8h, v3.8h, #15
+; GISEL-NEXT: cmlt v2.8h, v3.8h, #0
; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
; GISEL-NEXT: ret
%1 = udiv <8 x i16> %a0, <i16 1, i16 119, i16 73, i16 -111, i16 -3, i16 118, i16 32, i16 31>
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-sextinreg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-sextinreg.mir
index 0b950b7..76d4d29 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-sextinreg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-sextinreg.mir
@@ -14,8 +14,7 @@ body: |
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK-NEXT: [[DUP:%[0-9]+]]:_(<4 x s32>) = G_DUP [[C]](s32)
; CHECK-NEXT: [[SHL:%[0-9]+]]:_(<4 x s32>) = G_SHL %v1, [[DUP]](<4 x s32>)
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CHECK-NEXT: %sext:_(<4 x s32>) = G_VASHR [[SHL]], [[C1]](s32)
+ ; CHECK-NEXT: %sext:_(<4 x s32>) = G_VASHR [[SHL]], 16
; CHECK-NEXT: $q0 = COPY %sext(<4 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%v1:_(<4 x s32>) = COPY $q0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir
index b3fb5a4..dfaddba 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir
@@ -15,8 +15,7 @@ body: |
; CHECK: liveins: $d0, $d1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
- ; CHECK-NEXT: [[VASHR:%[0-9]+]]:_(<4 x s32>) = G_VASHR [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[VASHR:%[0-9]+]]:_(<4 x s32>) = G_VASHR [[COPY]], 5
; CHECK-NEXT: $q0 = COPY [[VASHR]](<4 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<4 x s32>) = COPY $q0
@@ -39,8 +38,7 @@ body: |
; CHECK: liveins: $d0, $d1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
- ; CHECK-NEXT: [[VLSHR:%[0-9]+]]:_(<4 x s32>) = G_VLSHR [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[VLSHR:%[0-9]+]]:_(<4 x s32>) = G_VLSHR [[COPY]], 5
; CHECK-NEXT: $q0 = COPY [[VLSHR]](<4 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<4 x s32>) = COPY $q0
@@ -63,8 +61,7 @@ body: |
; CHECK: liveins: $d0, $d1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
- ; CHECK-NEXT: [[VLSHR:%[0-9]+]]:_(<8 x s16>) = G_VLSHR [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[VLSHR:%[0-9]+]]:_(<8 x s16>) = G_VLSHR [[COPY]], 5
; CHECK-NEXT: $q0 = COPY [[VLSHR]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir
index c38e4a8..cf227cb 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir
@@ -29,7 +29,6 @@ body: |
; CHECK-NEXT: [[UCVTFd:%[0-9]+]]:fpr64 = UCVTFd [[COPY]], 12
; CHECK-NEXT: $d1 = COPY [[UCVTFd]]
%0(s64) = COPY $d0
- %1(s32) = G_CONSTANT i32 12
- %2(s64) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.vcvtfxu2fp.f64), %0, %1
+ %2(s64) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.vcvtfxu2fp.f64), %0, 12
$d1 = COPY %2(s64)
...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-shift.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-shift.mir
index 0706115..9fa6326 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-shift.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-shift.mir
@@ -499,8 +499,7 @@ body: |
; CHECK-NEXT: $d0 = COPY [[SSHRv4i16_shift]]
; CHECK-NEXT: RET_ReallyLR implicit $d0
%0:fpr(<4 x s16>) = COPY $d0
- %1:gpr(s32) = G_CONSTANT i32 5
- %2:fpr(<4 x s16>) = G_VASHR %0, %1
+ %2:fpr(<4 x s16>) = G_VASHR %0, 5
$d0 = COPY %2(<4 x s16>)
RET_ReallyLR implicit $d0
...
@@ -520,8 +519,7 @@ body: |
; CHECK-NEXT: $d0 = COPY [[USHRv4i16_shift]]
; CHECK-NEXT: RET_ReallyLR implicit $d0
%0:fpr(<4 x s16>) = COPY $d0
- %1:gpr(s32) = G_CONSTANT i32 5
- %2:fpr(<4 x s16>) = G_VLSHR %0, %1
+ %2:fpr(<4 x s16>) = G_VLSHR %0, 5
$d0 = COPY %2(<4 x s16>)
RET_ReallyLR implicit $d0
...
diff --git a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
index cdde110..63c08dd 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
@@ -902,7 +902,7 @@ define void @sink_v8z16_0(ptr %p, ptr %d, i64 %n, <16 x i8> %a) {
; CHECK-GI-NEXT: subs x2, x2, #8
; CHECK-GI-NEXT: add x8, x8, #8
; CHECK-GI-NEXT: umull v1.8h, v1.8b, v0.8b
-; CHECK-GI-NEXT: sshr v1.8h, v1.8h, #15
+; CHECK-GI-NEXT: cmlt v1.8h, v1.8h, #0
; CHECK-GI-NEXT: xtn v1.8b, v1.8h
; CHECK-GI-NEXT: str d1, [x0], #32
; CHECK-GI-NEXT: b.ne .LBB8_1
@@ -967,8 +967,8 @@ define void @sink_v16s16_8(ptr %p, ptr %d, i64 %n, <16 x i8> %a) {
; CHECK-GI-NEXT: mov d2, v1.d[1]
; CHECK-GI-NEXT: smull v1.8h, v1.8b, v0.8b
; CHECK-GI-NEXT: smull v2.8h, v2.8b, v0.8b
-; CHECK-GI-NEXT: sshr v1.8h, v1.8h, #15
-; CHECK-GI-NEXT: sshr v2.8h, v2.8h, #15
+; CHECK-GI-NEXT: cmlt v1.8h, v1.8h, #0
+; CHECK-GI-NEXT: cmlt v2.8h, v2.8h, #0
; CHECK-GI-NEXT: uzp1 v1.16b, v1.16b, v2.16b
; CHECK-GI-NEXT: str q1, [x0], #32
; CHECK-GI-NEXT: b.ne .LBB9_1
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
index 9bafc5b..2a8b3ce2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
@@ -999,16 +999,10 @@ entry:
}
define <8 x i8> @test_vaddhn_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK-SD-LABEL: test_vaddhn_s16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: addhn v0.8b, v0.8h, v1.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vaddhn_s16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v0.8h, v0.8h, v1.8h
-; CHECK-GI-NEXT: shrn v0.8b, v0.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vaddhn_s16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: addhn v0.8b, v0.8h, v1.8h
+; CHECK-NEXT: ret
entry:
%vaddhn.i = add <8 x i16> %a, %b
%vaddhn1.i = lshr <8 x i16> %vaddhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -1017,16 +1011,10 @@ entry:
}
define <4 x i16> @test_vaddhn_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK-SD-LABEL: test_vaddhn_s32:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: addhn v0.4h, v0.4s, v1.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vaddhn_s32:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vaddhn_s32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT: ret
entry:
%vaddhn.i = add <4 x i32> %a, %b
%vaddhn1.i = lshr <4 x i32> %vaddhn.i, <i32 16, i32 16, i32 16, i32 16>
@@ -1035,16 +1023,10 @@ entry:
}
define <2 x i32> @test_vaddhn_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-SD-LABEL: test_vaddhn_s64:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: addhn v0.2s, v0.2d, v1.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vaddhn_s64:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v0.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT: shrn v0.2s, v0.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vaddhn_s64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: addhn v0.2s, v0.2d, v1.2d
+; CHECK-NEXT: ret
entry:
%vaddhn.i = add <2 x i64> %a, %b
%vaddhn1.i = lshr <2 x i64> %vaddhn.i, <i64 32, i64 32>
@@ -1053,16 +1035,10 @@ entry:
}
define <8 x i8> @test_vaddhn_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK-SD-LABEL: test_vaddhn_u16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: addhn v0.8b, v0.8h, v1.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vaddhn_u16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v0.8h, v0.8h, v1.8h
-; CHECK-GI-NEXT: shrn v0.8b, v0.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vaddhn_u16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: addhn v0.8b, v0.8h, v1.8h
+; CHECK-NEXT: ret
entry:
%vaddhn.i = add <8 x i16> %a, %b
%vaddhn1.i = lshr <8 x i16> %vaddhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -1071,16 +1047,10 @@ entry:
}
define <4 x i16> @test_vaddhn_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK-SD-LABEL: test_vaddhn_u32:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: addhn v0.4h, v0.4s, v1.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vaddhn_u32:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vaddhn_u32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT: ret
entry:
%vaddhn.i = add <4 x i32> %a, %b
%vaddhn1.i = lshr <4 x i32> %vaddhn.i, <i32 16, i32 16, i32 16, i32 16>
@@ -1089,16 +1059,10 @@ entry:
}
define <2 x i32> @test_vaddhn_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-SD-LABEL: test_vaddhn_u64:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: addhn v0.2s, v0.2d, v1.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vaddhn_u64:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v0.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT: shrn v0.2s, v0.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vaddhn_u64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: addhn v0.2s, v0.2d, v1.2d
+; CHECK-NEXT: ret
entry:
%vaddhn.i = add <2 x i64> %a, %b
%vaddhn1.i = lshr <2 x i64> %vaddhn.i, <i64 32, i64 32>
@@ -1115,9 +1079,8 @@ define <16 x i8> @test_vaddhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b)
;
; CHECK-GI-LABEL: test_vaddhn_high_s16:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v1.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: addhn v1.8b, v1.8h, v2.8h
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.8b, v1.8h, #8
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1141,9 +1104,8 @@ define <8 x i16> @test_vaddhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b)
;
; CHECK-GI-LABEL: test_vaddhn_high_s32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: addhn v1.4h, v1.4s, v2.4s
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.4h, v1.4s, #16
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1167,9 +1129,8 @@ define <4 x i32> @test_vaddhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b)
;
; CHECK-GI-LABEL: test_vaddhn_high_s64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v1.2d, v1.2d, v2.2d
+; CHECK-GI-NEXT: addhn v1.2s, v1.2d, v2.2d
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.2s, v1.2d, #32
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1193,9 +1154,8 @@ define <16 x i8> @test_vaddhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b)
;
; CHECK-GI-LABEL: test_vaddhn_high_u16:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v1.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: addhn v1.8b, v1.8h, v2.8h
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.8b, v1.8h, #8
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1219,9 +1179,8 @@ define <8 x i16> @test_vaddhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b)
;
; CHECK-GI-LABEL: test_vaddhn_high_u32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: addhn v1.4h, v1.4s, v2.4s
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.4h, v1.4s, #16
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1245,9 +1204,8 @@ define <4 x i32> @test_vaddhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b)
;
; CHECK-GI-LABEL: test_vaddhn_high_u64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v1.2d, v1.2d, v2.2d
+; CHECK-GI-NEXT: addhn v1.2s, v1.2d, v2.2d
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.2s, v1.2d, #32
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1461,16 +1419,10 @@ entry:
}
define <8 x i8> @test_vsubhn_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK-SD-LABEL: test_vsubhn_s16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: subhn v0.8b, v0.8h, v1.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vsubhn_s16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v0.8h, v0.8h, v1.8h
-; CHECK-GI-NEXT: shrn v0.8b, v0.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vsubhn_s16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: subhn v0.8b, v0.8h, v1.8h
+; CHECK-NEXT: ret
entry:
%vsubhn.i = sub <8 x i16> %a, %b
%vsubhn1.i = lshr <8 x i16> %vsubhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -1479,16 +1431,10 @@ entry:
}
define <4 x i16> @test_vsubhn_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK-SD-LABEL: test_vsubhn_s32:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: subhn v0.4h, v0.4s, v1.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vsubhn_s32:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vsubhn_s32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: subhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT: ret
entry:
%vsubhn.i = sub <4 x i32> %a, %b
%vsubhn1.i = lshr <4 x i32> %vsubhn.i, <i32 16, i32 16, i32 16, i32 16>
@@ -1497,16 +1443,10 @@ entry:
}
define <2 x i32> @test_vsubhn_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-SD-LABEL: test_vsubhn_s64:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: subhn v0.2s, v0.2d, v1.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vsubhn_s64:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v0.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT: shrn v0.2s, v0.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vsubhn_s64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: subhn v0.2s, v0.2d, v1.2d
+; CHECK-NEXT: ret
entry:
%vsubhn.i = sub <2 x i64> %a, %b
%vsubhn1.i = lshr <2 x i64> %vsubhn.i, <i64 32, i64 32>
@@ -1515,16 +1455,10 @@ entry:
}
define <8 x i8> @test_vsubhn_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK-SD-LABEL: test_vsubhn_u16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: subhn v0.8b, v0.8h, v1.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vsubhn_u16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v0.8h, v0.8h, v1.8h
-; CHECK-GI-NEXT: shrn v0.8b, v0.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vsubhn_u16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: subhn v0.8b, v0.8h, v1.8h
+; CHECK-NEXT: ret
entry:
%vsubhn.i = sub <8 x i16> %a, %b
%vsubhn1.i = lshr <8 x i16> %vsubhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -1533,16 +1467,10 @@ entry:
}
define <4 x i16> @test_vsubhn_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK-SD-LABEL: test_vsubhn_u32:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: subhn v0.4h, v0.4s, v1.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vsubhn_u32:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vsubhn_u32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: subhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT: ret
entry:
%vsubhn.i = sub <4 x i32> %a, %b
%vsubhn1.i = lshr <4 x i32> %vsubhn.i, <i32 16, i32 16, i32 16, i32 16>
@@ -1551,16 +1479,10 @@ entry:
}
define <2 x i32> @test_vsubhn_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-SD-LABEL: test_vsubhn_u64:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: subhn v0.2s, v0.2d, v1.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vsubhn_u64:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v0.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT: shrn v0.2s, v0.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vsubhn_u64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: subhn v0.2s, v0.2d, v1.2d
+; CHECK-NEXT: ret
entry:
%vsubhn.i = sub <2 x i64> %a, %b
%vsubhn1.i = lshr <2 x i64> %vsubhn.i, <i64 32, i64 32>
@@ -1577,9 +1499,8 @@ define <16 x i8> @test_vsubhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b)
;
; CHECK-GI-LABEL: test_vsubhn_high_s16:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v1.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: subhn v1.8b, v1.8h, v2.8h
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.8b, v1.8h, #8
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1603,9 +1524,8 @@ define <8 x i16> @test_vsubhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b)
;
; CHECK-GI-LABEL: test_vsubhn_high_s32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: subhn v1.4h, v1.4s, v2.4s
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.4h, v1.4s, #16
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1629,9 +1549,8 @@ define <4 x i32> @test_vsubhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b)
;
; CHECK-GI-LABEL: test_vsubhn_high_s64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v1.2d, v1.2d, v2.2d
+; CHECK-GI-NEXT: subhn v1.2s, v1.2d, v2.2d
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.2s, v1.2d, #32
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1655,9 +1574,8 @@ define <16 x i8> @test_vsubhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b)
;
; CHECK-GI-LABEL: test_vsubhn_high_u16:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v1.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: subhn v1.8b, v1.8h, v2.8h
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.8b, v1.8h, #8
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1681,9 +1599,8 @@ define <8 x i16> @test_vsubhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b)
;
; CHECK-GI-LABEL: test_vsubhn_high_u32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: subhn v1.4h, v1.4s, v2.4s
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.4h, v1.4s, #16
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1707,9 +1624,8 @@ define <4 x i32> @test_vsubhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b)
;
; CHECK-GI-LABEL: test_vsubhn_high_u64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v1.2d, v1.2d, v2.2d
+; CHECK-GI-NEXT: subhn v1.2s, v1.2d, v2.2d
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.2s, v1.2d, #32
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-subvector-extend.ll b/llvm/test/CodeGen/AArch64/arm64-subvector-extend.ll
index 84879d1..03e6ca1 100644
--- a/llvm/test/CodeGen/AArch64/arm64-subvector-extend.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-subvector-extend.ll
@@ -524,8 +524,8 @@ define <32 x i8> @sext_v32i1(<32 x i1> %arg) {
; CHECK-GI-NEXT: mov.b v1[15], w9
; CHECK-GI-NEXT: shl.16b v0, v0, #7
; CHECK-GI-NEXT: shl.16b v1, v1, #7
-; CHECK-GI-NEXT: sshr.16b v0, v0, #7
-; CHECK-GI-NEXT: sshr.16b v1, v1, #7
+; CHECK-GI-NEXT: cmlt.16b v0, v0, #0
+; CHECK-GI-NEXT: cmlt.16b v1, v1, #0
; CHECK-GI-NEXT: ret
%res = sext <32 x i1> %arg to <32 x i8>
ret <32 x i8> %res
@@ -934,10 +934,10 @@ define <64 x i8> @sext_v64i1(<64 x i1> %arg) {
; CHECK-GI-NEXT: shl.16b v1, v1, #7
; CHECK-GI-NEXT: shl.16b v2, v2, #7
; CHECK-GI-NEXT: shl.16b v3, v3, #7
-; CHECK-GI-NEXT: sshr.16b v0, v0, #7
-; CHECK-GI-NEXT: sshr.16b v1, v1, #7
-; CHECK-GI-NEXT: sshr.16b v2, v2, #7
-; CHECK-GI-NEXT: sshr.16b v3, v3, #7
+; CHECK-GI-NEXT: cmlt.16b v0, v0, #0
+; CHECK-GI-NEXT: cmlt.16b v1, v1, #0
+; CHECK-GI-NEXT: cmlt.16b v2, v2, #0
+; CHECK-GI-NEXT: cmlt.16b v3, v3, #0
; CHECK-GI-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-GI-NEXT: ret
%res = sext <64 x i1> %arg to <64 x i8>
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index c408d7f..a3f4722 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1914,21 +1914,13 @@ define <2 x i128> @uabd_i64(<2 x i64> %a, <2 x i64> %b) {
}
define <8 x i16> @pr88784(<8 x i8> %l0, <8 x i8> %l1, <8 x i16> %l2) {
-; CHECK-SD-LABEL: pr88784:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: usubl.8h v0, v0, v1
-; CHECK-SD-NEXT: cmlt.8h v1, v2, #0
-; CHECK-SD-NEXT: ssra.8h v0, v2, #15
-; CHECK-SD-NEXT: eor.16b v0, v1, v0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: pr88784:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: usubl.8h v0, v0, v1
-; CHECK-GI-NEXT: sshr.8h v1, v2, #15
-; CHECK-GI-NEXT: ssra.8h v0, v2, #15
-; CHECK-GI-NEXT: eor.16b v0, v1, v0
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: pr88784:
+; CHECK: // %bb.0:
+; CHECK-NEXT: usubl.8h v0, v0, v1
+; CHECK-NEXT: cmlt.8h v1, v2, #0
+; CHECK-NEXT: ssra.8h v0, v2, #15
+; CHECK-NEXT: eor.16b v0, v1, v0
+; CHECK-NEXT: ret
%l4 = zext <8 x i8> %l0 to <8 x i16>
%l5 = ashr <8 x i16> %l2, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
%l6 = zext <8 x i8> %l1 to <8 x i16>
@@ -1947,7 +1939,7 @@ define <8 x i16> @pr88784_fixed(<8 x i8> %l0, <8 x i8> %l1, <8 x i16> %l2) {
; CHECK-GI-LABEL: pr88784_fixed:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: usubl.8h v0, v0, v1
-; CHECK-GI-NEXT: sshr.8h v1, v0, #15
+; CHECK-GI-NEXT: cmlt.8h v1, v0, #0
; CHECK-GI-NEXT: ssra.8h v0, v0, #15
; CHECK-GI-NEXT: eor.16b v0, v1, v0
; CHECK-GI-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-vadd.ll b/llvm/test/CodeGen/AArch64/arm64-vadd.ll
index 11fb732..938712a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vadd.ll
@@ -1103,20 +1103,12 @@ define <2 x i64> @ssubl2_duplhs(i32 %lhs, <4 x i32> %rhs) {
}
define <8 x i8> @addhn8b_natural(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: addhn8b_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q0, [x0]
-; CHECK-SD-NEXT: ldr q1, [x1]
-; CHECK-SD-NEXT: addhn v0.8b, v0.8h, v1.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: addhn8b_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q0, [x0]
-; CHECK-GI-NEXT: ldr q1, [x1]
-; CHECK-GI-NEXT: add v0.8h, v0.8h, v1.8h
-; CHECK-GI-NEXT: shrn v0.8b, v0.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: addhn8b_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: addhn v0.8b, v0.8h, v1.8h
+; CHECK-NEXT: ret
%tmp1 = load <8 x i16>, ptr %A
%tmp2 = load <8 x i16>, ptr %B
%sum = add <8 x i16> %tmp1, %tmp2
@@ -1126,20 +1118,12 @@ define <8 x i8> @addhn8b_natural(ptr %A, ptr %B) nounwind {
}
define <4 x i16> @addhn4h_natural(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: addhn4h_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q0, [x0]
-; CHECK-SD-NEXT: ldr q1, [x1]
-; CHECK-SD-NEXT: addhn v0.4h, v0.4s, v1.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: addhn4h_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q0, [x0]
-; CHECK-GI-NEXT: ldr q1, [x1]
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: addhn4h_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT: ret
%tmp1 = load <4 x i32>, ptr %A
%tmp2 = load <4 x i32>, ptr %B
%sum = add <4 x i32> %tmp1, %tmp2
@@ -1149,20 +1133,12 @@ define <4 x i16> @addhn4h_natural(ptr %A, ptr %B) nounwind {
}
define <2 x i32> @addhn2s_natural(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: addhn2s_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q0, [x0]
-; CHECK-SD-NEXT: ldr q1, [x1]
-; CHECK-SD-NEXT: addhn v0.2s, v0.2d, v1.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: addhn2s_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q0, [x0]
-; CHECK-GI-NEXT: ldr q1, [x1]
-; CHECK-GI-NEXT: add v0.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT: shrn v0.2s, v0.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: addhn2s_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: addhn v0.2s, v0.2d, v1.2d
+; CHECK-NEXT: ret
%tmp1 = load <2 x i64>, ptr %A
%tmp2 = load <2 x i64>, ptr %B
%sum = add <2 x i64> %tmp1, %tmp2
@@ -1172,22 +1148,13 @@ define <2 x i32> @addhn2s_natural(ptr %A, ptr %B) nounwind {
}
define <16 x i8> @addhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: addhn2_16b_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q1, [x0]
-; CHECK-SD-NEXT: ldr q2, [x1]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: addhn2 v0.16b, v1.8h, v2.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: addhn2_16b_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q1, [x0]
-; CHECK-GI-NEXT: ldr q2, [x1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: add v1.8h, v1.8h, v2.8h
-; CHECK-GI-NEXT: shrn2 v0.16b, v1.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: addhn2_16b_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: addhn2 v0.16b, v1.8h, v2.8h
+; CHECK-NEXT: ret
%tmp1 = load <8 x i16>, ptr %A
%tmp2 = load <8 x i16>, ptr %B
%sum = add <8 x i16> %tmp1, %tmp2
@@ -1198,22 +1165,13 @@ define <16 x i8> @addhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind {
}
define <8 x i16> @addhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: addhn2_8h_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q1, [x0]
-; CHECK-SD-NEXT: ldr q2, [x1]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: addhn2 v0.8h, v1.4s, v2.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: addhn2_8h_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q1, [x0]
-; CHECK-GI-NEXT: ldr q2, [x1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT: shrn2 v0.8h, v1.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: addhn2_8h_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: addhn2 v0.8h, v1.4s, v2.4s
+; CHECK-NEXT: ret
%tmp1 = load <4 x i32>, ptr %A
%tmp2 = load <4 x i32>, ptr %B
%sum = add <4 x i32> %tmp1, %tmp2
@@ -1224,22 +1182,13 @@ define <8 x i16> @addhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind {
}
define <4 x i32> @addhn2_4s_natural(<2 x i32> %low, ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: addhn2_4s_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q1, [x0]
-; CHECK-SD-NEXT: ldr q2, [x1]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: addhn2 v0.4s, v1.2d, v2.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: addhn2_4s_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q1, [x0]
-; CHECK-GI-NEXT: ldr q2, [x1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: add v1.2d, v1.2d, v2.2d
-; CHECK-GI-NEXT: shrn2 v0.4s, v1.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: addhn2_4s_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: addhn2 v0.4s, v1.2d, v2.2d
+; CHECK-NEXT: ret
%tmp1 = load <2 x i64>, ptr %A
%tmp2 = load <2 x i64>, ptr %B
%sum = add <2 x i64> %tmp1, %tmp2
@@ -1250,22 +1199,13 @@ define <4 x i32> @addhn2_4s_natural(<2 x i32> %low, ptr %A, ptr %B) nounwind {
}
define <4 x i32> @addhn_addhn2_4s(ptr %A, ptr %B, ptr %C, ptr %D) nounwind {
-; CHECK-SD-LABEL: addhn_addhn2_4s:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q1, [x0]
-; CHECK-SD-NEXT: ldr q2, [x1]
-; CHECK-SD-NEXT: addhn v0.2s, v1.2d, v2.2d
-; CHECK-SD-NEXT: addhn2 v0.4s, v1.2d, v2.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: addhn_addhn2_4s:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q0, [x0]
-; CHECK-GI-NEXT: ldr q1, [x1]
-; CHECK-GI-NEXT: add v1.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT: shrn v0.2s, v1.2d, #32
-; CHECK-GI-NEXT: shrn2 v0.4s, v1.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: addhn_addhn2_4s:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: addhn v0.2s, v1.2d, v2.2d
+; CHECK-NEXT: addhn2 v0.4s, v1.2d, v2.2d
+; CHECK-NEXT: ret
%tmp1 = load <2 x i64>, ptr %A
%tmp2 = load <2 x i64>, ptr %B
%sum1 = add <2 x i64> %tmp1, %tmp2
@@ -1281,20 +1221,12 @@ define <4 x i32> @addhn_addhn2_4s(ptr %A, ptr %B, ptr %C, ptr %D) nounwind {
}
define <8 x i8> @subhn8b_natural(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: subhn8b_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q0, [x0]
-; CHECK-SD-NEXT: ldr q1, [x1]
-; CHECK-SD-NEXT: subhn v0.8b, v0.8h, v1.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: subhn8b_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q0, [x0]
-; CHECK-GI-NEXT: ldr q1, [x1]
-; CHECK-GI-NEXT: sub v0.8h, v0.8h, v1.8h
-; CHECK-GI-NEXT: shrn v0.8b, v0.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: subhn8b_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: subhn v0.8b, v0.8h, v1.8h
+; CHECK-NEXT: ret
%tmp1 = load <8 x i16>, ptr %A
%tmp2 = load <8 x i16>, ptr %B
%diff = sub <8 x i16> %tmp1, %tmp2
@@ -1304,20 +1236,12 @@ define <8 x i8> @subhn8b_natural(ptr %A, ptr %B) nounwind {
}
define <4 x i16> @subhn4h_natural(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: subhn4h_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q0, [x0]
-; CHECK-SD-NEXT: ldr q1, [x1]
-; CHECK-SD-NEXT: subhn v0.4h, v0.4s, v1.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: subhn4h_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q0, [x0]
-; CHECK-GI-NEXT: ldr q1, [x1]
-; CHECK-GI-NEXT: sub v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: subhn4h_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: subhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT: ret
%tmp1 = load <4 x i32>, ptr %A
%tmp2 = load <4 x i32>, ptr %B
%diff = sub <4 x i32> %tmp1, %tmp2
@@ -1327,20 +1251,12 @@ define <4 x i16> @subhn4h_natural(ptr %A, ptr %B) nounwind {
}
define <2 x i32> @subhn2s_natural(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: subhn2s_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q0, [x0]
-; CHECK-SD-NEXT: ldr q1, [x1]
-; CHECK-SD-NEXT: subhn v0.2s, v0.2d, v1.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: subhn2s_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q0, [x0]
-; CHECK-GI-NEXT: ldr q1, [x1]
-; CHECK-GI-NEXT: sub v0.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT: shrn v0.2s, v0.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: subhn2s_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: subhn v0.2s, v0.2d, v1.2d
+; CHECK-NEXT: ret
%tmp1 = load <2 x i64>, ptr %A
%tmp2 = load <2 x i64>, ptr %B
%diff = sub <2 x i64> %tmp1, %tmp2
@@ -1350,22 +1266,13 @@ define <2 x i32> @subhn2s_natural(ptr %A, ptr %B) nounwind {
}
define <16 x i8> @subhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: subhn2_16b_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q1, [x0]
-; CHECK-SD-NEXT: ldr q2, [x1]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: subhn2 v0.16b, v1.8h, v2.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: subhn2_16b_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q1, [x0]
-; CHECK-GI-NEXT: ldr q2, [x1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: sub v1.8h, v1.8h, v2.8h
-; CHECK-GI-NEXT: shrn2 v0.16b, v1.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: subhn2_16b_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: subhn2 v0.16b, v1.8h, v2.8h
+; CHECK-NEXT: ret
%tmp1 = load <8 x i16>, ptr %A
%tmp2 = load <8 x i16>, ptr %B
%diff = sub <8 x i16> %tmp1, %tmp2
@@ -1376,22 +1283,13 @@ define <16 x i8> @subhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind {
}
define <8 x i16> @subhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: subhn2_8h_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q1, [x0]
-; CHECK-SD-NEXT: ldr q2, [x1]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: subhn2 v0.8h, v1.4s, v2.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: subhn2_8h_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q1, [x0]
-; CHECK-GI-NEXT: ldr q2, [x1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: sub v1.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT: shrn2 v0.8h, v1.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: subhn2_8h_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: subhn2 v0.8h, v1.4s, v2.4s
+; CHECK-NEXT: ret
%tmp1 = load <4 x i32>, ptr %A
%tmp2 = load <4 x i32>, ptr %B
%diff = sub <4 x i32> %tmp1, %tmp2
@@ -1402,22 +1300,13 @@ define <8 x i16> @subhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind {
}
define <4 x i32> @subhn2_4s_natural(<2 x i32> %low, ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: subhn2_4s_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q1, [x0]
-; CHECK-SD-NEXT: ldr q2, [x1]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: subhn2 v0.4s, v1.2d, v2.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: subhn2_4s_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q1, [x0]
-; CHECK-GI-NEXT: ldr q2, [x1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: sub v1.2d, v1.2d, v2.2d
-; CHECK-GI-NEXT: shrn2 v0.4s, v1.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: subhn2_4s_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: subhn2 v0.4s, v1.2d, v2.2d
+; CHECK-NEXT: ret
%tmp1 = load <2 x i64>, ptr %A
%tmp2 = load <2 x i64>, ptr %B
%diff = sub <2 x i64> %tmp1, %tmp2
@@ -1428,20 +1317,12 @@ define <4 x i32> @subhn2_4s_natural(<2 x i32> %low, ptr %A, ptr %B) nounwind {
}
define <16 x i8> @neg_narrow_i8(<16 x i16> %a) {
-; CHECK-SD-LABEL: neg_narrow_i8:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: movi v2.2d, #0xffffffffffffffff
-; CHECK-SD-NEXT: subhn v0.8b, v2.8h, v0.8h
-; CHECK-SD-NEXT: subhn2 v0.16b, v2.8h, v1.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: neg_narrow_i8:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: mvn v0.16b, v0.16b
-; CHECK-GI-NEXT: mvn v1.16b, v1.16b
-; CHECK-GI-NEXT: shrn v0.8b, v0.8h, #8
-; CHECK-GI-NEXT: shrn2 v0.16b, v1.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: neg_narrow_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v2.2d, #0xffffffffffffffff
+; CHECK-NEXT: subhn v0.8b, v2.8h, v0.8h
+; CHECK-NEXT: subhn2 v0.16b, v2.8h, v1.8h
+; CHECK-NEXT: ret
%not.i = xor <16 x i16> %a, splat (i16 -1)
%s = lshr <16 x i16> %not.i, splat (i16 8)
%vshrn_n = trunc nuw <16 x i16> %s to <16 x i8>
@@ -1449,20 +1330,12 @@ define <16 x i8> @neg_narrow_i8(<16 x i16> %a) {
}
define <8 x i16> @neg_narrow_i16(<8 x i32> %a) {
-; CHECK-SD-LABEL: neg_narrow_i16:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: movi v2.2d, #0xffffffffffffffff
-; CHECK-SD-NEXT: subhn v0.4h, v2.4s, v0.4s
-; CHECK-SD-NEXT: subhn2 v0.8h, v2.4s, v1.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: neg_narrow_i16:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: mvn v0.16b, v0.16b
-; CHECK-GI-NEXT: mvn v1.16b, v1.16b
-; CHECK-GI-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-GI-NEXT: shrn2 v0.8h, v1.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: neg_narrow_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v2.2d, #0xffffffffffffffff
+; CHECK-NEXT: subhn v0.4h, v2.4s, v0.4s
+; CHECK-NEXT: subhn2 v0.8h, v2.4s, v1.4s
+; CHECK-NEXT: ret
%not.i = xor <8 x i32> %a, splat (i32 -1)
%s = lshr <8 x i32> %not.i, splat (i32 16)
%vshrn_n = trunc nuw <8 x i32> %s to <8 x i16>
@@ -1470,20 +1343,12 @@ define <8 x i16> @neg_narrow_i16(<8 x i32> %a) {
}
define <4 x i32> @neg_narrow_i32(<4 x i64> %a) {
-; CHECK-SD-LABEL: neg_narrow_i32:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: movi v2.2d, #0xffffffffffffffff
-; CHECK-SD-NEXT: subhn v0.2s, v2.2d, v0.2d
-; CHECK-SD-NEXT: subhn2 v0.4s, v2.2d, v1.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: neg_narrow_i32:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: mvn v0.16b, v0.16b
-; CHECK-GI-NEXT: mvn v1.16b, v1.16b
-; CHECK-GI-NEXT: shrn v0.2s, v0.2d, #32
-; CHECK-GI-NEXT: shrn2 v0.4s, v1.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: neg_narrow_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v2.2d, #0xffffffffffffffff
+; CHECK-NEXT: subhn v0.2s, v2.2d, v0.2d
+; CHECK-NEXT: subhn2 v0.4s, v2.2d, v1.2d
+; CHECK-NEXT: ret
%not.i = xor <4 x i64> %a, splat (i64 -1)
%s = lshr <4 x i64> %not.i, splat (i64 32)
%vshrn_n = trunc nuw <4 x i64> %s to <4 x i32>
diff --git a/llvm/test/CodeGen/AArch64/combine-sdiv.ll b/llvm/test/CodeGen/AArch64/combine-sdiv.ll
index 9d0ade2..dc88f94 100644
--- a/llvm/test/CodeGen/AArch64/combine-sdiv.ll
+++ b/llvm/test/CodeGen/AArch64/combine-sdiv.ll
@@ -66,9 +66,9 @@ define <4 x i32> @combine_vec_sdiv_by_minsigned(<4 x i32> %x) {
;
; CHECK-GI-LABEL: combine_vec_sdiv_by_minsigned:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v1.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v0.4s, #0
; CHECK-GI-NEXT: usra v0.4s, v1.4s, #1
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v0.4s, v0.4s, #0
; CHECK-GI-NEXT: neg v0.4s, v0.4s
; CHECK-GI-NEXT: ret
%1 = sdiv <4 x i32> %x, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
@@ -176,7 +176,7 @@ define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
; CHECK-GI-NEXT: mov v1.s[2], w9
; CHECK-GI-NEXT: mov v1.s[3], w9
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
%1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
@@ -185,39 +185,24 @@ define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
}
define <4 x i32> @combine_vec_sdiv_by_pow2a(<4 x i32> %x) {
-; CHECK-SD-LABEL: combine_vec_sdiv_by_pow2a:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v1.4s, v0.4s, #0
-; CHECK-SD-NEXT: usra v0.4s, v1.4s, #30
-; CHECK-SD-NEXT: sshr v0.4s, v0.4s, #2
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2a:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v1.4s, v0.4s, #31
-; CHECK-GI-NEXT: usra v0.4s, v1.4s, #30
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #2
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: combine_vec_sdiv_by_pow2a:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v1.4s, v0.4s, #0
+; CHECK-NEXT: usra v0.4s, v1.4s, #30
+; CHECK-NEXT: sshr v0.4s, v0.4s, #2
+; CHECK-NEXT: ret
%1 = sdiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
ret <4 x i32> %1
}
define <4 x i32> @combine_vec_sdiv_by_pow2a_neg(<4 x i32> %x) {
-; CHECK-SD-LABEL: combine_vec_sdiv_by_pow2a_neg:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v1.4s, v0.4s, #0
-; CHECK-SD-NEXT: usra v0.4s, v1.4s, #30
-; CHECK-SD-NEXT: sshr v0.4s, v0.4s, #2
-; CHECK-SD-NEXT: neg v0.4s, v0.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2a_neg:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v1.4s, v0.4s, #31
-; CHECK-GI-NEXT: usra v0.4s, v1.4s, #30
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #2
-; CHECK-GI-NEXT: neg v0.4s, v0.4s
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: combine_vec_sdiv_by_pow2a_neg:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v1.4s, v0.4s, #0
+; CHECK-NEXT: usra v0.4s, v1.4s, #30
+; CHECK-NEXT: sshr v0.4s, v0.4s, #2
+; CHECK-NEXT: neg v0.4s, v0.4s
+; CHECK-NEXT: ret
%1 = sdiv <4 x i32> %x, <i32 -4, i32 -4, i32 -4, i32 -4>
ret <4 x i32> %1
}
@@ -240,7 +225,7 @@ define <16 x i8> @combine_vec_sdiv_by_pow2b_v16i8(<16 x i8> %x) {
; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2b_v16i8:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: adrp x8, .LCPI14_1
-; CHECK-GI-NEXT: sshr v2.16b, v0.16b, #7
+; CHECK-GI-NEXT: cmlt v2.16b, v0.16b, #0
; CHECK-GI-NEXT: adrp x9, .LCPI14_0
; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI14_1]
; CHECK-GI-NEXT: adrp x8, .LCPI14_2
@@ -252,7 +237,7 @@ define <16 x i8> @combine_vec_sdiv_by_pow2b_v16i8(<16 x i8> %x) {
; CHECK-GI-NEXT: neg v2.16b, v2.16b
; CHECK-GI-NEXT: add v1.16b, v0.16b, v1.16b
; CHECK-GI-NEXT: sshl v1.16b, v1.16b, v2.16b
-; CHECK-GI-NEXT: sshr v2.16b, v3.16b, #7
+; CHECK-GI-NEXT: cmlt v2.16b, v3.16b, #0
; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-GI-NEXT: ret
%1 = sdiv <16 x i8> %x, <i8 1, i8 4, i8 2, i8 16, i8 8, i8 32, i8 64, i8 2, i8 1, i8 4, i8 2, i8 16, i8 8, i8 32, i8 64, i8 2>
@@ -278,7 +263,7 @@ define <8 x i16> @combine_vec_sdiv_by_pow2b_v8i16(<8 x i16> %x) {
; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2b_v8i16:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: adrp x8, .LCPI15_1
-; CHECK-GI-NEXT: sshr v2.8h, v0.8h, #15
+; CHECK-GI-NEXT: cmlt v2.8h, v0.8h, #0
; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI15_1]
; CHECK-GI-NEXT: adrp x8, .LCPI15_0
; CHECK-GI-NEXT: ldr d3, [x8, :lo12:.LCPI15_0]
@@ -291,7 +276,7 @@ define <8 x i16> @combine_vec_sdiv_by_pow2b_v8i16(<8 x i16> %x) {
; CHECK-GI-NEXT: add v1.8h, v0.8h, v1.8h
; CHECK-GI-NEXT: shl v2.8h, v2.8h, #15
; CHECK-GI-NEXT: sshl v1.8h, v1.8h, v3.8h
-; CHECK-GI-NEXT: sshr v2.8h, v2.8h, #15
+; CHECK-GI-NEXT: cmlt v2.8h, v2.8h, #0
; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-GI-NEXT: ret
%1 = sdiv <8 x i16> %x, <i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2>
@@ -322,8 +307,8 @@ define <16 x i16> @combine_vec_sdiv_by_pow2b_v16i16(<16 x i16> %x) {
; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2b_v16i16:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: adrp x8, .LCPI16_1
-; CHECK-GI-NEXT: sshr v3.8h, v0.8h, #15
-; CHECK-GI-NEXT: sshr v4.8h, v1.8h, #15
+; CHECK-GI-NEXT: cmlt v3.8h, v0.8h, #0
+; CHECK-GI-NEXT: cmlt v4.8h, v1.8h, #0
; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI16_1]
; CHECK-GI-NEXT: adrp x8, .LCPI16_0
; CHECK-GI-NEXT: ldr d5, [x8, :lo12:.LCPI16_0]
@@ -339,7 +324,7 @@ define <16 x i16> @combine_vec_sdiv_by_pow2b_v16i16(<16 x i16> %x) {
; CHECK-GI-NEXT: add v2.8h, v1.8h, v2.8h
; CHECK-GI-NEXT: sshl v3.8h, v3.8h, v4.8h
; CHECK-GI-NEXT: sshl v2.8h, v2.8h, v4.8h
-; CHECK-GI-NEXT: sshr v4.8h, v5.8h, #15
+; CHECK-GI-NEXT: cmlt v4.8h, v5.8h, #0
; CHECK-GI-NEXT: bif v0.16b, v3.16b, v4.16b
; CHECK-GI-NEXT: bif v1.16b, v2.16b, v4.16b
; CHECK-GI-NEXT: ret
@@ -381,12 +366,12 @@ define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) {
; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: adrp x8, .LCPI17_1
-; CHECK-GI-NEXT: sshr v5.8h, v0.8h, #15
-; CHECK-GI-NEXT: sshr v6.8h, v1.8h, #15
+; CHECK-GI-NEXT: cmlt v5.8h, v0.8h, #0
+; CHECK-GI-NEXT: cmlt v6.8h, v1.8h, #0
; CHECK-GI-NEXT: ldr q4, [x8, :lo12:.LCPI17_1]
; CHECK-GI-NEXT: adrp x8, .LCPI17_0
-; CHECK-GI-NEXT: sshr v7.8h, v2.8h, #15
-; CHECK-GI-NEXT: sshr v16.8h, v3.8h, #15
+; CHECK-GI-NEXT: cmlt v7.8h, v2.8h, #0
+; CHECK-GI-NEXT: cmlt v16.8h, v3.8h, #0
; CHECK-GI-NEXT: ldr d17, [x8, :lo12:.LCPI17_0]
; CHECK-GI-NEXT: adrp x8, .LCPI17_2
; CHECK-GI-NEXT: neg v4.8h, v4.8h
@@ -402,7 +387,7 @@ define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) {
; CHECK-GI-NEXT: add v6.8h, v1.8h, v6.8h
; CHECK-GI-NEXT: add v7.8h, v2.8h, v7.8h
; CHECK-GI-NEXT: add v4.8h, v3.8h, v4.8h
-; CHECK-GI-NEXT: sshr v17.8h, v17.8h, #15
+; CHECK-GI-NEXT: cmlt v17.8h, v17.8h, #0
; CHECK-GI-NEXT: sshl v5.8h, v5.8h, v16.8h
; CHECK-GI-NEXT: sshl v6.8h, v6.8h, v16.8h
; CHECK-GI-NEXT: sshl v7.8h, v7.8h, v16.8h
@@ -436,7 +421,7 @@ define <4 x i32> @combine_vec_sdiv_by_pow2b_v4i32(<4 x i32> %x) {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: mov w8, #1 // =0x1
; CHECK-GI-NEXT: mov w9, #0 // =0x0
-; CHECK-GI-NEXT: sshr v3.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v3.4s, v0.4s, #0
; CHECK-GI-NEXT: fmov s1, w8
; CHECK-GI-NEXT: adrp x8, .LCPI18_0
; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI18_0]
@@ -451,7 +436,7 @@ define <4 x i32> @combine_vec_sdiv_by_pow2b_v4i32(<4 x i32> %x) {
; CHECK-GI-NEXT: mov v1.s[3], w9
; CHECK-GI-NEXT: sshl v2.4s, v2.4s, v3.4s
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
%1 = sdiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
@@ -483,10 +468,10 @@ define <8 x i32> @combine_vec_sdiv_by_pow2b_v8i32(<8 x i32> %x) {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: mov w8, #1 // =0x1
; CHECK-GI-NEXT: mov w9, #0 // =0x0
-; CHECK-GI-NEXT: sshr v4.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v4.4s, v0.4s, #0
; CHECK-GI-NEXT: fmov s2, w8
; CHECK-GI-NEXT: adrp x8, .LCPI19_0
-; CHECK-GI-NEXT: sshr v5.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v5.4s, v1.4s, #0
; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI19_0]
; CHECK-GI-NEXT: adrp x8, .LCPI19_1
; CHECK-GI-NEXT: mov v2.h[1], w9
@@ -503,7 +488,7 @@ define <8 x i32> @combine_vec_sdiv_by_pow2b_v8i32(<8 x i32> %x) {
; CHECK-GI-NEXT: sshl v3.4s, v3.4s, v5.4s
; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0
; CHECK-GI-NEXT: shl v2.4s, v2.4s, #31
-; CHECK-GI-NEXT: sshr v2.4s, v2.4s, #31
+; CHECK-GI-NEXT: cmlt v2.4s, v2.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v4.16b, v2.16b
; CHECK-GI-NEXT: bif v1.16b, v3.16b, v2.16b
; CHECK-GI-NEXT: ret
@@ -546,13 +531,13 @@ define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: mov w8, #1 // =0x1
; CHECK-GI-NEXT: mov w9, #0 // =0x0
-; CHECK-GI-NEXT: sshr v6.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v6.4s, v0.4s, #0
; CHECK-GI-NEXT: fmov s4, w8
; CHECK-GI-NEXT: adrp x8, .LCPI20_0
-; CHECK-GI-NEXT: sshr v7.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v7.4s, v1.4s, #0
; CHECK-GI-NEXT: ldr q5, [x8, :lo12:.LCPI20_0]
-; CHECK-GI-NEXT: sshr v16.4s, v2.4s, #31
-; CHECK-GI-NEXT: sshr v17.4s, v3.4s, #31
+; CHECK-GI-NEXT: cmlt v16.4s, v2.4s, #0
+; CHECK-GI-NEXT: cmlt v17.4s, v3.4s, #0
; CHECK-GI-NEXT: adrp x8, .LCPI20_1
; CHECK-GI-NEXT: mov v4.h[1], w9
; CHECK-GI-NEXT: neg v5.4s, v5.4s
@@ -574,7 +559,7 @@ define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) {
; CHECK-GI-NEXT: sshl v5.4s, v5.4s, v17.4s
; CHECK-GI-NEXT: ushll v4.4s, v4.4h, #0
; CHECK-GI-NEXT: shl v4.4s, v4.4s, #31
-; CHECK-GI-NEXT: sshr v4.4s, v4.4s, #31
+; CHECK-GI-NEXT: cmlt v4.4s, v4.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v6.16b, v4.16b
; CHECK-GI-NEXT: bif v1.16b, v7.16b, v4.16b
; CHECK-GI-NEXT: bif v2.16b, v16.16b, v4.16b
@@ -603,7 +588,7 @@ define <2 x i64> @combine_vec_sdiv_by_pow2b_v2i64(<2 x i64> %x) {
; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: adrp x8, .LCPI21_1
-; CHECK-GI-NEXT: sshr v2.2d, v0.2d, #63
+; CHECK-GI-NEXT: cmlt v2.2d, v0.2d, #0
; CHECK-GI-NEXT: adrp x9, .LCPI21_0
; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI21_1]
; CHECK-GI-NEXT: adrp x8, .LCPI21_2
@@ -615,7 +600,7 @@ define <2 x i64> @combine_vec_sdiv_by_pow2b_v2i64(<2 x i64> %x) {
; CHECK-GI-NEXT: neg v2.2d, v2.2d
; CHECK-GI-NEXT: add v1.2d, v0.2d, v1.2d
; CHECK-GI-NEXT: sshl v1.2d, v1.2d, v2.2d
-; CHECK-GI-NEXT: sshr v2.2d, v3.2d, #63
+; CHECK-GI-NEXT: cmlt v2.2d, v3.2d, #0
; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-GI-NEXT: ret
%1 = sdiv <2 x i64> %x, <i64 1, i64 4>
@@ -649,7 +634,7 @@ define <4 x i64> @combine_vec_sdiv_by_pow2b_v4i64(<4 x i64> %x) {
; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: adrp x8, .LCPI22_2
-; CHECK-GI-NEXT: sshr v3.2d, v0.2d, #63
+; CHECK-GI-NEXT: cmlt v3.2d, v0.2d, #0
; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI22_2]
; CHECK-GI-NEXT: adrp x8, .LCPI22_1
; CHECK-GI-NEXT: ldr q4, [x8, :lo12:.LCPI22_1]
@@ -662,13 +647,13 @@ define <4 x i64> @combine_vec_sdiv_by_pow2b_v4i64(<4 x i64> %x) {
; CHECK-GI-NEXT: adrp x8, .LCPI22_3
; CHECK-GI-NEXT: neg v5.2d, v5.2d
; CHECK-GI-NEXT: ushl v2.2d, v3.2d, v2.2d
-; CHECK-GI-NEXT: sshr v3.2d, v1.2d, #63
+; CHECK-GI-NEXT: cmlt v3.2d, v1.2d, #0
; CHECK-GI-NEXT: shl v6.2d, v6.2d, #63
; CHECK-GI-NEXT: add v2.2d, v0.2d, v2.2d
; CHECK-GI-NEXT: ushl v3.2d, v3.2d, v4.2d
; CHECK-GI-NEXT: ldr q4, [x8, :lo12:.LCPI22_3]
; CHECK-GI-NEXT: sshl v2.2d, v2.2d, v5.2d
-; CHECK-GI-NEXT: sshr v5.2d, v6.2d, #63
+; CHECK-GI-NEXT: cmlt v5.2d, v6.2d, #0
; CHECK-GI-NEXT: add v1.2d, v1.2d, v3.2d
; CHECK-GI-NEXT: neg v3.2d, v4.2d
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v5.16b
@@ -715,13 +700,13 @@ define <8 x i64> @combine_vec_sdiv_by_pow2b_v8i64(<8 x i64> %x) {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: mov w8, #1 // =0x1
; CHECK-GI-NEXT: mov w9, #0 // =0x0
-; CHECK-GI-NEXT: sshr v7.2d, v0.2d, #63
+; CHECK-GI-NEXT: cmlt v7.2d, v0.2d, #0
; CHECK-GI-NEXT: fmov s4, w8
; CHECK-GI-NEXT: adrp x8, .LCPI23_1
-; CHECK-GI-NEXT: sshr v16.2d, v1.2d, #63
+; CHECK-GI-NEXT: cmlt v16.2d, v1.2d, #0
; CHECK-GI-NEXT: ldr q5, [x8, :lo12:.LCPI23_1]
-; CHECK-GI-NEXT: sshr v17.2d, v2.2d, #63
-; CHECK-GI-NEXT: sshr v18.2d, v3.2d, #63
+; CHECK-GI-NEXT: cmlt v17.2d, v2.2d, #0
+; CHECK-GI-NEXT: cmlt v18.2d, v3.2d, #0
; CHECK-GI-NEXT: adrp x8, .LCPI23_3
; CHECK-GI-NEXT: mov v4.h[1], w9
; CHECK-GI-NEXT: neg v5.2d, v5.2d
@@ -754,9 +739,9 @@ define <8 x i64> @combine_vec_sdiv_by_pow2b_v8i64(<8 x i64> %x) {
; CHECK-GI-NEXT: shl v4.2d, v4.2d, #63
; CHECK-GI-NEXT: sshl v16.2d, v16.2d, v20.2d
; CHECK-GI-NEXT: sshl v6.2d, v6.2d, v20.2d
-; CHECK-GI-NEXT: sshr v17.2d, v17.2d, #63
-; CHECK-GI-NEXT: sshr v18.2d, v18.2d, #63
-; CHECK-GI-NEXT: sshr v4.2d, v4.2d, #63
+; CHECK-GI-NEXT: cmlt v17.2d, v17.2d, #0
+; CHECK-GI-NEXT: cmlt v18.2d, v18.2d, #0
+; CHECK-GI-NEXT: cmlt v4.2d, v4.2d, #0
; CHECK-GI-NEXT: bif v0.16b, v7.16b, v17.16b
; CHECK-GI-NEXT: bif v1.16b, v16.16b, v18.16b
; CHECK-GI-NEXT: bif v2.16b, v5.16b, v4.16b
@@ -792,7 +777,7 @@ define <4 x i32> @combine_vec_sdiv_by_pow2b_PosAndNeg(<4 x i32> %x) {
; CHECK-GI-NEXT: adrp x10, .LCPI24_0
; CHECK-GI-NEXT: fmov s1, w8
; CHECK-GI-NEXT: ldr q2, [x10, :lo12:.LCPI24_0]
-; CHECK-GI-NEXT: sshr v3.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v3.4s, v0.4s, #0
; CHECK-GI-NEXT: fmov s4, w9
; CHECK-GI-NEXT: adrp x10, .LCPI24_1
; CHECK-GI-NEXT: neg v2.4s, v2.4s
@@ -807,10 +792,10 @@ define <4 x i32> @combine_vec_sdiv_by_pow2b_PosAndNeg(<4 x i32> %x) {
; CHECK-GI-NEXT: mov v1.s[3], w9
; CHECK-GI-NEXT: sshl v2.4s, v2.4s, v3.4s
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: shl v1.4s, v4.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: neg v2.4s, v0.4s
; CHECK-GI-NEXT: bit v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
@@ -871,7 +856,7 @@ define <16 x i8> @non_splat_minus_one_divisor_0(<16 x i8> %A) {
; CHECK-GI-NEXT: neg v2.16b, v0.16b
; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI25_0]
; CHECK-GI-NEXT: shl v1.16b, v1.16b, #7
-; CHECK-GI-NEXT: sshr v1.16b, v1.16b, #7
+; CHECK-GI-NEXT: cmlt v1.16b, v1.16b, #0
; CHECK-GI-NEXT: bit v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
%div = sdiv <16 x i8> %A, <i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -901,7 +886,7 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) {
; CHECK-GI-LABEL: non_splat_minus_one_divisor_1:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: adrp x8, .LCPI26_2
-; CHECK-GI-NEXT: sshr v2.16b, v0.16b, #7
+; CHECK-GI-NEXT: cmlt v2.16b, v0.16b, #0
; CHECK-GI-NEXT: adrp x9, .LCPI26_1
; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI26_2]
; CHECK-GI-NEXT: adrp x8, .LCPI26_3
@@ -914,11 +899,11 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) {
; CHECK-GI-NEXT: neg v2.16b, v2.16b
; CHECK-GI-NEXT: add v1.16b, v0.16b, v1.16b
; CHECK-GI-NEXT: sshl v1.16b, v1.16b, v2.16b
-; CHECK-GI-NEXT: sshr v2.16b, v3.16b, #7
+; CHECK-GI-NEXT: cmlt v2.16b, v3.16b, #0
; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI26_0]
; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-GI-NEXT: shl v1.16b, v3.16b, #7
-; CHECK-GI-NEXT: sshr v1.16b, v1.16b, #7
+; CHECK-GI-NEXT: cmlt v1.16b, v1.16b, #0
; CHECK-GI-NEXT: neg v2.16b, v0.16b
; CHECK-GI-NEXT: bit v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
@@ -954,7 +939,7 @@ define <4 x i32> @non_splat_minus_one_divisor_2(<4 x i32> %A) {
; CHECK-GI-NEXT: fmov s1, w8
; CHECK-GI-NEXT: ldr q2, [x9, :lo12:.LCPI27_0]
; CHECK-GI-NEXT: fmov s4, w8
-; CHECK-GI-NEXT: sshr v3.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v3.4s, v0.4s, #0
; CHECK-GI-NEXT: adrp x9, .LCPI27_1
; CHECK-GI-NEXT: neg v2.4s, v2.4s
; CHECK-GI-NEXT: mov v1.s[1], w8
@@ -969,10 +954,10 @@ define <4 x i32> @non_splat_minus_one_divisor_2(<4 x i32> %A) {
; CHECK-GI-NEXT: sshl v2.4s, v2.4s, v3.4s
; CHECK-GI-NEXT: mov v4.s[3], w8
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: shl v1.4s, v4.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: neg v2.4s, v0.4s
; CHECK-GI-NEXT: bit v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
@@ -1207,7 +1192,7 @@ define <8 x i16> @combine_vec_sdiv_nonuniform7(<8 x i16> %x) {
; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI34_0]
; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
; CHECK-GI-NEXT: shl v1.8h, v1.8h, #15
-; CHECK-GI-NEXT: sshr v1.8h, v1.8h, #15
+; CHECK-GI-NEXT: cmlt v1.8h, v1.8h, #0
; CHECK-GI-NEXT: bit v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
%1 = sdiv <8 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 1, i16 1, i16 1, i16 1>
diff --git a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
index 121cc30..babb4ed 100644
--- a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
@@ -605,7 +605,7 @@ define i32 @extract_v4i32_select(<4 x i32> %a, <4 x i32> %b, i32 %c, <4 x i1> %c
; CHECK-GI-NEXT: mov w8, w0
; CHECK-GI-NEXT: and x8, x8, #0x3
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: str q0, [sp]
; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
@@ -634,7 +634,7 @@ define i32 @extract_v4i32_select_const(<4 x i32> %a, <4 x i32> %b, i32 %c, <4 x
; CHECK-GI-NEXT: adrp x8, .LCPI23_0
; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI23_0]
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: mov s0, v0.s[2]
; CHECK-GI-NEXT: fmov w0, s0
diff --git a/llvm/test/CodeGen/AArch64/fcmp.ll b/llvm/test/CodeGen/AArch64/fcmp.ll
index 6d673f1..30fb82e 100644
--- a/llvm/test/CodeGen/AArch64/fcmp.ll
+++ b/llvm/test/CodeGen/AArch64/fcmp.ll
@@ -661,7 +661,7 @@ define <2 x double> @v2f128_double(<2 x fp128> %a, <2 x fp128> %b, <2 x double>
; CHECK-GI-NEXT: ldp x30, x19, [sp, #64] // 16-byte Folded Reload
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: shl v0.2d, v0.2d, #63
-; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #63
+; CHECK-GI-NEXT: cmlt v0.2d, v0.2d, #0
; CHECK-GI-NEXT: bsl v0.16b, v1.16b, v2.16b
; CHECK-GI-NEXT: add sp, sp, #80
; CHECK-GI-NEXT: ret
@@ -1540,7 +1540,7 @@ define <7 x i32> @v7f16_i32(<7 x half> %a, <7 x half> %b, <7 x i32> %d, <7 x i32
; CHECK-GI-FP16-NEXT: shl v0.4s, v0.4s, #31
; CHECK-GI-FP16-NEXT: mov v1.s[2], w8
; CHECK-GI-FP16-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-GI-FP16-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-FP16-NEXT: cmlt v0.4s, v0.4s, #0
; CHECK-GI-FP16-NEXT: fmov s4, w8
; CHECK-GI-FP16-NEXT: mov v4.s[1], w8
; CHECK-GI-FP16-NEXT: ushl v1.4s, v1.4s, v2.4s
@@ -1602,7 +1602,7 @@ define <4 x i32> @v4f16_i32(<4 x half> %a, <4 x half> %b, <4 x i32> %d, <4 x i32
; CHECK-GI-FP16-NEXT: fcmgt v0.4h, v1.4h, v0.4h
; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: shl v0.4s, v0.4s, #31
-; CHECK-GI-FP16-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-FP16-NEXT: cmlt v0.4s, v0.4s, #0
; CHECK-GI-FP16-NEXT: bsl v0.16b, v2.16b, v3.16b
; CHECK-GI-FP16-NEXT: ret
entry:
@@ -1657,8 +1657,8 @@ define <8 x i32> @v8f16_i32(<8 x half> %a, <8 x half> %b, <8 x i32> %d, <8 x i32
; CHECK-GI-FP16-NEXT: ushll2 v0.4s, v0.8h, #0
; CHECK-GI-FP16-NEXT: shl v1.4s, v1.4s, #31
; CHECK-GI-FP16-NEXT: shl v0.4s, v0.4s, #31
-; CHECK-GI-FP16-NEXT: sshr v1.4s, v1.4s, #31
-; CHECK-GI-FP16-NEXT: sshr v6.4s, v0.4s, #31
+; CHECK-GI-FP16-NEXT: cmlt v1.4s, v1.4s, #0
+; CHECK-GI-FP16-NEXT: cmlt v6.4s, v0.4s, #0
; CHECK-GI-FP16-NEXT: mov v0.16b, v1.16b
; CHECK-GI-FP16-NEXT: mov v1.16b, v6.16b
; CHECK-GI-FP16-NEXT: bsl v0.16b, v2.16b, v4.16b
@@ -1748,10 +1748,10 @@ define <16 x i32> @v16f16_i32(<16 x half> %a, <16 x half> %b, <16 x i32> %d, <16
; CHECK-GI-FP16-NEXT: shl v0.4s, v0.4s, #31
; CHECK-GI-FP16-NEXT: shl v3.4s, v3.4s, #31
; CHECK-GI-FP16-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-FP16-NEXT: sshr v2.4s, v2.4s, #31
-; CHECK-GI-FP16-NEXT: sshr v16.4s, v0.4s, #31
-; CHECK-GI-FP16-NEXT: sshr v3.4s, v3.4s, #31
-; CHECK-GI-FP16-NEXT: sshr v17.4s, v1.4s, #31
+; CHECK-GI-FP16-NEXT: cmlt v2.4s, v2.4s, #0
+; CHECK-GI-FP16-NEXT: cmlt v16.4s, v0.4s, #0
+; CHECK-GI-FP16-NEXT: cmlt v3.4s, v3.4s, #0
+; CHECK-GI-FP16-NEXT: cmlt v17.4s, v1.4s, #0
; CHECK-GI-FP16-NEXT: ldp q0, q1, [sp]
; CHECK-GI-FP16-NEXT: bit v0.16b, v4.16b, v2.16b
; CHECK-GI-FP16-NEXT: mov v2.16b, v3.16b
diff --git a/llvm/test/CodeGen/AArch64/fpclamptosat.ll b/llvm/test/CodeGen/AArch64/fpclamptosat.ll
index 00de153..24be923 100644
--- a/llvm/test/CodeGen/AArch64/fpclamptosat.ll
+++ b/llvm/test/CodeGen/AArch64/fpclamptosat.ll
@@ -111,14 +111,14 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32(half %x) {
-; CHECK-CVT-LABEL: utesth_f16i32:
+define i32 @utest_f16i32(half %x) {
+; CHECK-CVT-LABEL: utest_f16i32:
; CHECK-CVT: // %bb.0: // %entry
; CHECK-CVT-NEXT: fcvt s0, h0
; CHECK-CVT-NEXT: fcvtzu w0, s0
; CHECK-CVT-NEXT: ret
;
-; CHECK-FP16-LABEL: utesth_f16i32:
+; CHECK-FP16-LABEL: utest_f16i32:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fcvtzu w0, h0
; CHECK-FP16-NEXT: ret
@@ -298,8 +298,8 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16(half %x) {
-; CHECK-CVT-LABEL: utesth_f16i16:
+define i16 @utest_f16i16(half %x) {
+; CHECK-CVT-LABEL: utest_f16i16:
; CHECK-CVT: // %bb.0: // %entry
; CHECK-CVT-NEXT: fcvt s0, h0
; CHECK-CVT-NEXT: mov w9, #65535 // =0xffff
@@ -308,7 +308,7 @@ define i16 @utesth_f16i16(half %x) {
; CHECK-CVT-NEXT: csel w0, w8, w9, lo
; CHECK-CVT-NEXT: ret
;
-; CHECK-FP16-LABEL: utesth_f16i16:
+; CHECK-FP16-LABEL: utest_f16i16:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fcvtzu w8, h0
; CHECK-FP16-NEXT: mov w9, #65535 // =0xffff
@@ -493,8 +493,8 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64(half %x) {
-; CHECK-LABEL: utesth_f16i64:
+define i64 @utest_f16i64(half %x) {
+; CHECK-LABEL: utest_f16i64:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
@@ -636,14 +636,14 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32_mm(half %x) {
-; CHECK-CVT-LABEL: utesth_f16i32_mm:
+define i32 @utest_f16i32_mm(half %x) {
+; CHECK-CVT-LABEL: utest_f16i32_mm:
; CHECK-CVT: // %bb.0: // %entry
; CHECK-CVT-NEXT: fcvt s0, h0
; CHECK-CVT-NEXT: fcvtzu w0, s0
; CHECK-CVT-NEXT: ret
;
-; CHECK-FP16-LABEL: utesth_f16i32_mm:
+; CHECK-FP16-LABEL: utest_f16i32_mm:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fcvtzu w0, h0
; CHECK-FP16-NEXT: ret
@@ -808,8 +808,8 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16_mm(half %x) {
-; CHECK-CVT-LABEL: utesth_f16i16_mm:
+define i16 @utest_f16i16_mm(half %x) {
+; CHECK-CVT-LABEL: utest_f16i16_mm:
; CHECK-CVT: // %bb.0: // %entry
; CHECK-CVT-NEXT: fcvt s0, h0
; CHECK-CVT-NEXT: mov w9, #65535 // =0xffff
@@ -818,7 +818,7 @@ define i16 @utesth_f16i16_mm(half %x) {
; CHECK-CVT-NEXT: csel w0, w8, w9, lo
; CHECK-CVT-NEXT: ret
;
-; CHECK-FP16-LABEL: utesth_f16i16_mm:
+; CHECK-FP16-LABEL: utest_f16i16_mm:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fcvtzu w8, h0
; CHECK-FP16-NEXT: mov w9, #65535 // =0xffff
@@ -986,8 +986,8 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64_mm(half %x) {
-; CHECK-LABEL: utesth_f16i64_mm:
+define i64 @utest_f16i64_mm(half %x) {
+; CHECK-LABEL: utest_f16i64_mm:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
@@ -1026,6 +1026,29 @@ entry:
ret i64 %conv6
}
+; i32 non saturate
+
+define i32 @ustest_f16i32_nsat(half %x) {
+; CHECK-CVT-LABEL: ustest_f16i32_nsat:
+; CHECK-CVT: // %bb.0:
+; CHECK-CVT-NEXT: fcvt s0, h0
+; CHECK-CVT-NEXT: fcvtzs w8, s0
+; CHECK-CVT-NEXT: and w8, w8, w8, asr #31
+; CHECK-CVT-NEXT: bic w0, w8, w8, asr #31
+; CHECK-CVT-NEXT: ret
+;
+; CHECK-FP16-LABEL: ustest_f16i32_nsat:
+; CHECK-FP16: // %bb.0:
+; CHECK-FP16-NEXT: fcvtzs w8, h0
+; CHECK-FP16-NEXT: and w8, w8, w8, asr #31
+; CHECK-FP16-NEXT: bic w0, w8, w8, asr #31
+; CHECK-FP16-NEXT: ret
+ %conv = fptosi half %x to i32
+ %spec.store.select = call i32 @llvm.smin.i32(i32 0, i32 %conv)
+ %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0)
+ ret i32 %spec.store.select7
+}
+
declare i32 @llvm.smin.i32(i32, i32)
declare i32 @llvm.smax.i32(i32, i32)
declare i32 @llvm.umin.i32(i32, i32)
diff --git a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
index b09a867..637c028 100644
--- a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
@@ -321,20 +321,20 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32(<4 x half> %x) {
-; CHECK-CVT-SD-LABEL: utesth_f16i32:
+define <4 x i32> @utest_f16i32(<4 x half> %x) {
+; CHECK-CVT-SD-LABEL: utest_f16i32:
; CHECK-CVT-SD: // %bb.0: // %entry
; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
; CHECK-CVT-SD-NEXT: fcvtzu v0.4s, v0.4s
; CHECK-CVT-SD-NEXT: ret
;
-; CHECK-FP16-SD-LABEL: utesth_f16i32:
+; CHECK-FP16-SD-LABEL: utest_f16i32:
; CHECK-FP16-SD: // %bb.0: // %entry
; CHECK-FP16-SD-NEXT: fcvtl v0.4s, v0.4h
; CHECK-FP16-SD-NEXT: fcvtzu v0.4s, v0.4s
; CHECK-FP16-SD-NEXT: ret
;
-; CHECK-CVT-GI-LABEL: utesth_f16i32:
+; CHECK-CVT-GI-LABEL: utest_f16i32:
; CHECK-CVT-GI: // %bb.0: // %entry
; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
; CHECK-CVT-GI-NEXT: movi v1.2d, #0x000000ffffffff
@@ -349,7 +349,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-CVT-GI-NEXT: uzp1 v0.4s, v2.4s, v0.4s
; CHECK-CVT-GI-NEXT: ret
;
-; CHECK-FP16-GI-LABEL: utesth_f16i32:
+; CHECK-FP16-GI-LABEL: utest_f16i32:
; CHECK-FP16-GI: // %bb.0: // %entry
; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-FP16-GI-NEXT: mov h2, v0.h[1]
@@ -614,8 +614,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16(<8 x half> %x) {
-; CHECK-CVT-LABEL: utesth_f16i16:
+define <8 x i16> @utest_f16i16(<8 x half> %x) {
+; CHECK-CVT-LABEL: utest_f16i16:
; CHECK-CVT: // %bb.0: // %entry
; CHECK-CVT-NEXT: fcvtl v1.4s, v0.4h
; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h
@@ -625,12 +625,12 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-CVT-NEXT: uqxtn2 v0.8h, v2.4s
; CHECK-CVT-NEXT: ret
;
-; CHECK-FP16-SD-LABEL: utesth_f16i16:
+; CHECK-FP16-SD-LABEL: utest_f16i16:
; CHECK-FP16-SD: // %bb.0: // %entry
; CHECK-FP16-SD-NEXT: fcvtzu v0.8h, v0.8h
; CHECK-FP16-SD-NEXT: ret
;
-; CHECK-FP16-GI-LABEL: utesth_f16i16:
+; CHECK-FP16-GI-LABEL: utest_f16i16:
; CHECK-FP16-GI: // %bb.0: // %entry
; CHECK-FP16-GI-NEXT: fcvtl v1.4s, v0.4h
; CHECK-FP16-GI-NEXT: fcvtl2 v0.4s, v0.8h
@@ -1746,8 +1746,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64(<2 x half> %x) {
-; CHECK-CVT-SD-LABEL: utesth_f16i64:
+define <2 x i64> @utest_f16i64(<2 x half> %x) {
+; CHECK-CVT-SD-LABEL: utest_f16i64:
; CHECK-CVT-SD: // %bb.0: // %entry
; CHECK-CVT-SD-NEXT: sub sp, sp, #48
; CHECK-CVT-SD-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
@@ -1777,7 +1777,7 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
; CHECK-CVT-SD-NEXT: add sp, sp, #48
; CHECK-CVT-SD-NEXT: ret
;
-; CHECK-FP16-SD-LABEL: utesth_f16i64:
+; CHECK-FP16-SD-LABEL: utest_f16i64:
; CHECK-FP16-SD: // %bb.0: // %entry
; CHECK-FP16-SD-NEXT: sub sp, sp, #48
; CHECK-FP16-SD-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
@@ -1807,7 +1807,7 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
; CHECK-FP16-SD-NEXT: add sp, sp, #48
; CHECK-FP16-SD-NEXT: ret
;
-; CHECK-CVT-GI-LABEL: utesth_f16i64:
+; CHECK-CVT-GI-LABEL: utest_f16i64:
; CHECK-CVT-GI: // %bb.0: // %entry
; CHECK-CVT-GI-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-CVT-GI-NEXT: mov h1, v0.h[1]
@@ -1819,7 +1819,7 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
; CHECK-CVT-GI-NEXT: mov v0.d[1], x9
; CHECK-CVT-GI-NEXT: ret
;
-; CHECK-FP16-GI-LABEL: utesth_f16i64:
+; CHECK-FP16-GI-LABEL: utest_f16i64:
; CHECK-FP16-GI: // %bb.0: // %entry
; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-FP16-GI-NEXT: mov h1, v0.h[1]
@@ -2307,20 +2307,20 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
-; CHECK-CVT-SD-LABEL: utesth_f16i32_mm:
+define <4 x i32> @utest_f16i32_mm(<4 x half> %x) {
+; CHECK-CVT-SD-LABEL: utest_f16i32_mm:
; CHECK-CVT-SD: // %bb.0: // %entry
; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
; CHECK-CVT-SD-NEXT: fcvtzu v0.4s, v0.4s
; CHECK-CVT-SD-NEXT: ret
;
-; CHECK-FP16-SD-LABEL: utesth_f16i32_mm:
+; CHECK-FP16-SD-LABEL: utest_f16i32_mm:
; CHECK-FP16-SD: // %bb.0: // %entry
; CHECK-FP16-SD-NEXT: fcvtl v0.4s, v0.4h
; CHECK-FP16-SD-NEXT: fcvtzu v0.4s, v0.4s
; CHECK-FP16-SD-NEXT: ret
;
-; CHECK-CVT-GI-LABEL: utesth_f16i32_mm:
+; CHECK-CVT-GI-LABEL: utest_f16i32_mm:
; CHECK-CVT-GI: // %bb.0: // %entry
; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
; CHECK-CVT-GI-NEXT: movi v1.2d, #0x000000ffffffff
@@ -2335,7 +2335,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-CVT-GI-NEXT: uzp1 v0.4s, v2.4s, v0.4s
; CHECK-CVT-GI-NEXT: ret
;
-; CHECK-FP16-GI-LABEL: utesth_f16i32_mm:
+; CHECK-FP16-GI-LABEL: utest_f16i32_mm:
; CHECK-FP16-GI: // %bb.0: // %entry
; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-FP16-GI-NEXT: mov h2, v0.h[1]
@@ -2585,8 +2585,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
-; CHECK-CVT-LABEL: utesth_f16i16_mm:
+define <8 x i16> @utest_f16i16_mm(<8 x half> %x) {
+; CHECK-CVT-LABEL: utest_f16i16_mm:
; CHECK-CVT: // %bb.0: // %entry
; CHECK-CVT-NEXT: fcvtl v1.4s, v0.4h
; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h
@@ -2596,12 +2596,12 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-CVT-NEXT: uqxtn2 v0.8h, v2.4s
; CHECK-CVT-NEXT: ret
;
-; CHECK-FP16-SD-LABEL: utesth_f16i16_mm:
+; CHECK-FP16-SD-LABEL: utest_f16i16_mm:
; CHECK-FP16-SD: // %bb.0: // %entry
; CHECK-FP16-SD-NEXT: fcvtzu v0.8h, v0.8h
; CHECK-FP16-SD-NEXT: ret
;
-; CHECK-FP16-GI-LABEL: utesth_f16i16_mm:
+; CHECK-FP16-GI-LABEL: utest_f16i16_mm:
; CHECK-FP16-GI: // %bb.0: // %entry
; CHECK-FP16-GI-NEXT: fcvtl v1.4s, v0.4h
; CHECK-FP16-GI-NEXT: fcvtl2 v0.4s, v0.8h
@@ -3694,8 +3694,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
-; CHECK-CVT-SD-LABEL: utesth_f16i64_mm:
+define <2 x i64> @utest_f16i64_mm(<2 x half> %x) {
+; CHECK-CVT-SD-LABEL: utest_f16i64_mm:
; CHECK-CVT-SD: // %bb.0: // %entry
; CHECK-CVT-SD-NEXT: sub sp, sp, #48
; CHECK-CVT-SD-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
@@ -3725,7 +3725,7 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
; CHECK-CVT-SD-NEXT: add sp, sp, #48
; CHECK-CVT-SD-NEXT: ret
;
-; CHECK-FP16-SD-LABEL: utesth_f16i64_mm:
+; CHECK-FP16-SD-LABEL: utest_f16i64_mm:
; CHECK-FP16-SD: // %bb.0: // %entry
; CHECK-FP16-SD-NEXT: sub sp, sp, #48
; CHECK-FP16-SD-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
@@ -3755,7 +3755,7 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
; CHECK-FP16-SD-NEXT: add sp, sp, #48
; CHECK-FP16-SD-NEXT: ret
;
-; CHECK-CVT-GI-LABEL: utesth_f16i64_mm:
+; CHECK-CVT-GI-LABEL: utest_f16i64_mm:
; CHECK-CVT-GI: // %bb.0: // %entry
; CHECK-CVT-GI-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-CVT-GI-NEXT: mov h1, v0.h[1]
@@ -3767,7 +3767,7 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
; CHECK-CVT-GI-NEXT: mov v0.d[1], x9
; CHECK-CVT-GI-NEXT: ret
;
-; CHECK-FP16-GI-LABEL: utesth_f16i64_mm:
+; CHECK-FP16-GI-LABEL: utest_f16i64_mm:
; CHECK-FP16-GI: // %bb.0: // %entry
; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-FP16-GI-NEXT: mov h1, v0.h[1]
@@ -3941,6 +3941,51 @@ entry:
ret <2 x i64> %conv6
}
+; i32 non saturate
+
+define <4 x i32> @ustest_f16i32_nsat(<4 x half> %x) {
+; CHECK-CVT-SD-LABEL: ustest_f16i32_nsat:
+; CHECK-CVT-SD: // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: movi v1.2d, #0000000000000000
+; CHECK-CVT-SD-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT: smin v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: smax v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: ret
+;
+; CHECK-FP16-SD-LABEL: ustest_f16i32_nsat:
+; CHECK-FP16-SD: // %bb.0: // %entry
+; CHECK-FP16-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-FP16-SD-NEXT: movi v1.2d, #0000000000000000
+; CHECK-FP16-SD-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-FP16-SD-NEXT: smin v0.4s, v0.4s, v1.4s
+; CHECK-FP16-SD-NEXT: smax v0.4s, v0.4s, v1.4s
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: ustest_f16i32_nsat:
+; CHECK-CVT-GI: // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: movi v1.2d, #0000000000000000
+; CHECK-CVT-GI-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT: smin v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: smax v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: ustest_f16i32_nsat:
+; CHECK-FP16-GI: // %bb.0: // %entry
+; CHECK-FP16-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-FP16-GI-NEXT: movi v1.2d, #0000000000000000
+; CHECK-FP16-GI-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-FP16-GI-NEXT: smin v0.4s, v1.4s, v0.4s
+; CHECK-FP16-GI-NEXT: smax v0.4s, v0.4s, v1.4s
+; CHECK-FP16-GI-NEXT: ret
+entry:
+ %conv = fptosi <4 x half> %x to <4 x i32>
+ %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> zeroinitializer, <4 x i32> %conv)
+ %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> zeroinitializer)
+ ret <4 x i32> %spec.store.select7
+}
+
declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
index 0c84468f..2026959 100644
--- a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
@@ -1110,7 +1110,7 @@ define <8 x i8> @vselect_constant_cond_zero_v8i8(<8 x i8> %a) {
; CHECK-GI-NEXT: adrp x8, .LCPI83_0
; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI83_0]
; CHECK-GI-NEXT: shl v1.8b, v1.8b, #7
-; CHECK-GI-NEXT: sshr v1.8b, v1.8b, #7
+; CHECK-GI-NEXT: cmlt v1.8b, v1.8b, #0
; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b
; CHECK-GI-NEXT: ret
%b = select <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false>, <8 x i8> %a, <8 x i8> zeroinitializer
@@ -1133,7 +1133,7 @@ define <4 x i16> @vselect_constant_cond_zero_v4i16(<4 x i16> %a) {
; CHECK-GI-NEXT: mov v1.h[2], w9
; CHECK-GI-NEXT: mov v1.h[3], w8
; CHECK-GI-NEXT: shl v1.4h, v1.4h, #15
-; CHECK-GI-NEXT: sshr v1.4h, v1.4h, #15
+; CHECK-GI-NEXT: cmlt v1.4h, v1.4h, #0
; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b
; CHECK-GI-NEXT: ret
%b = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i16> %a, <4 x i16> zeroinitializer
@@ -1157,7 +1157,7 @@ define <4 x i32> @vselect_constant_cond_zero_v4i32(<4 x i32> %a) {
; CHECK-GI-NEXT: mov v1.s[2], w9
; CHECK-GI-NEXT: mov v1.s[3], w8
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-GI-NEXT: ret
%b = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> %a, <4 x i32> zeroinitializer
@@ -1176,7 +1176,7 @@ define <8 x i8> @vselect_constant_cond_v8i8(<8 x i8> %a, <8 x i8> %b) {
; CHECK-GI-NEXT: adrp x8, .LCPI86_0
; CHECK-GI-NEXT: ldr d2, [x8, :lo12:.LCPI86_0]
; CHECK-GI-NEXT: shl v2.8b, v2.8b, #7
-; CHECK-GI-NEXT: sshr v2.8b, v2.8b, #7
+; CHECK-GI-NEXT: cmlt v2.8b, v2.8b, #0
; CHECK-GI-NEXT: bif v0.8b, v1.8b, v2.8b
; CHECK-GI-NEXT: ret
%c = select <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false>, <8 x i8> %a, <8 x i8> %b
@@ -1199,7 +1199,7 @@ define <4 x i16> @vselect_constant_cond_v4i16(<4 x i16> %a, <4 x i16> %b) {
; CHECK-GI-NEXT: mov v2.h[2], w9
; CHECK-GI-NEXT: mov v2.h[3], w8
; CHECK-GI-NEXT: shl v2.4h, v2.4h, #15
-; CHECK-GI-NEXT: sshr v2.4h, v2.4h, #15
+; CHECK-GI-NEXT: cmlt v2.4h, v2.4h, #0
; CHECK-GI-NEXT: bif v0.8b, v1.8b, v2.8b
; CHECK-GI-NEXT: ret
%c = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i16> %a, <4 x i16> %b
@@ -1223,7 +1223,7 @@ define <4 x i32> @vselect_constant_cond_v4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-GI-NEXT: mov v2.s[2], w9
; CHECK-GI-NEXT: mov v2.s[3], w8
; CHECK-GI-NEXT: shl v2.4s, v2.4s, #31
-; CHECK-GI-NEXT: sshr v2.4s, v2.4s, #31
+; CHECK-GI-NEXT: cmlt v2.4s, v2.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-GI-NEXT: ret
%c = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> %a, <4 x i32> %b
diff --git a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
index fb8b721..11b3b62 100644
--- a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
@@ -966,7 +966,7 @@ define <8 x i8> @cmgez8xi8_alt(<8 x i8> %A) {
;
; CHECK-GI-LABEL: cmgez8xi8_alt:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.8b, v0.8b, #7
+; CHECK-GI-NEXT: cmlt v0.8b, v0.8b, #0
; CHECK-GI-NEXT: mvn v0.8b, v0.8b
; CHECK-GI-NEXT: ret
%sign = ashr <8 x i8> %A, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
@@ -982,7 +982,7 @@ define <16 x i8> @cmgez16xi8_alt(<16 x i8> %A) {
;
; CHECK-GI-LABEL: cmgez16xi8_alt:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.16b, v0.16b, #7
+; CHECK-GI-NEXT: cmlt v0.16b, v0.16b, #0
; CHECK-GI-NEXT: mvn v0.16b, v0.16b
; CHECK-GI-NEXT: ret
%sign = ashr <16 x i8> %A, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
@@ -998,7 +998,7 @@ define <4 x i16> @cmgez4xi16_alt(<4 x i16> %A) {
;
; CHECK-GI-LABEL: cmgez4xi16_alt:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #15
+; CHECK-GI-NEXT: cmlt v0.4h, v0.4h, #0
; CHECK-GI-NEXT: mvn v0.8b, v0.8b
; CHECK-GI-NEXT: ret
%sign = ashr <4 x i16> %A, <i16 15, i16 15, i16 15, i16 15>
@@ -1014,7 +1014,7 @@ define <8 x i16> @cmgez8xi16_alt(<8 x i16> %A) {
;
; CHECK-GI-LABEL: cmgez8xi16_alt:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.8h, v0.8h, #15
+; CHECK-GI-NEXT: cmlt v0.8h, v0.8h, #0
; CHECK-GI-NEXT: mvn v0.16b, v0.16b
; CHECK-GI-NEXT: ret
%sign = ashr <8 x i16> %A, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
@@ -1030,7 +1030,7 @@ define <2 x i32> @cmgez2xi32_alt(<2 x i32> %A) {
;
; CHECK-GI-LABEL: cmgez2xi32_alt:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.2s, v0.2s, #31
+; CHECK-GI-NEXT: cmlt v0.2s, v0.2s, #0
; CHECK-GI-NEXT: mvn v0.8b, v0.8b
; CHECK-GI-NEXT: ret
%sign = ashr <2 x i32> %A, <i32 31, i32 31>
@@ -1046,7 +1046,7 @@ define <4 x i32> @cmgez4xi32_alt(<4 x i32> %A) {
;
; CHECK-GI-LABEL: cmgez4xi32_alt:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v0.4s, v0.4s, #0
; CHECK-GI-NEXT: mvn v0.16b, v0.16b
; CHECK-GI-NEXT: ret
%sign = ashr <4 x i32> %A, <i32 31, i32 31, i32 31, i32 31>
@@ -1062,7 +1062,7 @@ define <2 x i64> @cmgez2xi64_alt(<2 x i64> %A) {
;
; CHECK-GI-LABEL: cmgez2xi64_alt:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #63
+; CHECK-GI-NEXT: cmlt v0.2d, v0.2d, #0
; CHECK-GI-NEXT: mvn v0.16b, v0.16b
; CHECK-GI-NEXT: ret
%sign = ashr <2 x i64> %A, <i64 63, i64 63>
@@ -1503,99 +1503,64 @@ entry:
}
define <8 x i8> @cmltz8xi8_alt(<8 x i8> %A) {
-; CHECK-SD-LABEL: cmltz8xi8_alt:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v0.8b, v0.8b, #0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: cmltz8xi8_alt:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.8b, v0.8b, #7
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: cmltz8xi8_alt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.8b, v0.8b, #0
+; CHECK-NEXT: ret
%A.lobit = ashr <8 x i8> %A, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
ret <8 x i8> %A.lobit
}
define <16 x i8> @cmltz16xi8_alt(<16 x i8> %A) {
-; CHECK-SD-LABEL: cmltz16xi8_alt:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v0.16b, v0.16b, #0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: cmltz16xi8_alt:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.16b, v0.16b, #7
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: cmltz16xi8_alt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.16b, v0.16b, #0
+; CHECK-NEXT: ret
%A.lobit = ashr <16 x i8> %A, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
ret <16 x i8> %A.lobit
}
define <4 x i16> @cmltz4xi16_alt(<4 x i16> %A) {
-; CHECK-SD-LABEL: cmltz4xi16_alt:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v0.4h, v0.4h, #0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: cmltz4xi16_alt:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #15
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: cmltz4xi16_alt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.4h, v0.4h, #0
+; CHECK-NEXT: ret
%A.lobit = ashr <4 x i16> %A, <i16 15, i16 15, i16 15, i16 15>
ret <4 x i16> %A.lobit
}
define <8 x i16> @cmltz8xi16_alt(<8 x i16> %A) {
-; CHECK-SD-LABEL: cmltz8xi16_alt:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v0.8h, v0.8h, #0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: cmltz8xi16_alt:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.8h, v0.8h, #15
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: cmltz8xi16_alt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.8h, v0.8h, #0
+; CHECK-NEXT: ret
%A.lobit = ashr <8 x i16> %A, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
ret <8 x i16> %A.lobit
}
define <2 x i32> @cmltz2xi32_alt(<2 x i32> %A) {
-; CHECK-SD-LABEL: cmltz2xi32_alt:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v0.2s, v0.2s, #0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: cmltz2xi32_alt:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.2s, v0.2s, #31
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: cmltz2xi32_alt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.2s, v0.2s, #0
+; CHECK-NEXT: ret
%A.lobit = ashr <2 x i32> %A, <i32 31, i32 31>
ret <2 x i32> %A.lobit
}
define <4 x i32> @cmltz4xi32_alt(<4 x i32> %A) {
-; CHECK-SD-LABEL: cmltz4xi32_alt:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v0.4s, v0.4s, #0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: cmltz4xi32_alt:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: cmltz4xi32_alt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.4s, v0.4s, #0
+; CHECK-NEXT: ret
%A.lobit = ashr <4 x i32> %A, <i32 31, i32 31, i32 31, i32 31>
ret <4 x i32> %A.lobit
}
define <2 x i64> @cmltz2xi64_alt(<2 x i64> %A) {
-; CHECK-SD-LABEL: cmltz2xi64_alt:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v0.2d, v0.2d, #0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: cmltz2xi64_alt:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #63
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: cmltz2xi64_alt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.2d, v0.2d, #0
+; CHECK-NEXT: ret
%A.lobit = ashr <2 x i64> %A, <i64 63, i64 63>
ret <2 x i64> %A.lobit
}
@@ -2523,7 +2488,7 @@ define <2 x i32> @fcmal2xfloat(<2 x float> %A, <2 x float> %B) {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: movi v0.2s, #1
; CHECK-GI-NEXT: shl v0.2s, v0.2s, #31
-; CHECK-GI-NEXT: sshr v0.2s, v0.2s, #31
+; CHECK-GI-NEXT: cmlt v0.2s, v0.2s, #0
; CHECK-GI-NEXT: ret
%tmp3 = fcmp true <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
@@ -2542,7 +2507,7 @@ define <4 x i32> @fcmal4xfloat(<4 x float> %A, <4 x float> %B) {
; CHECK-GI-NEXT: dup v0.2s, w8
; CHECK-GI-NEXT: mov v0.d[1], v0.d[0]
; CHECK-GI-NEXT: shl v0.4s, v0.4s, #31
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v0.4s, v0.4s, #0
; CHECK-GI-NEXT: ret
%tmp3 = fcmp true <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
@@ -2559,7 +2524,7 @@ define <2 x i64> @fcmal2xdouble(<2 x double> %A, <2 x double> %B) {
; CHECK-GI-NEXT: adrp x8, .LCPI221_0
; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI221_0]
; CHECK-GI-NEXT: shl v0.2d, v0.2d, #63
-; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #63
+; CHECK-GI-NEXT: cmlt v0.2d, v0.2d, #0
; CHECK-GI-NEXT: ret
%tmp3 = fcmp true <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
@@ -2589,7 +2554,7 @@ define <4 x i32> @fcmnv4xfloat(<4 x float> %A, <4 x float> %B) {
; CHECK-GI-NEXT: mov v0.s[1], w8
; CHECK-GI-NEXT: mov v0.d[1], v0.d[0]
; CHECK-GI-NEXT: shl v0.4s, v0.4s, #31
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v0.4s, v0.4s, #0
; CHECK-GI-NEXT: ret
%tmp3 = fcmp false <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
diff --git a/llvm/test/CodeGen/AArch64/neon-shift-left-long.ll b/llvm/test/CodeGen/AArch64/neon-shift-left-long.ll
index 282f437..a8c55b4 100644
--- a/llvm/test/CodeGen/AArch64/neon-shift-left-long.ll
+++ b/llvm/test/CodeGen/AArch64/neon-shift-left-long.ll
@@ -465,7 +465,7 @@ define <8 x i16> @test_ushll_cmp(<8 x i8> %a, <8 x i8> %b) #0 {
; CHECK-GI-NEXT: movi v1.2d, #0xff00ff00ff00ff
; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
; CHECK-GI-NEXT: shl v0.8h, v0.8h, #15
-; CHECK-GI-NEXT: sshr v0.8h, v0.8h, #15
+; CHECK-GI-NEXT: cmlt v0.8h, v0.8h, #0
; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-GI-NEXT: ret
%cmp.i = icmp eq <8 x i8> %a, %b
diff --git a/llvm/test/CodeGen/AArch64/select_cc.ll b/llvm/test/CodeGen/AArch64/select_cc.ll
index 483f6c2..b562340 100644
--- a/llvm/test/CodeGen/AArch64/select_cc.ll
+++ b/llvm/test/CodeGen/AArch64/select_cc.ll
@@ -98,7 +98,7 @@ define <2 x double> @select_olt_load_cmp(<2 x double> %a, ptr %src) {
; CHECK-GI-NEXT: fcmgt v1.2s, v1.2s, #0.0
; CHECK-GI-NEXT: ushll v1.2d, v1.2s, #0
; CHECK-GI-NEXT: shl v1.2d, v1.2d, #63
-; CHECK-GI-NEXT: sshr v1.2d, v1.2d, #63
+; CHECK-GI-NEXT: cmlt v1.2d, v1.2d, #0
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
entry:
@@ -136,7 +136,7 @@ define <4 x i32> @select_icmp_sgt(<4 x i32> %a, <4 x i8> %b) {
; CHECK-GI-NEXT: mov v2.s[2], w8
; CHECK-GI-NEXT: mov v2.s[3], w9
; CHECK-GI-NEXT: shl v1.4s, v2.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: bic v0.16b, v0.16b, v1.16b
; CHECK-GI-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll b/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
index 293b74ec..96a7a9d0 100644
--- a/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
+++ b/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
@@ -255,7 +255,7 @@ define <16 x i8> @sel_shift_bool_v16i8(<16 x i1> %t) {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: shl v0.16b, v0.16b, #7
; CHECK-GI-NEXT: movi v1.16b, #128
-; CHECK-GI-NEXT: sshr v0.16b, v0.16b, #7
+; CHECK-GI-NEXT: cmlt v0.16b, v0.16b, #0
; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
; CHECK-GI-NEXT: ret
%shl = select <16 x i1> %t, <16 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>, <16 x i8> zeroinitializer
@@ -277,7 +277,7 @@ define <8 x i16> @sel_shift_bool_v8i16(<8 x i1> %t) {
; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
; CHECK-GI-NEXT: movi v1.8h, #128
; CHECK-GI-NEXT: shl v0.8h, v0.8h, #15
-; CHECK-GI-NEXT: sshr v0.8h, v0.8h, #15
+; CHECK-GI-NEXT: cmlt v0.8h, v0.8h, #0
; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
; CHECK-GI-NEXT: ret
%shl= select <8 x i1> %t, <8 x i16> <i16 128, i16 128, i16 128, i16 128, i16 128, i16 128, i16 128, i16 128>, <8 x i16> zeroinitializer
@@ -299,7 +299,7 @@ define <4 x i32> @sel_shift_bool_v4i32(<4 x i1> %t) {
; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-NEXT: movi v1.4s, #64
; CHECK-GI-NEXT: shl v0.4s, v0.4s, #31
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v0.4s, v0.4s, #0
; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
; CHECK-GI-NEXT: ret
%shl = select <4 x i1> %t, <4 x i32> <i32 64, i32 64, i32 64, i32 64>, <4 x i32> zeroinitializer
@@ -323,7 +323,7 @@ define <2 x i64> @sel_shift_bool_v2i64(<2 x i1> %t) {
; CHECK-GI-NEXT: adrp x8, .LCPI16_0
; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI16_0]
; CHECK-GI-NEXT: shl v0.2d, v0.2d, #63
-; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #63
+; CHECK-GI-NEXT: cmlt v0.2d, v0.2d, #0
; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
; CHECK-GI-NEXT: ret
%shl = select <2 x i1> %t, <2 x i64> <i64 65536, i64 65536>, <2 x i64> zeroinitializer
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-flat-scratch-init-asan.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-flat-scratch-init-asan.ll
new file mode 100644
index 0000000..0d68762
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-flat-scratch-init-asan.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals all --version 6
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes='amdgpu-attributor' %s -o - | FileCheck %s
+
+@lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
+
+;.
+; CHECK: @lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
+;.
+define amdgpu_kernel void @k0() #0 {
+; CHECK: Function Attrs: sanitize_address
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: store i8 7, ptr addrspace(3) @lds_1, align 4
+; CHECK-NEXT: ret void
+;
+ store i8 7, ptr addrspace(3) @lds_1, align 4
+ ret void
+}
+
+attributes #0 = { sanitize_address }
+; "amdgpu-no-flat-scratch-init" attribute should not be present in attribute list
+;.
+; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
index a688b6f..fb566e5 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
@@ -707,8 +707,8 @@ attributes #6 = { "enqueued-block" }
; ATTRIBUTOR_HSA: attributes #[[ATTR14]] = { nounwind "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR15:[0-9]+]] = { nounwind "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR16]] = { nounwind "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR17]] = { nounwind sanitize_address "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR18]] = { nounwind "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR17]] = { nounwind sanitize_address "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR18]] = { nounwind "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR19:[0-9]+]] = { nounwind sanitize_address "amdgpu-no-implicitarg-ptr" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR20:[0-9]+]] = { "enqueued-block" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR21]] = { "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "enqueued-block" "uniform-work-group-size"="false" }
diff --git a/llvm/test/CodeGen/AMDGPU/true16-fold.mir b/llvm/test/CodeGen/AMDGPU/true16-fold.mir
index 93cc12f..9484417 100644
--- a/llvm/test/CodeGen/AMDGPU/true16-fold.mir
+++ b/llvm/test/CodeGen/AMDGPU/true16-fold.mir
@@ -57,6 +57,7 @@ body: |
%4:vgpr_16 = COPY %3:sgpr_lo16
%5:vgpr_32 = V_ALIGNBIT_B32_t16_e64 0, %0:sreg_32, 0, killed %1:sreg_32, 0, killed %4:vgpr_16, 0, 0, implicit $exec
S_ENDPGM 0, implicit %5
+...
---
name: fold_16bit_madmix_clamp
@@ -207,3 +208,27 @@ body: |
$vgpr0 = COPY %4
S_ENDPGM 0, implicit $vgpr0
...
+
+---
+name: fold_imm16_across_reg_sequence
+tracksRegLiveness: true
+registers:
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: fold_imm16_across_reg_sequence
+ ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_MOV_B16_t16_e64_:%[0-9]+]]:vgpr_16 = V_MOV_B16_t16_e64 0, -1, 0, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B16_t16_e64_1:%[0-9]+]]:vgpr_16 = V_MOV_B16_t16_e64 0, -1, 0, implicit $exec
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_MOV_B16_t16_e64_]], %subreg.lo16, [[V_MOV_B16_t16_e64_1]], %subreg.hi16
+ ; CHECK-NEXT: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_MAX_F32_e64 0, -1, 0, -1, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[V_MAX_F32_e64_]]
+ ; CHECK-NEXT: S_ENDPGM 0, implicit $vgpr0
+ %0:vgpr_16 = V_MOV_B16_t16_e64 0, -1, 0, implicit $exec
+ %1:vgpr_16 = V_MOV_B16_t16_e64 0, -1, 0, implicit $exec
+ %2:vgpr_32 = REG_SEQUENCE %0, %subreg.lo16, %1, %subreg.hi16
+ %3:vgpr_32 = nofpexcept V_MAX_F32_e64 0, %2, 0, %2, 0, 0, implicit $mode, implicit $exec
+ $vgpr0 = COPY %3
+ S_ENDPGM 0, implicit $vgpr0
+...
diff --git a/llvm/test/CodeGen/ARM/fpclamptosat.ll b/llvm/test/CodeGen/ARM/fpclamptosat.ll
index 8ab56b2..a6f0a03 100644
--- a/llvm/test/CodeGen/ARM/fpclamptosat.ll
+++ b/llvm/test/CodeGen/ARM/fpclamptosat.ll
@@ -383,8 +383,8 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32(half %x) {
-; SOFT-LABEL: utesth_f16i32:
+define i32 @utest_f16i32(half %x) {
+; SOFT-LABEL: utest_f16i32:
; SOFT: @ %bb.0: @ %entry
; SOFT-NEXT: .save {r7, lr}
; SOFT-NEXT: push {r7, lr}
@@ -400,7 +400,7 @@ define i32 @utesth_f16i32(half %x) {
; SOFT-NEXT: .LBB7_2: @ %entry
; SOFT-NEXT: pop {r7, pc}
;
-; VFP2-LABEL: utesth_f16i32:
+; VFP2-LABEL: utest_f16i32:
; VFP2: @ %bb.0: @ %entry
; VFP2-NEXT: .save {r7, lr}
; VFP2-NEXT: push {r7, lr}
@@ -411,7 +411,7 @@ define i32 @utesth_f16i32(half %x) {
; VFP2-NEXT: vmov r0, s0
; VFP2-NEXT: pop {r7, pc}
;
-; FULL-LABEL: utesth_f16i32:
+; FULL-LABEL: utest_f16i32:
; FULL: @ %bb.0: @ %entry
; FULL-NEXT: vcvt.u32.f16 s0, s0
; FULL-NEXT: vmov r0, s0
@@ -3985,6 +3985,46 @@ entry:
ret i32 %spec.store.select7
}
+; i32 non saturate
+
+define i32 @ustest_f16i32_nsat(half %x) {
+; SOFT-LABEL: ustest_f16i32_nsat:
+; SOFT: @ %bb.0:
+; SOFT-NEXT: .save {r7, lr}
+; SOFT-NEXT: push {r7, lr}
+; SOFT-NEXT: uxth r0, r0
+; SOFT-NEXT: bl __aeabi_h2f
+; SOFT-NEXT: bl __aeabi_f2iz
+; SOFT-NEXT: asrs r1, r0, #31
+; SOFT-NEXT: ands r0, r1
+; SOFT-NEXT: asrs r1, r0, #31
+; SOFT-NEXT: bics r0, r1
+; SOFT-NEXT: pop {r7, pc}
+;
+; VFP2-LABEL: ustest_f16i32_nsat:
+; VFP2: @ %bb.0:
+; VFP2-NEXT: .save {r7, lr}
+; VFP2-NEXT: push {r7, lr}
+; VFP2-NEXT: vmov r0, s0
+; VFP2-NEXT: bl __aeabi_h2f
+; VFP2-NEXT: vmov s0, r0
+; VFP2-NEXT: vcvt.s32.f32 s0, s0
+; VFP2-NEXT: vmov r0, s0
+; VFP2-NEXT: usat r0, #0, r0
+; VFP2-NEXT: pop {r7, pc}
+;
+; FULL-LABEL: ustest_f16i32_nsat:
+; FULL: @ %bb.0:
+; FULL-NEXT: vcvt.s32.f16 s0, s0
+; FULL-NEXT: vmov r0, s0
+; FULL-NEXT: usat r0, #0, r0
+; FULL-NEXT: bx lr
+ %conv = fptosi half %x to i32
+ %spec.store.select = call i32 @llvm.smin.i32(i32 0, i32 %conv)
+ %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0)
+ ret i32 %spec.store.select7
+}
+
declare i32 @llvm.smin.i32(i32, i32)
diff --git a/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll b/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
index 96f009a..ba31b35 100644
--- a/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
@@ -748,8 +748,8 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32(<4 x half> %x) {
-; CHECK-NEON-LABEL: utesth_f16i32:
+define <4 x i32> @utest_f16i32(<4 x half> %x) {
+; CHECK-NEON-LABEL: utest_f16i32:
; CHECK-NEON: @ %bb.0: @ %entry
; CHECK-NEON-NEXT: .save {r4, r5, r6, r7, r8, r9, r11, lr}
; CHECK-NEON-NEXT: push {r4, r5, r6, r7, r8, r9, r11, lr}
@@ -821,7 +821,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-NEON-NEXT: vpop {d12, d13}
; CHECK-NEON-NEXT: pop {r4, r5, r6, r7, r8, r9, r11, pc}
;
-; CHECK-FP16-LABEL: utesth_f16i32:
+; CHECK-FP16-LABEL: utest_f16i32:
; CHECK-FP16: @ %bb.0: @ %entry
; CHECK-FP16-NEXT: .save {r4, r5, r6, r7, r8, r9, r11, lr}
; CHECK-FP16-NEXT: push {r4, r5, r6, r7, r8, r9, r11, lr}
@@ -1366,8 +1366,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16(<8 x half> %x) {
-; CHECK-NEON-LABEL: utesth_f16i16:
+define <8 x i16> @utest_f16i16(<8 x half> %x) {
+; CHECK-NEON-LABEL: utest_f16i16:
; CHECK-NEON: @ %bb.0: @ %entry
; CHECK-NEON-NEXT: .save {r4, r5, r6, r7, r11, lr}
; CHECK-NEON-NEXT: push {r4, r5, r6, r7, r11, lr}
@@ -1441,7 +1441,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-NEON-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14}
; CHECK-NEON-NEXT: pop {r4, r5, r6, r7, r11, pc}
;
-; CHECK-FP16-LABEL: utesth_f16i16:
+; CHECK-FP16-LABEL: utest_f16i16:
; CHECK-FP16: @ %bb.0: @ %entry
; CHECK-FP16-NEXT: vmovx.f16 s4, s0
; CHECK-FP16-NEXT: vcvt.u32.f16 s12, s0
@@ -2109,8 +2109,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64(<2 x half> %x) {
-; CHECK-NEON-LABEL: utesth_f16i64:
+define <2 x i64> @utest_f16i64(<2 x half> %x) {
+; CHECK-NEON-LABEL: utest_f16i64:
; CHECK-NEON: @ %bb.0: @ %entry
; CHECK-NEON-NEXT: .save {r4, r5, r6, lr}
; CHECK-NEON-NEXT: push {r4, r5, r6, lr}
@@ -2148,7 +2148,7 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
; CHECK-NEON-NEXT: vpop {d8}
; CHECK-NEON-NEXT: pop {r4, r5, r6, pc}
;
-; CHECK-FP16-LABEL: utesth_f16i64:
+; CHECK-FP16-LABEL: utest_f16i64:
; CHECK-FP16: @ %bb.0: @ %entry
; CHECK-FP16-NEXT: .save {r4, r5, r6, lr}
; CHECK-FP16-NEXT: push {r4, r5, r6, lr}
@@ -2835,8 +2835,8 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
-; CHECK-NEON-LABEL: utesth_f16i32_mm:
+define <4 x i32> @utest_f16i32_mm(<4 x half> %x) {
+; CHECK-NEON-LABEL: utest_f16i32_mm:
; CHECK-NEON: @ %bb.0: @ %entry
; CHECK-NEON-NEXT: .save {r4, r5, r6, r7, r11, lr}
; CHECK-NEON-NEXT: push {r4, r5, r6, r7, r11, lr}
@@ -2881,7 +2881,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-NEON-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEON-NEXT: pop {r4, r5, r6, r7, r11, pc}
;
-; CHECK-FP16-LABEL: utesth_f16i32_mm:
+; CHECK-FP16-LABEL: utest_f16i32_mm:
; CHECK-FP16: @ %bb.0: @ %entry
; CHECK-FP16-NEXT: .save {r4, r5, r6, lr}
; CHECK-FP16-NEXT: push {r4, r5, r6, lr}
@@ -3344,8 +3344,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
-; CHECK-NEON-LABEL: utesth_f16i16_mm:
+define <8 x i16> @utest_f16i16_mm(<8 x half> %x) {
+; CHECK-NEON-LABEL: utest_f16i16_mm:
; CHECK-NEON: @ %bb.0: @ %entry
; CHECK-NEON-NEXT: .save {r4, r5, r6, r7, r11, lr}
; CHECK-NEON-NEXT: push {r4, r5, r6, r7, r11, lr}
@@ -3419,7 +3419,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-NEON-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14}
; CHECK-NEON-NEXT: pop {r4, r5, r6, r7, r11, pc}
;
-; CHECK-FP16-LABEL: utesth_f16i16_mm:
+; CHECK-FP16-LABEL: utest_f16i16_mm:
; CHECK-FP16: @ %bb.0: @ %entry
; CHECK-FP16-NEXT: vmovx.f16 s4, s0
; CHECK-FP16-NEXT: vcvt.u32.f16 s12, s0
@@ -4044,8 +4044,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
-; CHECK-NEON-LABEL: utesth_f16i64_mm:
+define <2 x i64> @utest_f16i64_mm(<2 x half> %x) {
+; CHECK-NEON-LABEL: utest_f16i64_mm:
; CHECK-NEON: @ %bb.0: @ %entry
; CHECK-NEON-NEXT: .save {r4, r5, r6, lr}
; CHECK-NEON-NEXT: push {r4, r5, r6, lr}
@@ -4083,7 +4083,7 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
; CHECK-NEON-NEXT: vpop {d8}
; CHECK-NEON-NEXT: pop {r4, r5, r6, pc}
;
-; CHECK-FP16-LABEL: utesth_f16i64_mm:
+; CHECK-FP16-LABEL: utest_f16i64_mm:
; CHECK-FP16: @ %bb.0: @ %entry
; CHECK-FP16-NEXT: .save {r4, r5, r6, lr}
; CHECK-FP16-NEXT: push {r4, r5, r6, lr}
@@ -4215,6 +4215,77 @@ entry:
ret <2 x i64> %conv6
}
+; i32 non saturate
+
+define <4 x i32> @ustest_f16i32_nsat(<4 x half> %x) {
+; CHECK-NEON-LABEL: ustest_f16i32_nsat:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: .save {r4, lr}
+; CHECK-NEON-NEXT: push {r4, lr}
+; CHECK-NEON-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEON-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: vmov.f32 s16, s3
+; CHECK-NEON-NEXT: vmov.f32 s18, s2
+; CHECK-NEON-NEXT: vmov.f32 s20, s1
+; CHECK-NEON-NEXT: bl __aeabi_h2f
+; CHECK-NEON-NEXT: mov r4, r0
+; CHECK-NEON-NEXT: vmov r0, s16
+; CHECK-NEON-NEXT: bl __aeabi_h2f
+; CHECK-NEON-NEXT: vmov s16, r0
+; CHECK-NEON-NEXT: vmov r0, s18
+; CHECK-NEON-NEXT: bl __aeabi_h2f
+; CHECK-NEON-NEXT: vmov s0, r0
+; CHECK-NEON-NEXT: vmov r1, s20
+; CHECK-NEON-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-NEON-NEXT: vmov s18, r4
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: vmov.32 d11[0], r0
+; CHECK-NEON-NEXT: mov r0, r1
+; CHECK-NEON-NEXT: bl __aeabi_h2f
+; CHECK-NEON-NEXT: vcvt.s32.f32 s2, s18
+; CHECK-NEON-NEXT: vmov s0, r0
+; CHECK-NEON-NEXT: vcvt.s32.f32 s4, s16
+; CHECK-NEON-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-NEON-NEXT: vmov.i32 q8, #0x0
+; CHECK-NEON-NEXT: vmov r0, s2
+; CHECK-NEON-NEXT: vmov.32 d10[0], r0
+; CHECK-NEON-NEXT: vmov r0, s4
+; CHECK-NEON-NEXT: vmov.32 d11[1], r0
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: vmov.32 d10[1], r0
+; CHECK-NEON-NEXT: vmin.s32 q9, q5, q8
+; CHECK-NEON-NEXT: vmax.s32 q0, q9, q8
+; CHECK-NEON-NEXT: vpop {d8, d9, d10, d11}
+; CHECK-NEON-NEXT: pop {r4, pc}
+;
+; CHECK-FP16-LABEL: ustest_f16i32_nsat:
+; CHECK-FP16: @ %bb.0: @ %entry
+; CHECK-FP16-NEXT: vmovx.f16 s2, s0
+; CHECK-FP16-NEXT: vcvt.s32.f16 s6, s0
+; CHECK-FP16-NEXT: vcvt.s32.f16 s0, s1
+; CHECK-FP16-NEXT: vmovx.f16 s4, s1
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: vcvt.s32.f16 s4, s4
+; CHECK-FP16-NEXT: vcvt.s32.f16 s2, s2
+; CHECK-FP16-NEXT: vmov.i32 q9, #0x0
+; CHECK-FP16-NEXT: vmov.32 d17[0], r0
+; CHECK-FP16-NEXT: vmov r0, s6
+; CHECK-FP16-NEXT: vmov.32 d16[0], r0
+; CHECK-FP16-NEXT: vmov r0, s4
+; CHECK-FP16-NEXT: vmov.32 d17[1], r0
+; CHECK-FP16-NEXT: vmov r0, s2
+; CHECK-FP16-NEXT: vmov.32 d16[1], r0
+; CHECK-FP16-NEXT: vmin.s32 q8, q8, q9
+; CHECK-FP16-NEXT: vmax.s32 q0, q8, q9
+; CHECK-FP16-NEXT: bx lr
+entry:
+ %conv = fptosi <4 x half> %x to <4 x i32>
+ %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> zeroinitializer, <4 x i32> %conv)
+ %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> zeroinitializer)
+ ret <4 x i32> %spec.store.select7
+}
+
declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/Hexagon/inst_setcc_uno_uo.ll b/llvm/test/CodeGen/Hexagon/inst_setcc_uno_uo.ll
new file mode 100644
index 0000000..8b121c5
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/inst_setcc_uno_uo.ll
@@ -0,0 +1,93 @@
+;; RUN: llc --mtriple=hexagon -mattr=+hvxv79,+hvx-length128b %s -o - | FileCheck %s
+
+define dso_local void @store_isnan_f32(ptr %a, ptr %b, ptr %isnan_cmp) local_unnamed_addr {
+entry:
+ %arrayidx_a = getelementptr inbounds nuw float, ptr %a, i32 0
+ %arrayidx_b = getelementptr inbounds nuw float, ptr %b, i32 0
+ %0 = load <32 x float>, ptr %arrayidx_a, align 4
+ %1 = load <32 x float>, ptr %arrayidx_b, align 4
+ %.vectorized = fcmp uno <32 x float> %0, %1
+ %.LS.instance = zext <32 x i1> %.vectorized to <32 x i32>
+ %arrayidx1 = getelementptr inbounds nuw i32, ptr %isnan_cmp, i32 0
+ store <32 x i32> %.LS.instance, ptr %arrayidx1, align 4
+ ret void
+}
+
+; CHECK: store_isnan_f32
+; CHECK: [[RONE32:r[0-9]+]] = #1
+; CHECK: [[VOP2_F32:v[0-9]+]] = vxor([[VOP2_F32]],[[VOP2_F32]])
+; CHECK: [[VOP1_F32:v[0-9]+]] = vmemu(r0+#0)
+; CHECK: [[VONES32:v[0-9]+]] = vsplat([[RONE32]])
+; CHECK: [[Q1_F32:q[0-9]+]] = vcmp.eq([[VOP1_F32]].w,[[VOP1_F32]].w)
+; CHECK: [[VOP3_F32:v[0-9]+]] = vmemu(r1+#0)
+; CHECK: [[Q1_F32]] &= vcmp.eq([[VOP3_F32]].w,[[VOP3_F32]].w)
+; CHECK: [[VOUT_F32:v[0-9]+]] = vmux([[Q1_F32]],[[VOP2_F32]],[[VONES32]])
+; CHECK: vmemu(r2+#0) = [[VOUT_F32]]
+
+define dso_local void @store_isnan_f16(ptr %a, ptr %b, ptr %isnan_cmp) local_unnamed_addr {
+entry:
+ %arrayidx_a = getelementptr inbounds nuw half, ptr %a, i32 0
+ %arrayidx_b = getelementptr inbounds nuw half, ptr %b, i32 0
+ %0 = load <64 x half>, ptr %arrayidx_a, align 2
+ %1 = load <64 x half>, ptr %arrayidx_b, align 2
+ %.vectorized = fcmp uno <64 x half> %0, %1
+ %conv.LS.instance = zext <64 x i1> %.vectorized to <64 x i16>
+ %arrayidx1 = getelementptr inbounds nuw i16, ptr %isnan_cmp, i32 0
+ store <64 x i16> %conv.LS.instance, ptr %arrayidx1, align 2
+ ret void
+}
+; CHECK-LABEL: store_isnan_f16
+; CHECK: [[RONE16:r[0-9]+]] = #1
+; CHECK: [[VOP2_F16:v[0-9]+]] = vxor([[VOP2_F16]],[[VOP2_F16]])
+; CHECK: [[VOP1_F16:v[0-9]+]] = vmemu(r0+#0)
+; CHECK: [[VONES16:v[0-9]+]].h = vsplat([[RONE16]])
+; CHECK: [[Q1_F16:q[0-9]+]] = vcmp.eq([[VOP1_F16]].h,[[VOP1_F16]].h)
+; CHECK: [[VOP3_F16:v[0-9]+]] = vmemu(r1+#0)
+; CHECK: [[Q1_F16]] &= vcmp.eq([[VOP3_F16]].h,[[VOP3_F16]].h)
+; CHECK: [[VOUT_F16:v[0-9]+]] = vmux([[Q1_F16]],[[VOP2_F16]],[[VONES16]])
+; CHECK: vmemu(r2+#0) = [[VOUT_F16]]
+
+define dso_local void @store_isordered_f32(ptr %a, ptr %b, ptr %isordered_cmp) local_unnamed_addr {
+entry:
+ %arrayidx_a = getelementptr inbounds nuw float, ptr %a, i32 0
+ %arrayidx_b = getelementptr inbounds nuw float, ptr %b, i32 0
+ %0 = load <32 x float>, ptr %arrayidx_a, align 4
+ %1 = load <32 x float>, ptr %arrayidx_b, align 4
+ %.vectorized = fcmp ord <32 x float> %0, %1
+ %.LS.instance = zext <32 x i1> %.vectorized to <32 x i32>
+ %arrayidx1 = getelementptr inbounds nuw i32, ptr %isordered_cmp, i32 0
+ store <32 x i32> %.LS.instance, ptr %arrayidx1, align 4
+ ret void
+}
+; CHECK-LABEL: store_isordered_f32
+; CHECK: [[VOP2_ORD_F32:v[0-9]+]] = vxor([[VOP2_ORD_F32]],[[VOP2_ORD_F32]])
+; CHECK: [[VOP1_ORD_F32:v[0-9]+]] = vmemu(r0+#0)
+; CHECK: [[VONES_ORD_F32:v[0-9]+]] = vsplat([[RONE32]])
+; CHECK: [[Q1_ORD_F32:q[0-9]+]] = vcmp.eq([[VOP1_ORD_F32]].w,[[VOP1_ORD_F32]].w)
+; CHECK: [[VOP3_ORD_F32:v[0-9]+]] = vmemu(r1+#0)
+; CHECK: [[Q1_ORD_F32]] &= vcmp.eq([[VOP3_ORD_F32]].w,[[VOP3_ORD_F32]].w)
+; CHECK: [[VOUT_ORD_F32:v[0-9]+]] = vmux([[Q1_ORD_F32]],[[VONES_ORD_F32]],[[VOP2_ORD_F32]])
+; CHECK: vmemu(r2+#0) = [[VOUT_ORD_F32]]
+
+
+define dso_local void @store_isordered_f16(ptr %a, ptr %b, ptr %isordered_cmp) local_unnamed_addr {
+entry:
+ %arrayidx_a = getelementptr inbounds nuw half, ptr %a, i32 0
+ %arrayidx_b = getelementptr inbounds nuw half, ptr %b, i32 0
+ %0 = load <64 x half>, ptr %arrayidx_a, align 2
+ %1 = load <64 x half>, ptr %arrayidx_b, align 2
+ %.vectorized = fcmp ord <64 x half> %0, %1
+ %conv.LS.instance = zext <64 x i1> %.vectorized to <64 x i16>
+ %arrayidx1 = getelementptr inbounds nuw i16, ptr %isordered_cmp, i32 0
+ store <64 x i16> %conv.LS.instance, ptr %arrayidx1, align 2
+ ret void
+}
+; CHECK-LABEL: store_isordered_f16
+; CHECK: [[VOP2_ORD_F16:v[0-9]+]] = vxor([[VOP2_ORD_F16]],[[VOP2_ORD_F16]])
+; CHECK: [[VOP1_ORD_F16:v[0-9]+]] = vmemu(r0+#0)
+; CHECK: [[VONES_ORD_F16:v[0-9]+]].h = vsplat([[RONE16]])
+; CHECK: [[Q1_ORD_F16:q[0-9]+]] = vcmp.eq([[VOP1_ORD_F16]].h,[[VOP1_ORD_F16]].h)
+; CHECK: [[VOP3_ORD_F16:v[0-9]+]] = vmemu(r1+#0)
+; CHECK: [[Q1_ORD_F16]] &= vcmp.eq([[VOP3_ORD_F16]].h,[[VOP3_ORD_F16]].h)
+; CHECK: [[VOUT_ORD_F16:v[0-9]+]] = vmux([[Q1_ORD_F16]],[[VONES_ORD_F16]],[[VOP2_ORD_F16]])
+; CHECK: vmemu(r2+#0) = [[VOUT_ORD_F16]]
diff --git a/llvm/test/CodeGen/Hexagon/isel-fold-shl-zext.ll b/llvm/test/CodeGen/Hexagon/isel-fold-shl-zext.ll
index 5fa5023..fe0f7dd 100644
--- a/llvm/test/CodeGen/Hexagon/isel-fold-shl-zext.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-fold-shl-zext.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -mtriple=hexagon-unknown-elf < %s | FileCheck %s
; In ISelLowering, when folding nodes (or (shl xx, s), (zext y))
@@ -11,17 +12,18 @@ target triple = "hexagon"
; Function Attrs: nofree nosync nounwind memory(readwrite, inaccessiblemem: none)
define dso_local void @foo(i64* nocapture noundef %buf, i32 %a, i32 %b) local_unnamed_addr {
; CHECK-LABEL: foo:
-; CHECK: // %bb.0: // %entry
+; CHECK: .cfi_startproc
+; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: {
-; CHECK-NEXT: r[[REG0:[0-9]+]] = addasl(r2,r1,#1)
-; CHECK-NEXT: r[[REG2:[0-9]+]] = asl(r1,#1)
+; CHECK-NEXT: r2 = addasl(r2,r1,#1)
+; CHECK-NEXT: r3 = asl(r1,#1)
; CHECK-NEXT: }
; CHECK-NEXT: {
-; CHECK-NEXT: r[[REG1:[0-9]+]] = addasl(r[[REG0]],r1,#1)
+; CHECK-NEXT: r2 = addasl(r2,r1,#1)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: jumpr r31
-; CHECK-NEXT: memd(r0+#8) = r[[REG2]]:[[REG1]]
+; CHECK-NEXT: memd(r0+#8) = r3:2
; CHECK-NEXT: }
entry:
%arrayidx = getelementptr inbounds i64, i64* %buf, i32 1
diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
index 18d071c..a0d1ecc 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
@@ -436,8 +436,8 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32(half %x) {
-; RV32-LABEL: utesth_f16i32:
+define i32 @utest_f16i32(half %x) {
+; RV32-LABEL: utest_f16i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
@@ -456,7 +456,7 @@ define i32 @utesth_f16i32(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
-; RV64-LABEL: utesth_f16i32:
+; RV64-LABEL: utest_f16i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
@@ -974,8 +974,8 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16(half %x) {
-; RV32-LABEL: utesth_f16i16:
+define i16 @utest_f16i16(half %x) {
+; RV32-LABEL: utest_f16i16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
@@ -995,7 +995,7 @@ define i16 @utesth_f16i16(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
-; RV64-LABEL: utesth_f16i16:
+; RV64-LABEL: utest_f16i16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
@@ -3829,6 +3829,52 @@ entry:
ret i64 %conv6
}
+; i32 non saturate
+
+define i32 @ustest_f16i32_nsat(half %x) {
+; RV32-LABEL: ustest_f16i32_nsat:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: call __extendhfsf2
+; RV32-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32-NEXT: srai a1, a0, 31
+; RV32-NEXT: and a0, a1, a0
+; RV32-NEXT: sgtz a1, a0
+; RV32-NEXT: neg a1, a1
+; RV32-NEXT: and a0, a1, a0
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ustest_f16i32_nsat:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: call __extendhfsf2
+; RV64-NEXT: fcvt.l.s a0, fa0, rtz
+; RV64-NEXT: srai a1, a0, 63
+; RV64-NEXT: and a0, a1, a0
+; RV64-NEXT: sgtz a1, a0
+; RV64-NEXT: neg a1, a1
+; RV64-NEXT: and a0, a1, a0
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+ %conv = fptosi half %x to i32
+ %spec.store.select = call i32 @llvm.smin.i32(i32 0, i32 %conv)
+ %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0)
+ ret i32 %spec.store.select7
+}
+
declare i32 @llvm.smin.i32(i32, i32)
declare i32 @llvm.smax.i32(i32, i32)
declare i32 @llvm.umin.i32(i32, i32)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index aba9d37..f5977625 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -519,8 +519,8 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32(<4 x half> %x) {
-; CHECK-NOV-LABEL: utesth_f16i32:
+define <4 x i32> @utest_f16i32(<4 x half> %x) {
+; CHECK-NOV-LABEL: utest_f16i32:
; CHECK-NOV: # %bb.0: # %entry
; CHECK-NOV-NEXT: addi sp, sp, -64
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 64
@@ -610,7 +610,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-NOV-NEXT: bgeu a3, a1, .LBB7_4
; CHECK-NOV-NEXT: j .LBB7_5
;
-; CHECK-V-LABEL: utesth_f16i32:
+; CHECK-V-LABEL: utest_f16i32:
; CHECK-V: # %bb.0: # %entry
; CHECK-V-NEXT: addi sp, sp, -48
; CHECK-V-NEXT: .cfi_def_cfa_offset 48
@@ -1594,8 +1594,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16(<8 x half> %x) {
-; CHECK-NOV-LABEL: utesth_f16i16:
+define <8 x i16> @utest_f16i16(<8 x half> %x) {
+; CHECK-NOV-LABEL: utest_f16i16:
; CHECK-NOV: # %bb.0: # %entry
; CHECK-NOV-NEXT: addi sp, sp, -128
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 128
@@ -1765,7 +1765,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-NOV-NEXT: bgeu a7, a3, .LBB16_8
; CHECK-NOV-NEXT: j .LBB16_9
;
-; CHECK-V-LABEL: utesth_f16i16:
+; CHECK-V-LABEL: utest_f16i16:
; CHECK-V: # %bb.0: # %entry
; CHECK-V-NEXT: addi sp, sp, -80
; CHECK-V-NEXT: .cfi_def_cfa_offset 80
@@ -3332,8 +3332,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64(<2 x half> %x) {
-; CHECK-NOV-LABEL: utesth_f16i64:
+define <2 x i64> @utest_f16i64(<2 x half> %x) {
+; CHECK-NOV-LABEL: utest_f16i64:
; CHECK-NOV: # %bb.0: # %entry
; CHECK-NOV-NEXT: addi sp, sp, -32
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 32
@@ -3373,7 +3373,7 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 0
; CHECK-NOV-NEXT: ret
;
-; CHECK-V-LABEL: utesth_f16i64:
+; CHECK-V-LABEL: utest_f16i64:
; CHECK-V: # %bb.0: # %entry
; CHECK-V-NEXT: addi sp, sp, -32
; CHECK-V-NEXT: .cfi_def_cfa_offset 32
@@ -4074,8 +4074,8 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
-; CHECK-NOV-LABEL: utesth_f16i32_mm:
+define <4 x i32> @utest_f16i32_mm(<4 x half> %x) {
+; CHECK-NOV-LABEL: utest_f16i32_mm:
; CHECK-NOV: # %bb.0: # %entry
; CHECK-NOV-NEXT: addi sp, sp, -64
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 64
@@ -4165,7 +4165,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-NOV-NEXT: bgeu a3, a1, .LBB34_4
; CHECK-NOV-NEXT: j .LBB34_5
;
-; CHECK-V-LABEL: utesth_f16i32_mm:
+; CHECK-V-LABEL: utest_f16i32_mm:
; CHECK-V: # %bb.0: # %entry
; CHECK-V-NEXT: addi sp, sp, -48
; CHECK-V-NEXT: .cfi_def_cfa_offset 48
@@ -5134,8 +5134,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
-; CHECK-NOV-LABEL: utesth_f16i16_mm:
+define <8 x i16> @utest_f16i16_mm(<8 x half> %x) {
+; CHECK-NOV-LABEL: utest_f16i16_mm:
; CHECK-NOV: # %bb.0: # %entry
; CHECK-NOV-NEXT: addi sp, sp, -128
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 128
@@ -5305,7 +5305,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-NOV-NEXT: bgeu a7, a3, .LBB43_8
; CHECK-NOV-NEXT: j .LBB43_9
;
-; CHECK-V-LABEL: utesth_f16i16_mm:
+; CHECK-V-LABEL: utest_f16i16_mm:
; CHECK-V: # %bb.0: # %entry
; CHECK-V-NEXT: addi sp, sp, -80
; CHECK-V-NEXT: .cfi_def_cfa_offset 80
@@ -6837,8 +6837,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
-; CHECK-NOV-LABEL: utesth_f16i64_mm:
+define <2 x i64> @utest_f16i64_mm(<2 x half> %x) {
+; CHECK-NOV-LABEL: utest_f16i64_mm:
; CHECK-NOV: # %bb.0: # %entry
; CHECK-NOV-NEXT: addi sp, sp, -32
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 32
@@ -6877,7 +6877,7 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 0
; CHECK-NOV-NEXT: ret
;
-; CHECK-V-LABEL: utesth_f16i64_mm:
+; CHECK-V-LABEL: utest_f16i64_mm:
; CHECK-V: # %bb.0: # %entry
; CHECK-V-NEXT: addi sp, sp, -32
; CHECK-V-NEXT: .cfi_def_cfa_offset 32
@@ -7048,6 +7048,172 @@ entry:
ret <2 x i64> %conv6
}
+; i32 non saturate
+
+define <4 x i32> @ustest_f16i32_nsat(<4 x half> %x) {
+; CHECK-NOV-LABEL: ustest_f16i32_nsat:
+; CHECK-NOV: # %bb.0: # %entry
+; CHECK-NOV-NEXT: addi sp, sp, -64
+; CHECK-NOV-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NOV-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; CHECK-NOV-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; CHECK-NOV-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; CHECK-NOV-NEXT: sd s2, 32(sp) # 8-byte Folded Spill
+; CHECK-NOV-NEXT: sd s3, 24(sp) # 8-byte Folded Spill
+; CHECK-NOV-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; CHECK-NOV-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
+; CHECK-NOV-NEXT: .cfi_offset ra, -8
+; CHECK-NOV-NEXT: .cfi_offset s0, -16
+; CHECK-NOV-NEXT: .cfi_offset s1, -24
+; CHECK-NOV-NEXT: .cfi_offset s2, -32
+; CHECK-NOV-NEXT: .cfi_offset s3, -40
+; CHECK-NOV-NEXT: .cfi_offset fs0, -48
+; CHECK-NOV-NEXT: .cfi_offset fs1, -56
+; CHECK-NOV-NEXT: lhu s1, 0(a1)
+; CHECK-NOV-NEXT: lhu s2, 8(a1)
+; CHECK-NOV-NEXT: lhu a2, 16(a1)
+; CHECK-NOV-NEXT: lhu s3, 24(a1)
+; CHECK-NOV-NEXT: mv s0, a0
+; CHECK-NOV-NEXT: fmv.w.x fa0, a2
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: fmv.s fs0, fa0
+; CHECK-NOV-NEXT: fmv.w.x fa0, s2
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: fmv.s fs1, fa0
+; CHECK-NOV-NEXT: fmv.w.x fa0, s1
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: fcvt.l.s s1, fa0, rtz
+; CHECK-NOV-NEXT: fcvt.l.s s2, fs1, rtz
+; CHECK-NOV-NEXT: fmv.w.x fa0, s3
+; CHECK-NOV-NEXT: fcvt.l.s s3, fs0, rtz
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz
+; CHECK-NOV-NEXT: srai a1, s3, 63
+; CHECK-NOV-NEXT: and a1, a1, s3
+; CHECK-NOV-NEXT: srai a2, s2, 63
+; CHECK-NOV-NEXT: and a2, a2, s2
+; CHECK-NOV-NEXT: srai a3, s1, 63
+; CHECK-NOV-NEXT: and a3, a3, s1
+; CHECK-NOV-NEXT: srai a4, a0, 63
+; CHECK-NOV-NEXT: and a0, a4, a0
+; CHECK-NOV-NEXT: sgtz a4, a3
+; CHECK-NOV-NEXT: neg a4, a4
+; CHECK-NOV-NEXT: and a3, a4, a3
+; CHECK-NOV-NEXT: sgtz a4, a2
+; CHECK-NOV-NEXT: neg a4, a4
+; CHECK-NOV-NEXT: and a2, a4, a2
+; CHECK-NOV-NEXT: sgtz a4, a1
+; CHECK-NOV-NEXT: neg a4, a4
+; CHECK-NOV-NEXT: and a1, a4, a1
+; CHECK-NOV-NEXT: sgtz a4, a0
+; CHECK-NOV-NEXT: neg a4, a4
+; CHECK-NOV-NEXT: and a0, a4, a0
+; CHECK-NOV-NEXT: sw a3, 0(s0)
+; CHECK-NOV-NEXT: sw a2, 4(s0)
+; CHECK-NOV-NEXT: sw a1, 8(s0)
+; CHECK-NOV-NEXT: sw a0, 12(s0)
+; CHECK-NOV-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT: ld s2, 32(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT: ld s3, 24(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT: .cfi_restore ra
+; CHECK-NOV-NEXT: .cfi_restore s0
+; CHECK-NOV-NEXT: .cfi_restore s1
+; CHECK-NOV-NEXT: .cfi_restore s2
+; CHECK-NOV-NEXT: .cfi_restore s3
+; CHECK-NOV-NEXT: .cfi_restore fs0
+; CHECK-NOV-NEXT: .cfi_restore fs1
+; CHECK-NOV-NEXT: addi sp, sp, 64
+; CHECK-NOV-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NOV-NEXT: ret
+;
+; CHECK-V-LABEL: ustest_f16i32_nsat:
+; CHECK-V: # %bb.0: # %entry
+; CHECK-V-NEXT: addi sp, sp, -48
+; CHECK-V-NEXT: .cfi_def_cfa_offset 48
+; CHECK-V-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; CHECK-V-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; CHECK-V-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; CHECK-V-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
+; CHECK-V-NEXT: .cfi_offset ra, -8
+; CHECK-V-NEXT: .cfi_offset s0, -16
+; CHECK-V-NEXT: .cfi_offset s1, -24
+; CHECK-V-NEXT: .cfi_offset s2, -32
+; CHECK-V-NEXT: csrr a1, vlenb
+; CHECK-V-NEXT: slli a1, a1, 1
+; CHECK-V-NEXT: sub sp, sp, a1
+; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; CHECK-V-NEXT: lhu s0, 0(a0)
+; CHECK-V-NEXT: lhu s1, 8(a0)
+; CHECK-V-NEXT: lhu s2, 16(a0)
+; CHECK-V-NEXT: lhu a0, 24(a0)
+; CHECK-V-NEXT: fmv.w.x fa0, a0
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; CHECK-V-NEXT: fmv.w.x fa0, s1
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-V-NEXT: vslideup.vi v8, v9, 2
+; CHECK-V-NEXT: vmin.vx v8, v8, zero
+; CHECK-V-NEXT: vmax.vx v8, v8, zero
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 48
+; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT: .cfi_restore ra
+; CHECK-V-NEXT: .cfi_restore s0
+; CHECK-V-NEXT: .cfi_restore s1
+; CHECK-V-NEXT: .cfi_restore s2
+; CHECK-V-NEXT: addi sp, sp, 48
+; CHECK-V-NEXT: .cfi_def_cfa_offset 0
+; CHECK-V-NEXT: ret
+entry:
+ %conv = fptosi <4 x half> %x to <4 x i32>
+ %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> zeroinitializer, <4 x i32> %conv)
+ %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> zeroinitializer)
+ ret <4 x i32> %spec.store.select7
+}
+
declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/SPIRV/pointers/ptrcast-bitcast.ll b/llvm/test/CodeGen/SPIRV/pointers/ptrcast-bitcast.ll
new file mode 100644
index 0000000..8491328
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/ptrcast-bitcast.ll
@@ -0,0 +1,28 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s --match-full-lines
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#v2_uint:]] = OpTypeVector %[[#uint]] 2
+; CHECK-DAG: %[[#double:]] = OpTypeFloat 64
+; CHECK-DAG: %[[#v2_double:]] = OpTypeVector %[[#double]] 2
+; CHECK-DAG: %[[#v4_uint:]] = OpTypeVector %[[#uint]] 4
+@.str = private unnamed_addr constant [3 x i8] c"In\00", align 1
+@.str.2 = private unnamed_addr constant [4 x i8] c"Out\00", align 1
+
+define void @main() local_unnamed_addr #0 {
+entry:
+ %0 = tail call target("spirv.VulkanBuffer", [0 x <2 x i32>], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v2i32_12_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @.str)
+ %1 = tail call target("spirv.VulkanBuffer", [0 x <2 x double>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v2f64_12_1t(i32 0, i32 2, i32 1, i32 0, ptr nonnull @.str.2)
+ %2 = tail call noundef align 8 dereferenceable(8) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v2i32_12_0t(target("spirv.VulkanBuffer", [0 x <2 x i32>], 12, 0) %0, i32 0)
+ %3 = load <2 x i32>, ptr addrspace(11) %2, align 8
+ %4 = tail call noundef align 8 dereferenceable(8) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v2i32_12_0t(target("spirv.VulkanBuffer", [0 x <2 x i32>], 12, 0) %0, i32 1)
+ %5 = load <2 x i32>, ptr addrspace(11) %4, align 8
+; CHECK: %[[#tmp:]] = OpVectorShuffle %[[#v4_uint]] {{%[0-9]+}} {{%[0-9]+}} 0 2 1 3
+ %6 = shufflevector <2 x i32> %3, <2 x i32> %5, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK: %[[#access:]] = OpAccessChain {{.*}}
+ %7 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v2f64_12_1t(target("spirv.VulkanBuffer", [0 x <2 x double>], 12, 1) %1, i32 0)
+; CHECK: %[[#bitcast:]] = OpBitcast %[[#v2_double]] %[[#tmp]]
+; CHECK: OpStore %[[#access]] %[[#bitcast]] Aligned 16
+ store <4 x i32> %6, ptr addrspace(11) %7, align 16
+ ret void
+}
diff --git a/llvm/test/CodeGen/WebAssembly/fpclamptosat.ll b/llvm/test/CodeGen/WebAssembly/fpclamptosat.ll
index 137994ce..59f3edc 100644
--- a/llvm/test/CodeGen/WebAssembly/fpclamptosat.ll
+++ b/llvm/test/CodeGen/WebAssembly/fpclamptosat.ll
@@ -136,9 +136,9 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32(half %x) {
-; CHECK-LABEL: utesth_f16i32:
-; CHECK: .functype utesth_f16i32 (f32) -> (i32)
+define i32 @utest_f16i32(half %x) {
+; CHECK-LABEL: utest_f16i32:
+; CHECK: .functype utest_f16i32 (f32) -> (i32)
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 0
; CHECK-NEXT: call __truncsfhf2
@@ -153,9 +153,9 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32_cse(half %x) {
-; CHECK-LABEL: utesth_f16i32_cse:
-; CHECK: .functype utesth_f16i32_cse (f32) -> (i32)
+define i32 @utest_f16i32_cse(half %x) {
+; CHECK-LABEL: utest_f16i32_cse:
+; CHECK: .functype utest_f16i32_cse (f32) -> (i32)
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 0
; CHECK-NEXT: call __truncsfhf2
@@ -403,9 +403,9 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16(half %x) {
-; CHECK-LABEL: utesth_f16i16:
-; CHECK: .functype utesth_f16i16 (f32) -> (i32)
+define i16 @utest_f16i16(half %x) {
+; CHECK-LABEL: utest_f16i16:
+; CHECK: .functype utest_f16i16 (f32) -> (i32)
; CHECK-NEXT: .local i32
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 0
@@ -427,9 +427,9 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16_cse(half %x) {
-; CHECK-LABEL: utesth_f16i16_cse:
-; CHECK: .functype utesth_f16i16_cse (f32) -> (i32)
+define i16 @utest_f16i16_cse(half %x) {
+; CHECK-LABEL: utest_f16i16_cse:
+; CHECK: .functype utest_f16i16_cse (f32) -> (i32)
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 0
; CHECK-NEXT: call __truncsfhf2
@@ -880,9 +880,9 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64(half %x) {
-; CHECK-LABEL: utesth_f16i64:
-; CHECK: .functype utesth_f16i64 (f32) -> (i64)
+define i64 @utest_f16i64(half %x) {
+; CHECK-LABEL: utest_f16i64:
+; CHECK: .functype utest_f16i64 (f32) -> (i64)
; CHECK-NEXT: .local i32, i64, i64
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: global.get __stack_pointer
@@ -919,9 +919,9 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64_cse(half %x) {
-; CHECK-LABEL: utesth_f16i64_cse:
-; CHECK: .functype utesth_f16i64_cse (f32) -> (i64)
+define i64 @utest_f16i64_cse(half %x) {
+; CHECK-LABEL: utest_f16i64_cse:
+; CHECK: .functype utest_f16i64_cse (f32) -> (i64)
; CHECK-NEXT: .local i32, i64
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: global.get __stack_pointer
@@ -1118,9 +1118,9 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32_mm(half %x) {
-; CHECK-LABEL: utesth_f16i32_mm:
-; CHECK: .functype utesth_f16i32_mm (f32) -> (i32)
+define i32 @utest_f16i32_mm(half %x) {
+; CHECK-LABEL: utest_f16i32_mm:
+; CHECK: .functype utest_f16i32_mm (f32) -> (i32)
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 0
; CHECK-NEXT: call __truncsfhf2
@@ -1353,9 +1353,9 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16_mm(half %x) {
-; CHECK-LABEL: utesth_f16i16_mm:
-; CHECK: .functype utesth_f16i16_mm (f32) -> (i32)
+define i16 @utest_f16i16_mm(half %x) {
+; CHECK-LABEL: utest_f16i16_mm:
+; CHECK: .functype utest_f16i16_mm (f32) -> (i32)
; CHECK-NEXT: .local i32
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 0
@@ -1637,9 +1637,9 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64_mm(half %x) {
-; CHECK-LABEL: utesth_f16i64_mm:
-; CHECK: .functype utesth_f16i64_mm (f32) -> (i64)
+define i64 @utest_f16i64_mm(half %x) {
+; CHECK-LABEL: utest_f16i64_mm:
+; CHECK: .functype utest_f16i64_mm (f32) -> (i64)
; CHECK-NEXT: .local i32, i64, i64
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: global.get __stack_pointer
@@ -1724,9 +1724,9 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64_mm_cse(half %x) {
-; CHECK-LABEL: utesth_f16i64_mm_cse:
-; CHECK: .functype utesth_f16i64_mm_cse (f32) -> (i64)
+define i64 @utest_f16i64_mm_cse(half %x) {
+; CHECK-LABEL: utest_f16i64_mm_cse:
+; CHECK: .functype utest_f16i64_mm_cse (f32) -> (i64)
; CHECK-NEXT: .local i32, i64
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: global.get __stack_pointer
@@ -1754,6 +1754,35 @@ entry:
ret i64 %conv6
}
+; i32 non saturate
+
+define i32 @ustest_f16i32_nsat(half %x) {
+; CHECK-LABEL: ustest_f16i32_nsat:
+; CHECK: .functype ustest_f16i32_nsat (f32) -> (i32)
+; CHECK-NEXT: .local i32
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: call __truncsfhf2
+; CHECK-NEXT: call __extendhfsf2
+; CHECK-NEXT: i32.trunc_sat_f32_s
+; CHECK-NEXT: local.tee 1
+; CHECK-NEXT: i32.const 31
+; CHECK-NEXT: i32.shr_s
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: i32.and
+; CHECK-NEXT: local.tee 1
+; CHECK-NEXT: i32.const 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: i32.const 0
+; CHECK-NEXT: i32.gt_s
+; CHECK-NEXT: i32.select
+; CHECK-NEXT: # fallthrough-return
+ %conv = fptosi half %x to i32
+ %spec.store.select = call i32 @llvm.smin.i32(i32 0, i32 %conv)
+ %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0)
+ ret i32 %spec.store.select7
+}
+
declare i32 @llvm.smin.i32(i32, i32)
declare i32 @llvm.smax.i32(i32, i32)
declare i32 @llvm.umin.i32(i32, i32)
diff --git a/llvm/test/CodeGen/WebAssembly/fpclamptosat_vec.ll b/llvm/test/CodeGen/WebAssembly/fpclamptosat_vec.ll
index 7190e16..52f57dc 100644
--- a/llvm/test/CodeGen/WebAssembly/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/WebAssembly/fpclamptosat_vec.ll
@@ -209,9 +209,9 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32(<4 x half> %x) {
-; CHECK-LABEL: utesth_f16i32:
-; CHECK: .functype utesth_f16i32 (f32, f32, f32, f32) -> (v128)
+define <4 x i32> @utest_f16i32(<4 x half> %x) {
+; CHECK-LABEL: utest_f16i32:
+; CHECK: .functype utest_f16i32 (f32, f32, f32, f32) -> (v128)
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 1
; CHECK-NEXT: call __truncsfhf2
@@ -513,9 +513,9 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16(<8 x half> %x) {
-; CHECK-LABEL: utesth_f16i16:
-; CHECK: .functype utesth_f16i16 (f32, f32, f32, f32, f32, f32, f32, f32) -> (v128)
+define <8 x i16> @utest_f16i16(<8 x half> %x) {
+; CHECK-LABEL: utest_f16i16:
+; CHECK: .functype utest_f16i16 (f32, f32, f32, f32, f32, f32, f32, f32) -> (v128)
; CHECK-NEXT: .local v128
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 5
@@ -1295,9 +1295,9 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64(<2 x half> %x) {
-; CHECK-LABEL: utesth_f16i64:
-; CHECK: .functype utesth_f16i64 (f32, f32) -> (v128)
+define <2 x i64> @utest_f16i64(<2 x half> %x) {
+; CHECK-LABEL: utest_f16i64:
+; CHECK: .functype utest_f16i64 (f32, f32) -> (v128)
; CHECK-NEXT: .local i32, i64, i64, i64, i64
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: global.get __stack_pointer
@@ -1649,9 +1649,9 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
-; CHECK-LABEL: utesth_f16i32_mm:
-; CHECK: .functype utesth_f16i32_mm (f32, f32, f32, f32) -> (v128)
+define <4 x i32> @utest_f16i32_mm(<4 x half> %x) {
+; CHECK-LABEL: utest_f16i32_mm:
+; CHECK: .functype utest_f16i32_mm (f32, f32, f32, f32) -> (v128)
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 1
; CHECK-NEXT: call __truncsfhf2
@@ -1938,9 +1938,9 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
-; CHECK-LABEL: utesth_f16i16_mm:
-; CHECK: .functype utesth_f16i16_mm (f32, f32, f32, f32, f32, f32, f32, f32) -> (v128)
+define <8 x i16> @utest_f16i16_mm(<8 x half> %x) {
+; CHECK-LABEL: utest_f16i16_mm:
+; CHECK: .functype utest_f16i16_mm (f32, f32, f32, f32, f32, f32, f32, f32) -> (v128)
; CHECK-NEXT: .local v128
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 5
@@ -2673,9 +2673,9 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
-; CHECK-LABEL: utesth_f16i64_mm:
-; CHECK: .functype utesth_f16i64_mm (f32, f32) -> (v128)
+define <2 x i64> @utest_f16i64_mm(<2 x half> %x) {
+; CHECK-LABEL: utest_f16i64_mm:
+; CHECK: .functype utest_f16i64_mm (f32, f32) -> (v128)
; CHECK-NEXT: .local i32, i64, i64, i64, i64
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: global.get __stack_pointer
@@ -2810,6 +2810,48 @@ entry:
ret <2 x i64> %conv6
}
+; i32 non saturate
+
+define <4 x i32> @ustest_f16i32_nsat(<4 x half> %x) {
+; CHECK-LABEL: ustest_f16i32_nsat:
+; CHECK: .functype ustest_f16i32_nsat (f32, f32, f32, f32) -> (v128)
+; CHECK-NEXT: .local v128
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: call __truncsfhf2
+; CHECK-NEXT: call __extendhfsf2
+; CHECK-NEXT: local.set 1
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: call __truncsfhf2
+; CHECK-NEXT: call __extendhfsf2
+; CHECK-NEXT: i32.trunc_sat_f32_s
+; CHECK-NEXT: i32x4.splat
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: i32.trunc_sat_f32_s
+; CHECK-NEXT: i32x4.replace_lane 1
+; CHECK-NEXT: local.get 2
+; CHECK-NEXT: call __truncsfhf2
+; CHECK-NEXT: call __extendhfsf2
+; CHECK-NEXT: i32.trunc_sat_f32_s
+; CHECK-NEXT: i32x4.replace_lane 2
+; CHECK-NEXT: local.get 3
+; CHECK-NEXT: call __truncsfhf2
+; CHECK-NEXT: call __extendhfsf2
+; CHECK-NEXT: i32.trunc_sat_f32_s
+; CHECK-NEXT: i32x4.replace_lane 3
+; CHECK-NEXT: v128.const 0, 0, 0, 0
+; CHECK-NEXT: local.tee 4
+; CHECK-NEXT: i32x4.min_s
+; CHECK-NEXT: local.get 4
+; CHECK-NEXT: i32x4.max_s
+; CHECK-NEXT: # fallthrough-return
+entry:
+ %conv = fptosi <4 x half> %x to <4 x i32>
+ %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> zeroinitializer, <4 x i32> %conv)
+ %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> zeroinitializer)
+ ret <4 x i32> %spec.store.select7
+}
+
declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
index dec829f..44cf4e8 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
@@ -911,7 +911,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in.
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: paddb 16(%rsi), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
@@ -1898,7 +1898,7 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in.
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: paddb 32(%rsi), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
@@ -4155,7 +4155,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: paddb 48(%rsi), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE2-NEXT: paddb (%rdx), %xmm2
diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
index 3d4cddb..89b5c33 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -769,7 +769,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in.
; SSE2-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: paddb (%rsi), %xmm1
; SSE2-NEXT: movdqa %xmm1, (%rdx)
@@ -1522,7 +1522,7 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in.
; SSE2-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
@@ -3335,7 +3335,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE2-NEXT: paddb (%rsi), %xmm2
diff --git a/llvm/test/CodeGen/X86/fpclamptosat.ll b/llvm/test/CodeGen/X86/fpclamptosat.ll
index 3f5ec7b..67483be 100644
--- a/llvm/test/CodeGen/X86/fpclamptosat.ll
+++ b/llvm/test/CodeGen/X86/fpclamptosat.ll
@@ -161,8 +161,8 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32(half %x) nounwind {
-; CHECK-LABEL: utesth_f16i32:
+define i32 @utest_f16i32(half %x) nounwind {
+; CHECK-LABEL: utest_f16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq __extendhfsf2@PLT
@@ -360,8 +360,8 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16(half %x) nounwind {
-; CHECK-LABEL: utesth_f16i16:
+define i16 @utest_f16i16(half %x) nounwind {
+; CHECK-LABEL: utest_f16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq __extendhfsf2@PLT
@@ -566,8 +566,8 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64(half %x) nounwind {
-; CHECK-LABEL: utesth_f16i64:
+define i64 @utest_f16i64(half %x) nounwind {
+; CHECK-LABEL: utest_f16i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq __fixunshfti@PLT
@@ -762,8 +762,8 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32_mm(half %x) nounwind {
-; CHECK-LABEL: utesth_f16i32_mm:
+define i32 @utest_f16i32_mm(half %x) nounwind {
+; CHECK-LABEL: utest_f16i32_mm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq __extendhfsf2@PLT
@@ -946,8 +946,8 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16_mm(half %x) nounwind {
-; CHECK-LABEL: utesth_f16i16_mm:
+define i16 @utest_f16i16_mm(half %x) nounwind {
+; CHECK-LABEL: utest_f16i16_mm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq __extendhfsf2@PLT
@@ -1131,8 +1131,8 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64_mm(half %x) nounwind {
-; CHECK-LABEL: utesth_f16i64_mm:
+define i64 @utest_f16i64_mm(half %x) nounwind {
+; CHECK-LABEL: utest_f16i64_mm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq __fixunshfti@PLT
@@ -1170,6 +1170,27 @@ entry:
ret i64 %conv6
}
+; i32 non saturate
+
+define i32 @ustest_f16i32_nsat(half %x) nounwind {
+; CHECK-LABEL: ustest_f16i32_nsat:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq __extendhfsf2@PLT
+; CHECK-NEXT: cvttss2si %xmm0, %ecx
+; CHECK-NEXT: movl %ecx, %eax
+; CHECK-NEXT: sarl $31, %eax
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: andl %ecx, %eax
+; CHECK-NEXT: cmovlel %edx, %eax
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: retq
+ %conv = fptosi half %x to i32
+ %spec.store.select = call i32 @llvm.smin.i32(i32 0, i32 %conv)
+ %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0)
+ ret i32 %spec.store.select7
+}
+
declare i32 @llvm.smin.i32(i32, i32)
declare i32 @llvm.smax.i32(i32, i32)
declare i32 @llvm.umin.i32(i32, i32)
diff --git a/llvm/test/CodeGen/X86/fpclamptosat_vec.ll b/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
index 1a2cfd6..991ce33 100644
--- a/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
@@ -747,8 +747,8 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32(<4 x half> %x) nounwind {
-; SSE-LABEL: utesth_f16i32:
+define <4 x i32> @utest_f16i32(<4 x half> %x) nounwind {
+; SSE-LABEL: utest_f16i32:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movaps %xmm0, %xmm1
@@ -835,7 +835,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) nounwind {
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
-; AVX2-LABEL: utesth_f16i32:
+; AVX2-LABEL: utest_f16i32:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm2
@@ -893,7 +893,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: utesth_f16i32:
+; AVX512-LABEL: utest_f16i32:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvttps2uqq %ymm0, %zmm0
@@ -1338,8 +1338,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16(<8 x half> %x) nounwind {
-; SSE-LABEL: utesth_f16i16:
+define <8 x i16> @utest_f16i16(<8 x half> %x) nounwind {
+; SSE-LABEL: utest_f16i16:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
@@ -1436,7 +1436,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) nounwind {
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
-; AVX2-LABEL: utesth_f16i16:
+; AVX2-LABEL: utest_f16i16:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
@@ -1453,7 +1453,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: utesth_f16i16:
+; AVX512-LABEL: utest_f16i16:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2udq %ymm0, %ymm0
@@ -2456,8 +2456,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64(<2 x half> %x) nounwind {
-; SSE-LABEL: utesth_f16i64:
+define <2 x i64> @utest_f16i64(<2 x half> %x) nounwind {
+; SSE-LABEL: utest_f16i64:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
@@ -2483,7 +2483,7 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) nounwind {
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
-; AVX2-LABEL: utesth_f16i64:
+; AVX2-LABEL: utest_f16i64:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
@@ -2508,7 +2508,7 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) nounwind {
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
-; AVX512-LABEL: utesth_f16i64:
+; AVX512-LABEL: utest_f16i64:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
@@ -3359,8 +3359,8 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) nounwind {
-; SSE-LABEL: utesth_f16i32_mm:
+define <4 x i32> @utest_f16i32_mm(<4 x half> %x) nounwind {
+; SSE-LABEL: utest_f16i32_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movaps %xmm0, %xmm1
@@ -3447,7 +3447,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) nounwind {
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
-; AVX2-LABEL: utesth_f16i32_mm:
+; AVX2-LABEL: utest_f16i32_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm2
@@ -3505,7 +3505,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: utesth_f16i32_mm:
+; AVX512-LABEL: utest_f16i32_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvttps2uqq %ymm0, %zmm0
@@ -3935,8 +3935,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) nounwind {
-; SSE-LABEL: utesth_f16i16_mm:
+define <8 x i16> @utest_f16i16_mm(<8 x half> %x) nounwind {
+; SSE-LABEL: utest_f16i16_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
@@ -4033,7 +4033,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) nounwind {
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
-; AVX2-LABEL: utesth_f16i16_mm:
+; AVX2-LABEL: utest_f16i16_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
@@ -4050,7 +4050,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: utesth_f16i16_mm:
+; AVX512-LABEL: utest_f16i16_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2udq %ymm0, %ymm0
@@ -4820,8 +4820,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) nounwind {
-; SSE-LABEL: utesth_f16i64_mm:
+define <2 x i64> @utest_f16i64_mm(<2 x half> %x) nounwind {
+; SSE-LABEL: utest_f16i64_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
@@ -4847,7 +4847,7 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) nounwind {
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
-; AVX2-LABEL: utesth_f16i64_mm:
+; AVX2-LABEL: utest_f16i64_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
@@ -4872,7 +4872,7 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) nounwind {
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
-; AVX512-LABEL: utesth_f16i64_mm:
+; AVX512-LABEL: utest_f16i64_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
@@ -4974,6 +4974,63 @@ entry:
ret <2 x i64> %conv6
}
+; i32 non saturate
+
+define <4 x i32> @ustest_f16i32_nsat(<4 x half> %x) nounwind {
+; SSE-LABEL: ustest_f16i32_nsat:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: subq $72, %rsp
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrld $16, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
+; SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
+; SSE-NEXT: psrlq $48, %xmm0
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; SSE-NEXT: cvttps2dq %xmm0, %xmm0
+; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT: cvttps2dq %xmm1, %xmm0
+; SSE-NEXT: punpcklqdq (%rsp), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE-NEXT: pand %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: addq $72, %rsp
+; SSE-NEXT: retq
+;
+; AVX-LABEL: ustest_f16i32_nsat:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %conv = fptosi <4 x half> %x to <4 x i32>
+ %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> zeroinitializer, <4 x i32> %conv)
+ %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> zeroinitializer)
+ ret <4 x i32> %spec.store.select7
+}
+
declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/X86/shuffle-of-splat-multiuses.ll b/llvm/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
index ecd9435..1766b4d 100644
--- a/llvm/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
+++ b/llvm/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
@@ -58,7 +58,7 @@ define <8 x float> @foo8(<8 x float> %v, ptr%p) nounwind {
define <4 x i32> @undef_splatmask(<4 x i32> %v) nounwind {
; AVX2-LABEL: undef_splatmask:
; AVX2: # %bb.0:
-; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,2,2]
; AVX2-NEXT: retq
%res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
%res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -68,7 +68,7 @@ define <4 x i32> @undef_splatmask(<4 x i32> %v) nounwind {
define <4 x i32> @undef_splatmask2(<4 x i32> %v) nounwind {
; AVX2-LABEL: undef_splatmask2:
; AVX2: # %bb.0:
-; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,2,2]
; AVX2-NEXT: retq
%res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 2, i32 undef>
%res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -78,7 +78,7 @@ define <4 x i32> @undef_splatmask2(<4 x i32> %v) nounwind {
define <4 x i32> @undef_splatmask3(<4 x i32> %v) nounwind {
; AVX2-LABEL: undef_splatmask3:
; AVX2: # %bb.0:
-; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,2,2]
; AVX2-NEXT: retq
%res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
%res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 3>
@@ -88,7 +88,7 @@ define <4 x i32> @undef_splatmask3(<4 x i32> %v) nounwind {
define <4 x i32> @undef_splatmask4(<4 x i32> %v, ptr %p) nounwind {
; AVX2-LABEL: undef_splatmask4:
; AVX2: # %bb.0:
-; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,2,3,3]
+; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,2,2,2]
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX2-NEXT: vmovaps %xmm0, (%rdi)
; AVX2-NEXT: vmovaps %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll b/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll
index 209d6a5..93a692c 100644
--- a/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll
@@ -1911,13 +1911,13 @@ define <2 x i64> @test_v2f64_ogt_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmoval %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: ucomisd %xmm4, %xmm2
; SSE-32-NEXT: cmoval %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
@@ -2031,13 +2031,13 @@ define <2 x i64> @test_v2f64_oge_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmovael %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: ucomisd %xmm4, %xmm2
; SSE-32-NEXT: cmovael %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
@@ -2151,13 +2151,13 @@ define <2 x i64> @test_v2f64_olt_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmoval %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: ucomisd %xmm2, %xmm4
; SSE-32-NEXT: cmoval %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
@@ -2269,13 +2269,13 @@ define <2 x i64> @test_v2f64_ole_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmovael %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: ucomisd %xmm2, %xmm4
; SSE-32-NEXT: cmovael %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
@@ -2680,13 +2680,13 @@ define <2 x i64> @test_v2f64_ugt_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmovbl %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: ucomisd %xmm2, %xmm4
; SSE-32-NEXT: cmovbl %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
@@ -2798,13 +2798,13 @@ define <2 x i64> @test_v2f64_uge_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmovbel %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: ucomisd %xmm2, %xmm4
; SSE-32-NEXT: cmovbel %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
@@ -2916,13 +2916,13 @@ define <2 x i64> @test_v2f64_ult_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmovbl %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: ucomisd %xmm4, %xmm2
; SSE-32-NEXT: cmovbl %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
@@ -3036,13 +3036,13 @@ define <2 x i64> @test_v2f64_ule_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmovbel %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: ucomisd %xmm4, %xmm2
; SSE-32-NEXT: cmovbel %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
index 9ecc629..b378dce 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
@@ -162,7 +162,7 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v2i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE2-NEXT: pslld $23, %xmm1
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -182,7 +182,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; SSE41-LABEL: splatvar_funnnel_v2i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pslld $23, %xmm1
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -200,7 +200,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; AVX1-LABEL: splatvar_funnnel_v2i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
@@ -277,7 +277,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; XOPAVX1-LABEL: splatvar_funnnel_v2i32:
; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; XOPAVX1-NEXT: vprotd %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
@@ -289,7 +289,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v2i32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-fshl-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-sub128.ll
index 322ebe2..06ff7e7 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-sub128.ll
@@ -250,7 +250,7 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %amt)
define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v2i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [31,31,31,31]
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pandn %xmm4, %xmm5
@@ -286,7 +286,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; SSE41-LABEL: splatvar_funnnel_v2i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE41-NEXT: pmovsxbd {{.*#+}} xmm3 = [31,31,31,31]
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: pandn %xmm3, %xmm4
@@ -316,7 +316,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; AVX1-LABEL: splatvar_funnnel_v2i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [31,31,31,31]
; AVX1-NEXT: vpandn %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpsrldq {{.*#+}} xmm5 = xmm4[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -423,7 +423,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; XOPAVX1-LABEL: splatvar_funnnel_v2i32:
; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [31,31,31,31]
; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
; XOPAVX1-NEXT: vpshld %xmm4, %xmm0, %xmm0
@@ -450,7 +450,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; X86-SSE2-LABEL: splatvar_funnnel_v2i32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [31,31,31,31]
; X86-SSE2-NEXT: movdqa %xmm3, %xmm5
; X86-SSE2-NEXT: pandn %xmm4, %xmm5
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
index 178c02f..ef5ffe4 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
@@ -172,7 +172,7 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v2i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: psubd %xmm1, %xmm2
; SSE2-NEXT: pslld $23, %xmm2
@@ -194,7 +194,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; SSE41-LABEL: splatvar_funnnel_v2i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: psubd %xmm1, %xmm2
; SSE41-NEXT: pslld $23, %xmm2
@@ -214,7 +214,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; AVX1-LABEL: splatvar_funnnel_v2i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
@@ -293,7 +293,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; XOPAVX1-LABEL: splatvar_funnnel_v2i32:
; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vprotd %xmm1, %xmm0, %xmm0
@@ -309,7 +309,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v2i32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: psubd %xmm1, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-fshr-sub128.ll b/llvm/test/CodeGen/X86/vector-fshr-sub128.ll
index 372deb05..2d8670a 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-sub128.ll
@@ -251,7 +251,7 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %amt)
define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v2i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [31,31,31,31]
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
@@ -287,7 +287,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; SSE41-LABEL: splatvar_funnnel_v2i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE41-NEXT: pmovsxbd {{.*#+}} xmm3 = [31,31,31,31]
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: pand %xmm3, %xmm4
@@ -317,7 +317,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; AVX1-LABEL: splatvar_funnnel_v2i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [31,31,31,31]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpsrldq {{.*#+}} xmm5 = xmm4[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -425,7 +425,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; XOPAVX1-LABEL: splatvar_funnnel_v2i32:
; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [31,31,31,31]
; XOPAVX1-NEXT: vpandn %xmm3, %xmm2, %xmm4
; XOPAVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
@@ -452,7 +452,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; X86-SSE2-LABEL: splatvar_funnnel_v2i32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [31,31,31,31]
; X86-SSE2-NEXT: movdqa %xmm3, %xmm5
; X86-SSE2-NEXT: pand %xmm4, %xmm5
diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll
index f57efb4..1e11ea9 100644
--- a/llvm/test/CodeGen/X86/vector-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-sext.ll
@@ -1409,11 +1409,11 @@ define <2 x i64> @load_sext_2i1_to_2i64(ptr%ptr) {
; X86-SSE2-NEXT: movzbl %al, %eax
; X86-SSE2-NEXT: negl %eax
; X86-SSE2-NEXT: movd %eax, %xmm0
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
; X86-SSE2-NEXT: andl $1, %ecx
; X86-SSE2-NEXT: negl %ecx
; X86-SSE2-NEXT: movd %ecx, %xmm0
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X86-SSE2-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/vector-zext.ll b/llvm/test/CodeGen/X86/vector-zext.ll
index bd1a48b..7b0f1c9 100644
--- a/llvm/test/CodeGen/X86/vector-zext.ll
+++ b/llvm/test/CodeGen/X86/vector-zext.ll
@@ -2555,7 +2555,7 @@ entry:
define <4 x i64> @splatshuf_zext_v4i64(<4 x i32> %x) {
; SSE2-LABEL: splatshuf_zext_v4i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movdqa %xmm0, %xmm1
@@ -2563,7 +2563,7 @@ define <4 x i64> @splatshuf_zext_v4i64(<4 x i32> %x) {
;
; SSSE3-LABEL: splatshuf_zext_v4i64:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
@@ -2571,7 +2571,7 @@ define <4 x i64> @splatshuf_zext_v4i64(<4 x i32> %x) {
;
; SSE41-LABEL: splatshuf_zext_v4i64:
; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
index 19a31a6..31ed745 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
@@ -911,7 +911,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in.
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: paddb 16(%rsi), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
@@ -1898,7 +1898,7 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in.
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: paddb 32(%rsi), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
@@ -4610,7 +4610,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: paddb 48(%rsi), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: pxor %xmm1, %xmm1
@@ -6544,7 +6544,7 @@ define void @vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8(ptr %in.
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movdqa 16(%rdx), %xmm1
; SSE2-NEXT: paddb %xmm0, %xmm1
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
index 239472c..5b4cdd2 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -769,7 +769,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in.
; SSE2-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: paddb (%rsi), %xmm1
; SSE2-NEXT: movdqa %xmm1, (%rdx)
@@ -1522,7 +1522,7 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in.
; SSE2-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
@@ -3660,7 +3660,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
; SSE2-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-NEXT: pxor %xmm0, %xmm0
@@ -5250,7 +5250,7 @@ define void @vec512_i16_widen_to_i256_factor16_broadcast_to_v2i256_factor2(ptr %
define void @vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8(ptr %in.elt.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[0,0,0,0]
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
diff --git a/llvm/test/ThinLTO/X86/memprof-dups.ll b/llvm/test/ThinLTO/X86/memprof-dups.ll
new file mode 100644
index 0000000..8accc83
--- /dev/null
+++ b/llvm/test/ThinLTO/X86/memprof-dups.ll
@@ -0,0 +1,138 @@
+;; Check that spurious duplicate (identical) clones are simply
+;; created as aliases to the first identical copy, rather than creating
+;; multiple clones that call the same callee clones or have the same
+;; allocation types. This currently happens in some cases due to additional
+;; cloning performed during function assignment.
+;;
+;; The ThinLTO combined summary was manually modified, as described in the
+;; comments below, to force multiple identical copies of various functions.
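+;;
+;; As a minimal sketch (using a hypothetical function name @f, not taken from
+;; this test): if clone 3 of @f is found to be identical to clone 1, the
+;; backend emits it as an alias to the materialized clone rather than as a
+;; second body:
+;;
+;;   define ptr @f.memprof.1() {
+;;     ret ptr null
+;;   }
+;;   @f.memprof.3 = alias ptr (), ptr @f.memprof.1
+;;
+;; and if @f is available_externally, the duplicate is instead emitted as a
+;; bare declaration:
+;;
+;;   declare ptr @f.memprof.3()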
+
+;; -stats requires asserts
+; REQUIRES: asserts
+
+; RUN: rm -rf %t && split-file %s %t && cd %t
+; RUN: llvm-as src.ll -o src.o
+; RUN: llvm-as src.o.thinlto.ll -o src.o.thinlto.bc
+; RUN: opt -passes=memprof-context-disambiguation -stats \
+; RUN: -memprof-import-summary=src.o.thinlto.bc \
+; RUN: -pass-remarks=memprof-context-disambiguation \
+; RUN: src.o -S 2>&1 | FileCheck %s
+
+; CHECK: created clone bar.memprof.1
+;; Duplicates of bar are created as declarations since bar is available_externally,
+;; and the compiler does not fully support available_externally aliases.
+; CHECK: created clone decl bar.memprof.2
+; CHECK: created clone decl bar.memprof.3
+; CHECK: created clone _Z3foov.memprof.1
+;; Duplicates of _Z3foov are created as aliases to the appropriate materialized
+;; clone of _Z3foov.
+; CHECK: created clone alias _Z3foov.memprof.2
+; CHECK: created clone alias _Z3foov.memprof.3
+
+;--- src.ll
+source_filename = "memprof-distrib-alias.ll"
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@_Z8fooAliasv = alias ptr (...), ptr @_Z3foov
+
+;; Original alias is unchanged.
+; CHECK: @_Z8fooAliasv = alias ptr (...), ptr @_Z3foov{{$}}
+;; We create an equivalent alias for the cloned def @_Z3foov.memprof.1.
+; CHECK: @_Z8fooAliasv.memprof.1 = alias ptr (...), ptr @_Z3foov.memprof.1
+
+;; We should also create aliases for the duplicate clones of _Z3foov
+;; (_Z3foov.memprof.2 and _Z3foov.memprof.3) to the versions they are duplicates
+;; of, and ditto for the associated @_Z8fooAliasv clones.
+;;
+;; _Z3foov.memprof.2 is a duplicate of the original _Z3foov, and thus so is _Z8fooAliasv.memprof.2
+; CHECK: @_Z3foov.memprof.2 = alias ptr (), ptr @_Z3foov{{$}}
+; CHECK: @_Z8fooAliasv.memprof.2 = alias ptr (...), ptr @_Z3foov{{$}}
+;; _Z3foov.memprof.3 is a duplicate of _Z3foov.memprof.1, and thus so is _Z8fooAliasv.memprof.3
+; CHECK: @_Z3foov.memprof.3 = alias ptr (), ptr @_Z3foov.memprof.1
+; CHECK: @_Z8fooAliasv.memprof.3 = alias ptr (...), ptr @_Z3foov.memprof.1
+
+; CHECK-LABEL: define i32 @main()
+define i32 @main() #0 {
+entry:
+ ;; The first call to bar does not allocate cold memory. It should call
+ ;; the original function, which eventually calls the original allocation
+ ;; decorated with a "notcold" attribute.
+ ; CHECK: call {{.*}} @bar()
+ %call = call ptr @bar(), !callsite !0
+ ;; The second call to bar allocates cold memory. It should call the cloned
+ ;; function which eventually calls a cloned allocation decorated with a
+ ;; "cold" attribute.
+ ; CHECK: call {{.*}} @bar.memprof.1()
+ %call1 = call ptr @bar(), !callsite !1
+ ret i32 0
+}
+
+; CHECK-LABEL: define available_externally i32 @bar()
+define available_externally i32 @bar() #0 {
+entry:
+ ; CHECK: call {{.*}} @_Z8fooAliasv()
+ %call = call ptr @_Z8fooAliasv(), !callsite !8
+ ret i32 0
+}
+
+declare ptr @_Znam(i64)
+
+; CHECK-LABEL: define ptr @_Z3foov()
+define ptr @_Z3foov() #0 {
+entry:
+ ; CHECK: call {{.*}} @_Znam(i64 0) #[[NOTCOLD:[0-9]+]]
+ %call = call ptr @_Znam(i64 0), !memprof !2, !callsite !7
+ ret ptr null
+}
+
+;; We create an actual clone for bar.memprof.1.
+; CHECK: define available_externally i32 @bar.memprof.1()
+; CHECK: call {{.*}} @_Z3foov.memprof.1()
+
+;; bar.memprof.2 and bar.memprof.3 are duplicates (of the original bar and
+;; bar.memprof.1, respectively). However, they are available externally,
+;; so rather than create an alias we simply create a declaration, since the
+;; compiler does not fully support available_externally aliases.
+; CHECK: declare i32 @bar.memprof.2
+; CHECK: declare i32 @bar.memprof.3
+
+;; We create an actual clone for _Z3foov.memprof.1.
+; CHECK: define {{.*}} @_Z3foov.memprof.1()
+; CHECK: call {{.*}} @_Znam(i64 0) #[[COLD:[0-9]+]]
+
+; CHECK: attributes #[[NOTCOLD]] = { "memprof"="notcold" }
+; CHECK: attributes #[[COLD]] = { "memprof"="cold" }
+
+; CHECK: 4 memprof-context-disambiguation - Number of function clone duplicates detected during ThinLTO backend
+; CHECK: 2 memprof-context-disambiguation - Number of function clones created during ThinLTO backend
+
+attributes #0 = { noinline optnone }
+
+!0 = !{i64 8632435727821051414}
+!1 = !{i64 -3421689549917153178}
+!2 = !{!3, !5}
+!3 = !{!4, !"notcold"}
+!4 = !{i64 9086428284934609951, i64 1234, i64 8632435727821051414}
+!5 = !{!6, !"cold"}
+!6 = !{i64 9086428284934609951, i64 1234, i64 -3421689549917153178}
+!7 = !{i64 9086428284934609951}
+!8 = !{i64 1234}
+
+;--- src.o.thinlto.ll
+; ModuleID = 'src.o.thinlto.ll'
+source_filename = "src.o.thinlto.bc"
+
+^0 = module: (path: "src.o", hash: (1720506022, 1575514144, 2506794664, 3599359797, 3160884478))
+^1 = gv: (guid: 6583049656999245004, summaries: (alias: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 1, dsoLocal: 1, canAutoHide: 0, importType: definition), aliasee: ^2)))
+;; Summary for _Z3foov, where the allocs part has been manually modified to add
+;; two additional clones that are the same as the prior versions:
+;; ... allocs: ((versions: (notcold, cold, notcold, cold), ...
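+;; (before this modification the list would have read: versions: (notcold, cold))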
+^2 = gv: (guid: 9191153033785521275, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 1, dsoLocal: 1, canAutoHide: 0, importType: definition), insts: 2, funcFlags: (readNone: 0, readOnly: 0, noRecurse: 0, returnDoesNotAlias: 0, noInline: 1, alwaysInline: 0, noUnwind: 0, mayThrow: 0, hasUnknownCall: 0, mustBeUnreachable: 0), allocs: ((versions: (notcold, cold, notcold, cold), memProf: ((type: notcold, stackIds: (1234, 8632435727821051414)), (type: cold, stackIds: (1234, 15025054523792398438))))))))
+^3 = gv: (guid: 15822663052811949562, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 1, dsoLocal: 1, canAutoHide: 0, importType: definition), insts: 3, funcFlags: (readNone: 0, readOnly: 0, noRecurse: 0, returnDoesNotAlias: 0, noInline: 1, alwaysInline: 0, noUnwind: 0, mayThrow: 0, hasUnknownCall: 0, mustBeUnreachable: 0), calls: ((callee: ^4)), callsites: ((callee: ^4, clones: (0), stackIds: (8632435727821051414)), (callee: ^4, clones: (1), stackIds: (15025054523792398438))))))
+;; Summary for bar, where the callsites part has been manually modified to add
+;; two additional clones that are the same as the prior clones:
+;; ... callsites: ((callee: ^1, clones: (0, 1, 0, 1), ...
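+;; (before this modification the list would have read: clones: (0, 1))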
+^4 = gv: (guid: 16434608426314478903, summaries: (function: (module: ^0, flags: (linkage: available_externally, visibility: default, notEligibleToImport: 0, live: 1, dsoLocal: 1, canAutoHide: 0, importType: definition), insts: 2, funcFlags: (readNone: 0, readOnly: 0, noRecurse: 0, returnDoesNotAlias: 0, noInline: 1, alwaysInline: 0, noUnwind: 0, mayThrow: 0, hasUnknownCall: 0, mustBeUnreachable: 0), calls: ((callee: ^1)), callsites: ((callee: ^1, clones: (0, 1, 0, 1), stackIds: (1234))))))
+^6 = flags: 353
+^7 = blockcount: 0
diff --git a/llvm/test/ThinLTO/X86/memprof_imported_internal.ll b/llvm/test/ThinLTO/X86/memprof_imported_internal.ll
index a6e254c..09784f8 100644
--- a/llvm/test/ThinLTO/X86/memprof_imported_internal.ll
+++ b/llvm/test/ThinLTO/X86/memprof_imported_internal.ll
@@ -63,14 +63,14 @@
; CHECK: tail call void @_ZL9internal1v.llvm.3267420853450984672()
; CHECK: tail call void @_ZL9internal2v.llvm.3267420853450984672.memprof.1()
; CHECK-LABEL: declare void @_ZL9internal2v.llvm.3267420853450984672.memprof.1()
-;; We should have 2 clones of src2.cc's internal1 function, calling a single
-;; clone of external2.
+;; We should have one clone of src2.cc's internal1 function, calling a single
+;; clone of external2, plus a second clone that was detected to be a duplicate
+;; of the first and is therefore emitted as a declaration (this function is
+;; available_externally; in the module with the prevailing copy it would
+;; instead be an alias to clone 1).
; CHECK-LABEL: define available_externally void @_ZL9internal1v.llvm.3267420853450984672.memprof.1()
; CHECK: tail call void @_Z9external2v.memprof.1()
; CHECK: tail call void @_Z9external2v.memprof.1()
-; CHECK-LABEL: define available_externally void @_ZL9internal1v.llvm.3267420853450984672.memprof.2()
-; CHECK: tail call void @_Z9external2v.memprof.1()
-; CHECK: tail call void @_Z9external2v.memprof.1()
+; CHECK: declare void @_ZL9internal1v.llvm.3267420853450984672.memprof.2()
; CHECK-NOT: memprof
;--- src1.ll
diff --git a/llvm/test/Transforms/InstCombine/masked_intrinsics.ll b/llvm/test/Transforms/InstCombine/masked_intrinsics.ll
index 8f76834..67ab167 100644
--- a/llvm/test/Transforms/InstCombine/masked_intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/masked_intrinsics.ll
@@ -16,6 +16,14 @@ define <2 x double> @load_zeromask(ptr %ptr, <2 x double> %passthru) {
ret <2 x double> %res
}
+define <2 x double> @load_zero_withpoison_mask(ptr %ptr, <2 x double> %passthru) {
+; CHECK-LABEL: @load_zero_withpoison_mask(
+; CHECK-NEXT: ret <2 x double> [[PASSTHRU:%.*]]
+;
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 1, <2 x i1> <i1 0, i1 poison>, <2 x double> %passthru)
+ ret <2 x double> %res
+}
+
define <2 x double> @load_onemask(ptr %ptr, <2 x double> %passthru) {
; CHECK-LABEL: @load_onemask(
; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, ptr [[PTR:%.*]], align 2
@@ -150,6 +158,14 @@ define void @store_zeromask(ptr %ptr, <2 x double> %val) {
ret void
}
+define void @store_poisonmask(ptr %ptr, <2 x double> %val) {
+; CHECK-LABEL: @store_poisonmask(
+; CHECK-NEXT: ret void
+;
+ call void @llvm.masked.store.v2f64.p0(<2 x double> %val, ptr %ptr, i32 4, <2 x i1> splat(i1 poison))
+ ret void
+}
+
define void @store_onemask(ptr %ptr, <2 x double> %val) {
; CHECK-LABEL: @store_onemask(
; CHECK-NEXT: store <2 x double> [[VAL:%.*]], ptr [[PTR:%.*]], align 4
@@ -159,6 +175,15 @@ define void @store_onemask(ptr %ptr, <2 x double> %val) {
ret void
}
+define void @store_one_withpoison_mask(ptr %ptr, <2 x double> %val) {
+; CHECK-LABEL: @store_one_withpoison_mask(
+; CHECK-NEXT: store <2 x double> [[VAL:%.*]], ptr [[PTR:%.*]], align 4
+; CHECK-NEXT: ret void
+;
+ call void @llvm.masked.store.v2f64.p0(<2 x double> %val, ptr %ptr, i32 4, <2 x i1> <i1 1, i1 poison>)
+ ret void
+}
+
define void @store_demandedelts(ptr %ptr, double %val) {
; CHECK-LABEL: @store_demandedelts(
; CHECK-NEXT: [[VALVEC1:%.*]] = insertelement <2 x double> poison, double [[VAL:%.*]], i64 0
@@ -189,6 +214,13 @@ define <2 x double> @gather_zeromask(<2 x ptr> %ptrs, <2 x double> %passthru) {
ret <2 x double> %res
}
+define <2 x double> @gather_zero_withpoison_mask(<2 x ptr> %ptrs, <2 x double> %passthru) {
+; CHECK-LABEL: @gather_zero_withpoison_mask(
+; CHECK-NEXT: ret <2 x double> [[PASSTHRU:%.*]]
+;
+ %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> <i1 0, i1 poison>, <2 x double> %passthru)
+ ret <2 x double> %res
+}
define <2 x double> @gather_onemask(<2 x ptr> %ptrs, <2 x double> %passthru) {
; CHECK-LABEL: @gather_onemask(
@@ -199,6 +231,15 @@ define <2 x double> @gather_onemask(<2 x ptr> %ptrs, <2 x double> %passthru) {
ret <2 x double> %res
}
+define <2 x double> @gather_one_withpoisonmask(<2 x ptr> %ptrs, <2 x double> %passthru) {
+; CHECK-LABEL: @gather_one_withpoisonmask(
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> [[PTRS:%.*]], i32 4, <2 x i1> <i1 true, i1 poison>, <2 x double> [[PASSTHRU:%.*]])
+; CHECK-NEXT: ret <2 x double> [[RES]]
+;
+ %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> <i1 true, i1 poison>, <2 x double> %passthru)
+ ret <2 x double> %res
+}
+
define <4 x double> @gather_lane2(ptr %base, double %pt) {
; CHECK-LABEL: @gather_lane2(
; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <4 x i64> <i64 poison, i64 poison, i64 2, i64 poison>
@@ -257,6 +298,23 @@ define void @scatter_zeromask(<2 x ptr> %ptrs, <2 x double> %val) {
ret void
}
+define void @scatter_zero_withpoison_mask(<2 x ptr> %ptrs, <2 x double> %val) {
+; CHECK-LABEL: @scatter_zero_withpoison_mask(
+; CHECK-NEXT: ret void
+;
+ call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> %val, <2 x ptr> %ptrs, i32 8, <2 x i1> <i1 0, i1 poison>)
+ ret void
+}
+
+define void @scatter_one_withpoison_mask(<2 x ptr> %ptrs, <2 x double> %val) {
+; CHECK-LABEL: @scatter_one_withpoison_mask(
+; CHECK-NEXT: call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> [[VAL:%.*]], <2 x ptr> [[PTRS:%.*]], i32 8, <2 x i1> <i1 true, i1 poison>)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> %val, <2 x ptr> %ptrs, i32 8, <2 x i1> <i1 1, i1 poison>)
+ ret void
+}
+
define void @scatter_demandedelts(ptr %ptr, double %val) {
; CHECK-LABEL: @scatter_demandedelts(
; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[PTR:%.*]], <2 x i64> <i64 0, i64 poison>
diff --git a/llvm/test/Transforms/InstCombine/pr83947.ll b/llvm/test/Transforms/InstCombine/pr83947.ll
index 1906502..679230a4 100644
--- a/llvm/test/Transforms/InstCombine/pr83947.ll
+++ b/llvm/test/Transforms/InstCombine/pr83947.ll
@@ -24,7 +24,6 @@ define void @masked_scatter2() {
define void @masked_scatter3() {
; CHECK-LABEL: define void @masked_scatter3() {
-; CHECK-NEXT: store i32 0, ptr @c, align 4
; CHECK-NEXT: ret void
;
call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> zeroinitializer, <2 x ptr> splat (ptr @c), i32 4, <2 x i1> undef)
@@ -50,7 +49,6 @@ define void @masked_scatter5() {
define void @masked_scatter6() {
; CHECK-LABEL: define void @masked_scatter6() {
-; CHECK-NEXT: store i32 0, ptr @c, align 4
; CHECK-NEXT: ret void
;
call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> zeroinitializer, <2 x ptr> splat (ptr @c), i32 4, <2 x i1> <i1 undef, i1 false>)
diff --git a/llvm/test/Transforms/InstCombine/select-and-cmp.ll b/llvm/test/Transforms/InstCombine/select-and-cmp.ll
index 50e1493..26c04ad 100644
--- a/llvm/test/Transforms/InstCombine/select-and-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/select-and-cmp.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
define i32 @select_and_icmp(i32 %x, i32 %y, i32 %z) {
@@ -114,34 +114,34 @@ define i32 @select_and_icmp_inv(i32 %x, i32 %y, i32 %z) {
; Below used to be negative tests in InstSimplify, but are no longer negative cases here
-define i32 @select_and_icmp_pred_bad_1(i32 %x, i32 %y, i32 %z) {
+define i32 @select_and_icmp_pred_bad_1(i32 %x, i32 %y, i32 %z) !prof !0 {
; CHECK-LABEL: @select_and_icmp_pred_bad_1(
-; CHECK-NEXT: ret i32 [[X]]
+; CHECK-NEXT: ret i32 [[X:%.*]]
;
%A = icmp eq i32 %x, %z
%B = icmp ne i32 %y, %z
%C = and i1 %A, %B
- %D = select i1 %C, i32 %z, i32 %x
+ %D = select i1 %C, i32 %z, i32 %x, !prof !1
ret i32 %D
}
-define i32 @select_and_icmp_pred_bad_2(i32 %x, i32 %y, i32 %z) {
+define i32 @select_and_icmp_pred_bad_2(i32 %x, i32 %y, i32 %z) !prof !0 {
; CHECK-LABEL: @select_and_icmp_pred_bad_2(
; CHECK-NEXT: [[B:%.*]] = icmp eq i32 [[Y:%.*]], [[Z:%.*]]
-; CHECK-NEXT: [[D:%.*]] = select i1 [[B]], i32 [[Z]], i32 [[X]]
+; CHECK-NEXT: [[D:%.*]] = select i1 [[B]], i32 [[Z]], i32 [[X:%.*]], !prof [[PROF1:![0-9]+]]
; CHECK-NEXT: ret i32 [[D]]
;
%A = icmp ne i32 %x, %z
%B = icmp eq i32 %y, %z
%C = and i1 %A, %B
- %D = select i1 %C, i32 %z, i32 %x
+ %D = select i1 %C, i32 %z, i32 %x, !prof !1
ret i32 %D
}
define i32 @select_and_icmp_pred_bad_3(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @select_and_icmp_pred_bad_3(
-; CHECK-NEXT: [[B_NOT:%.*]] = icmp eq i32 [[Y:%.*]], [[Z]]
-; CHECK-NEXT: [[D:%.*]] = select i1 [[B_NOT]], i32 [[X]], i32 [[Z]]
+; CHECK-NEXT: [[B_NOT:%.*]] = icmp eq i32 [[Y:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = select i1 [[B_NOT]], i32 [[X:%.*]], i32 [[Z]]
; CHECK-NEXT: ret i32 [[D]]
;
%A = icmp ne i32 %x, %z
@@ -153,8 +153,8 @@ define i32 @select_and_icmp_pred_bad_3(i32 %x, i32 %y, i32 %z) {
define i32 @select_and_icmp_pred_bad_4(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @select_and_icmp_pred_bad_4(
-; CHECK-NEXT: [[B:%.*]] = icmp eq i32 [[Y:%.*]], [[Z]]
-; CHECK-NEXT: [[D:%.*]] = select i1 [[B]], i32 [[Z]], i32 [[X]]
+; CHECK-NEXT: [[B:%.*]] = icmp eq i32 [[Y:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = select i1 [[B]], i32 [[Z]], i32 [[X:%.*]]
; CHECK-NEXT: ret i32 [[D]]
;
%A = icmp eq i32 %x, %z
@@ -166,7 +166,7 @@ define i32 @select_and_icmp_pred_bad_4(i32 %x, i32 %y, i32 %z) {
define i32 @select_and_icmp_alt_bad_1(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @select_and_icmp_alt_bad_1(
-; CHECK-NEXT: ret i32 [[Z]]
+; CHECK-NEXT: ret i32 [[Z:%.*]]
;
%A = icmp eq i32 %x, %z
%B = icmp ne i32 %y, %z
@@ -177,8 +177,8 @@ define i32 @select_and_icmp_alt_bad_1(i32 %x, i32 %y, i32 %z) {
define i32 @select_and_icmp_alt_bad_2(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @select_and_icmp_alt_bad_2(
-; CHECK-NEXT: [[B:%.*]] = icmp eq i32 [[Y:%.*]], [[Z]]
-; CHECK-NEXT: [[D:%.*]] = select i1 [[B]], i32 [[X]], i32 [[Z]]
+; CHECK-NEXT: [[B:%.*]] = icmp eq i32 [[Y:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = select i1 [[B]], i32 [[X:%.*]], i32 [[Z]]
; CHECK-NEXT: ret i32 [[D]]
;
%A = icmp ne i32 %x, %z
@@ -191,8 +191,8 @@ define i32 @select_and_icmp_alt_bad_2(i32 %x, i32 %y, i32 %z) {
define i32 @select_and_icmp_alt_bad_3(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @select_and_icmp_alt_bad_3(
-; CHECK-NEXT: [[B_NOT:%.*]] = icmp eq i32 [[Y:%.*]], [[Z]]
-; CHECK-NEXT: [[D:%.*]] = select i1 [[B_NOT]], i32 [[Z]], i32 [[X]]
+; CHECK-NEXT: [[B_NOT:%.*]] = icmp eq i32 [[Y:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = select i1 [[B_NOT]], i32 [[Z]], i32 [[X:%.*]]
; CHECK-NEXT: ret i32 [[D]]
;
%A = icmp ne i32 %x, %z
@@ -204,8 +204,8 @@ define i32 @select_and_icmp_alt_bad_3(i32 %x, i32 %y, i32 %z) {
define i32 @select_and_icmp_alt_bad_4(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @select_and_icmp_alt_bad_4(
-; CHECK-NEXT: [[B:%.*]] = icmp eq i32 [[Y:%.*]], [[Z]]
-; CHECK-NEXT: [[D:%.*]] = select i1 [[B]], i32 [[X]], i32 [[Z]]
+; CHECK-NEXT: [[B:%.*]] = icmp eq i32 [[Y:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = select i1 [[B]], i32 [[X:%.*]], i32 [[Z]]
; CHECK-NEXT: ret i32 [[D]]
;
%A = icmp eq i32 %x, %z
@@ -322,3 +322,11 @@ define i32 @select_and_icmp_alt_bad_false_val(i32 %x, i32 %y, i32 %z, i32 %k) {
%D = select i1 %C, i32 %x, i32 %k
ret i32 %D
}
+
+!0 = !{!"function_entry_count", i64 1000}
+!1 = !{!"branch_weights", i32 2, i32 3}
+
+;.
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 2, i32 3}
+;.
diff --git a/llvm/test/Transforms/InstCombine/select-or-cmp.ll b/llvm/test/Transforms/InstCombine/select-or-cmp.ll
index 72a3747..82b069b 100644
--- a/llvm/test/Transforms/InstCombine/select-or-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/select-or-cmp.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
define i32 @select_or_icmp(i32 %x, i32 %y, i32 %z) {
@@ -114,47 +114,47 @@ define i32 @select_or_icmp_inv(i32 %x, i32 %y, i32 %z) {
; Below used to be negative tests in InstSimplify, but are no longer negative cases here
-define i32 @select_and_icmp_pred_bad_1(i32 %x, i32 %y, i32 %z) {
+define i32 @select_and_icmp_pred_bad_1(i32 %x, i32 %y, i32 %z) !prof !0 {
; CHECK-LABEL: @select_and_icmp_pred_bad_1(
-; CHECK-NEXT: [[B_NOT:%.*]] = icmp eq i32 [[Y:%.*]], [[Z]]
-; CHECK-NEXT: [[D:%.*]] = select i1 [[B_NOT]], i32 [[X]], i32 [[Z]]
+; CHECK-NEXT: [[B_NOT:%.*]] = icmp eq i32 [[Y:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = select i1 [[B_NOT]], i32 [[X:%.*]], i32 [[Z]], !prof [[PROF1:![0-9]+]]
; CHECK-NEXT: ret i32 [[D]]
;
%A = icmp eq i32 %x, %z
%B = icmp ne i32 %y, %z
%C = or i1 %A, %B
- %D = select i1 %C, i32 %z, i32 %x
+ %D = select i1 %C, i32 %z, i32 %x, !prof !1
ret i32 %D
}
-define i32 @select_and_icmp_pred_bad_2(i32 %x, i32 %y, i32 %z) {
+define i32 @select_and_icmp_pred_bad_2(i32 %x, i32 %y, i32 %z) !prof !0 {
; CHECK-LABEL: @select_and_icmp_pred_bad_2(
-; CHECK-NEXT: ret i32 [[Z]]
+; CHECK-NEXT: ret i32 [[Z:%.*]]
;
%A = icmp ne i32 %x, %z
%B = icmp eq i32 %y, %z
%C = or i1 %A, %B
- %D = select i1 %C, i32 %z, i32 %x
+ %D = select i1 %C, i32 %z, i32 %x, !prof !1
ret i32 %D
}
-define i32 @select_and_icmp_pred_bad_3(i32 %x, i32 %y, i32 %z) {
+define i32 @select_and_icmp_pred_bad_3(i32 %x, i32 %y, i32 %z) !prof !0 {
; CHECK-LABEL: @select_and_icmp_pred_bad_3(
-; CHECK-NEXT: [[B:%.*]] = icmp eq i32 [[Y:%.*]], [[Z]]
-; CHECK-NEXT: [[D:%.*]] = select i1 [[B]], i32 [[Z]], i32 [[X]]
+; CHECK-NEXT: [[B:%.*]] = icmp eq i32 [[Y:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = select i1 [[B]], i32 [[Z]], i32 [[X:%.*]], !prof [[PROF2:![0-9]+]]
; CHECK-NEXT: ret i32 [[D]]
;
%A = icmp eq i32 %x, %z
%B = icmp eq i32 %y, %z
%C = or i1 %A, %B
- %D = select i1 %C, i32 %z, i32 %x
+ %D = select i1 %C, i32 %z, i32 %x, !prof !1
ret i32 %D
}
define i32 @select_and_icmp_pred_bad_4(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @select_and_icmp_pred_bad_4(
-; CHECK-NEXT: [[B_NOT:%.*]] = icmp eq i32 [[Y:%.*]], [[Z]]
-; CHECK-NEXT: [[D:%.*]] = select i1 [[B_NOT]], i32 [[X]], i32 [[Z]]
+; CHECK-NEXT: [[B_NOT:%.*]] = icmp eq i32 [[Y:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = select i1 [[B_NOT]], i32 [[X:%.*]], i32 [[Z]]
; CHECK-NEXT: ret i32 [[D]]
;
%A = icmp ne i32 %x, %z
@@ -166,8 +166,8 @@ define i32 @select_and_icmp_pred_bad_4(i32 %x, i32 %y, i32 %z) {
define i32 @select_or_icmp_alt_bad_1(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @select_or_icmp_alt_bad_1(
-; CHECK-NEXT: [[B_NOT:%.*]] = icmp eq i32 [[Y:%.*]], [[Z]]
-; CHECK-NEXT: [[D:%.*]] = select i1 [[B_NOT]], i32 [[Z]], i32 [[X]]
+; CHECK-NEXT: [[B_NOT:%.*]] = icmp eq i32 [[Y:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = select i1 [[B_NOT]], i32 [[Z]], i32 [[X:%.*]]
; CHECK-NEXT: ret i32 [[D]]
;
%A = icmp eq i32 %x, %z
@@ -179,7 +179,7 @@ define i32 @select_or_icmp_alt_bad_1(i32 %x, i32 %y, i32 %z) {
define i32 @select_or_icmp_alt_bad_2(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @select_or_icmp_alt_bad_2(
-; CHECK-NEXT: ret i32 [[X]]
+; CHECK-NEXT: ret i32 [[X:%.*]]
;
%A = icmp ne i32 %x, %z
%B = icmp eq i32 %y, %z
@@ -190,8 +190,8 @@ define i32 @select_or_icmp_alt_bad_2(i32 %x, i32 %y, i32 %z) {
define i32 @select_or_icmp_alt_bad_3(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @select_or_icmp_alt_bad_3(
-; CHECK-NEXT: [[B:%.*]] = icmp eq i32 [[Y:%.*]], [[Z]]
-; CHECK-NEXT: [[D:%.*]] = select i1 [[B]], i32 [[X]], i32 [[Z]]
+; CHECK-NEXT: [[B:%.*]] = icmp eq i32 [[Y:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = select i1 [[B]], i32 [[X:%.*]], i32 [[Z]]
; CHECK-NEXT: ret i32 [[D]]
;
%A = icmp eq i32 %x, %z
@@ -203,8 +203,8 @@ define i32 @select_or_icmp_alt_bad_3(i32 %x, i32 %y, i32 %z) {
define i32 @select_or_icmp_alt_bad_4(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @select_or_icmp_alt_bad_4(
-; CHECK-NEXT: [[B_NOT:%.*]] = icmp eq i32 [[Y:%.*]], [[Z]]
-; CHECK-NEXT: [[D:%.*]] = select i1 [[B_NOT]], i32 [[Z]], i32 [[X]]
+; CHECK-NEXT: [[B_NOT:%.*]] = icmp eq i32 [[Y:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = select i1 [[B_NOT]], i32 [[Z]], i32 [[X:%.*]]
; CHECK-NEXT: ret i32 [[D]]
;
%A = icmp ne i32 %x, %z
@@ -321,3 +321,11 @@ define i32 @select_or_icmp_alt_bad_false_val(i32 %x, i32 %y, i32 %z, i32 %k) {
%D = select i1 %C, i32 %x, i32 %k
ret i32 %D
}
+
+!0 = !{!"function_entry_count", i64 1000}
+!1 = !{!"branch_weights", i32 2, i32 3}
+;.
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 3, i32 2}
+; CHECK: [[PROF2]] = !{!"branch_weights", i32 2, i32 3}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll
index 8784873..f5329cf 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll
@@ -454,6 +454,132 @@ exit:
ret void
}
+declare i1 @cond()
+
+define double @test_load_used_by_other_load_scev(ptr %ptr.a, ptr %ptr.b, ptr %ptr.c) {
+; I64-LABEL: define double @test_load_used_by_other_load_scev(
+; I64-SAME: ptr [[PTR_A:%.*]], ptr [[PTR_B:%.*]], ptr [[PTR_C:%.*]]) {
+; I64-NEXT: [[ENTRY:.*]]:
+; I64-NEXT: br label %[[OUTER_LOOP:.*]]
+; I64: [[OUTER_LOOP_LOOPEXIT:.*]]:
+; I64-NEXT: br label %[[OUTER_LOOP]]
+; I64: [[OUTER_LOOP]]:
+; I64-NEXT: [[ACCUM:%.*]] = phi double [ 0.000000e+00, %[[ENTRY]] ], [ [[TMP29:%.*]], %[[OUTER_LOOP_LOOPEXIT]] ]
+; I64-NEXT: [[COND:%.*]] = call i1 @cond()
+; I64-NEXT: br i1 [[COND]], label %[[INNER_LOOP_PREHEADER:.*]], label %[[EXIT:.*]]
+; I64: [[INNER_LOOP_PREHEADER]]:
+; I64-NEXT: br label %[[VECTOR_PH:.*]]
+; I64: [[VECTOR_PH]]:
+; I64-NEXT: br label %[[VECTOR_BODY:.*]]
+; I64: [[VECTOR_BODY]]:
+; I64-NEXT: [[TMP0:%.*]] = add i64 0, 1
+; I64-NEXT: [[TMP1:%.*]] = add i64 1, 1
+; I64-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR_C]], i64 [[TMP0]]
+; I64-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR_C]], i64 [[TMP1]]
+; I64-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[PTR_A]], i64 [[TMP0]]
+; I64-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[PTR_A]], i64 [[TMP1]]
+; I64-NEXT: [[TMP6:%.*]] = load i64, ptr [[TMP4]], align 8
+; I64-NEXT: [[TMP7:%.*]] = load i64, ptr [[TMP5]], align 8
+; I64-NEXT: [[TMP8:%.*]] = getelementptr double, ptr [[PTR_B]], i64 [[TMP6]]
+; I64-NEXT: [[TMP9:%.*]] = getelementptr double, ptr [[PTR_B]], i64 [[TMP7]]
+; I64-NEXT: [[TMP10:%.*]] = load double, ptr [[PTR_A]], align 8
+; I64-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x double> poison, double [[TMP10]], i64 0
+; I64-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT]], <2 x double> poison, <2 x i32> zeroinitializer
+; I64-NEXT: [[TMP11:%.*]] = fadd <2 x double> [[BROADCAST_SPLAT]], zeroinitializer
+; I64-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP2]], i64 8
+; I64-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP3]], i64 8
+; I64-NEXT: [[TMP14:%.*]] = load double, ptr [[TMP12]], align 8
+; I64-NEXT: [[TMP15:%.*]] = load double, ptr [[TMP13]], align 8
+; I64-NEXT: [[TMP16:%.*]] = insertelement <2 x double> poison, double [[TMP14]], i32 0
+; I64-NEXT: [[TMP17:%.*]] = insertelement <2 x double> [[TMP16]], double [[TMP15]], i32 1
+; I64-NEXT: [[TMP18:%.*]] = fmul <2 x double> [[TMP11]], zeroinitializer
+; I64-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x double> poison, double [[ACCUM]], i64 0
+; I64-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT1]], <2 x double> poison, <2 x i32> zeroinitializer
+; I64-NEXT: [[TMP19:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLAT2]], <2 x double> [[TMP18]], <2 x i32> <i32 1, i32 2>
+; I64-NEXT: [[TMP20:%.*]] = fmul <2 x double> [[TMP17]], zeroinitializer
+; I64-NEXT: [[TMP21:%.*]] = fadd <2 x double> [[TMP20]], zeroinitializer
+; I64-NEXT: [[TMP22:%.*]] = fadd <2 x double> [[TMP21]], splat (double 1.000000e+00)
+; I64-NEXT: [[TMP23:%.*]] = load double, ptr [[TMP8]], align 8
+; I64-NEXT: [[TMP24:%.*]] = load double, ptr [[TMP9]], align 8
+; I64-NEXT: [[TMP25:%.*]] = insertelement <2 x double> poison, double [[TMP23]], i32 0
+; I64-NEXT: [[TMP26:%.*]] = insertelement <2 x double> [[TMP25]], double [[TMP24]], i32 1
+; I64-NEXT: [[TMP27:%.*]] = fdiv <2 x double> [[TMP26]], [[TMP22]]
+; I64-NEXT: [[TMP28:%.*]] = fsub <2 x double> [[TMP19]], [[TMP27]]
+; I64-NEXT: br label %[[MIDDLE_BLOCK:.*]]
+; I64: [[MIDDLE_BLOCK]]:
+; I64-NEXT: [[TMP29]] = extractelement <2 x double> [[TMP28]], i32 1
+; I64-NEXT: br label %[[OUTER_LOOP_LOOPEXIT]]
+; I64: [[EXIT]]:
+; I64-NEXT: ret double [[ACCUM]]
+;
+; I32-LABEL: define double @test_load_used_by_other_load_scev(
+; I32-SAME: ptr [[PTR_A:%.*]], ptr [[PTR_B:%.*]], ptr [[PTR_C:%.*]]) {
+; I32-NEXT: [[ENTRY:.*]]:
+; I32-NEXT: br label %[[OUTER_LOOP:.*]]
+; I32: [[OUTER_LOOP]]:
+; I32-NEXT: [[ACCUM:%.*]] = phi double [ 0.000000e+00, %[[ENTRY]] ], [ [[RESULT:%.*]], %[[INNER_LOOP:.*]] ]
+; I32-NEXT: [[COND:%.*]] = call i1 @cond()
+; I32-NEXT: br i1 [[COND]], label %[[INNER_LOOP]], label %[[EXIT:.*]]
+; I32: [[INNER_LOOP]]:
+; I32-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[OUTER_LOOP]] ], [ [[IV_NEXT:%.*]], %[[INNER_LOOP]] ]
+; I32-NEXT: [[ACCUM_INNER:%.*]] = phi double [ [[ACCUM]], %[[OUTER_LOOP]] ], [ [[MUL1:%.*]], %[[INNER_LOOP]] ]
+; I32-NEXT: [[IDX_PLUS1:%.*]] = add i64 [[IV]], 1
+; I32-NEXT: [[GEP_C:%.*]] = getelementptr i8, ptr [[PTR_C]], i64 [[IDX_PLUS1]]
+; I32-NEXT: [[GEP_A_I64:%.*]] = getelementptr i64, ptr [[PTR_A]], i64 [[IDX_PLUS1]]
+; I32-NEXT: [[LOAD_IDX:%.*]] = load i64, ptr [[GEP_A_I64]], align 8
+; I32-NEXT: [[GEP_B:%.*]] = getelementptr double, ptr [[PTR_B]], i64 [[LOAD_IDX]]
+; I32-NEXT: [[LOAD_A:%.*]] = load double, ptr [[PTR_A]], align 8
+; I32-NEXT: [[ADD1:%.*]] = fadd double [[LOAD_A]], 0.000000e+00
+; I32-NEXT: [[GEP_C_OFFSET:%.*]] = getelementptr i8, ptr [[GEP_C]], i64 8
+; I32-NEXT: [[LOAD_C:%.*]] = load double, ptr [[GEP_C_OFFSET]], align 8
+; I32-NEXT: [[MUL1]] = fmul double [[ADD1]], 0.000000e+00
+; I32-NEXT: [[MUL2:%.*]] = fmul double [[LOAD_C]], 0.000000e+00
+; I32-NEXT: [[ADD2:%.*]] = fadd double [[MUL2]], 0.000000e+00
+; I32-NEXT: [[ADD3:%.*]] = fadd double [[ADD2]], 1.000000e+00
+; I32-NEXT: [[LOAD_B:%.*]] = load double, ptr [[GEP_B]], align 8
+; I32-NEXT: [[DIV:%.*]] = fdiv double [[LOAD_B]], [[ADD3]]
+; I32-NEXT: [[RESULT]] = fsub double [[ACCUM_INNER]], [[DIV]]
+; I32-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; I32-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 1
+; I32-NEXT: br i1 [[EXITCOND]], label %[[OUTER_LOOP]], label %[[INNER_LOOP]]
+; I32: [[EXIT]]:
+; I32-NEXT: ret double [[ACCUM]]
+;
+entry:
+ br label %outer.loop
+
+outer.loop:
+ %accum = phi double [ 0.0, %entry ], [ %result, %inner.loop ]
+ %cond = call i1 @cond()
+ br i1 %cond, label %inner.loop, label %exit
+
+inner.loop:
+ %iv = phi i64 [ 0, %outer.loop ], [ %iv.next, %inner.loop ]
+ %accum.inner = phi double [ %accum, %outer.loop ], [ %mul1, %inner.loop ]
+ %idx.plus1 = add i64 %iv, 1
+ %gep.c = getelementptr i8, ptr %ptr.c, i64 %idx.plus1
+ %gep.a.i64 = getelementptr i64, ptr %ptr.a, i64 %idx.plus1
+ %load.idx = load i64, ptr %gep.a.i64, align 8
+ %gep.b = getelementptr double, ptr %ptr.b, i64 %load.idx
+ %load.a = load double, ptr %ptr.a, align 8
+ %add1 = fadd double %load.a, 0.000000e+00
+ %gep.c.offset = getelementptr i8, ptr %gep.c, i64 8
+ %load.c = load double, ptr %gep.c.offset, align 8
+ %mul1 = fmul double %add1, 0.000000e+00
+ %mul2 = fmul double %load.c, 0.000000e+00
+ %add2 = fadd double %mul2, 0.000000e+00
+ %add3 = fadd double %add2, 1.000000e+00
+ %load.b = load double, ptr %gep.b, align 8
+ %div = fdiv double %load.b, %add3
+ %result = fsub double %accum.inner, %div
+ %iv.next = add i64 %iv, 1
+ %exitcond = icmp eq i64 %iv, 1
+ br i1 %exitcond, label %outer.loop, label %inner.loop
+
+exit:
+ ret double %accum
+}
+
attributes #0 = { "target-cpu"="znver2" }
!0 = distinct !{!0, !1}
diff --git a/llvm/test/Transforms/NewGVN/pr159918.ll b/llvm/test/Transforms/NewGVN/pr159918.ll
new file mode 100644
index 0000000..3fad6e6
--- /dev/null
+++ b/llvm/test/Transforms/NewGVN/pr159918.ll
@@ -0,0 +1,21 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -passes=newgvn < %s | FileCheck %s
+
+; Don't use the returned argument of memory-defining intrinsics.
+define void @wombat(ptr %arg) {
+; CHECK-LABEL: define void @wombat(
+; CHECK-SAME: ptr [[ARG:%.*]]) {
+; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[ARG]], align 8
+; CHECK-NEXT: [[CALL:%.*]] = call ptr @llvm.objc.retain(ptr [[LOAD]])
+; CHECK-NEXT: store ptr [[CALL]], ptr [[ARG]], align 8
+; CHECK-NEXT: ret void
+;
+ %load = load ptr, ptr %arg, align 8
+ %call = call ptr @llvm.objc.retain(ptr %load)
+ store ptr %call, ptr %arg, align 8
+ ret void
+}
+
+declare ptr @llvm.objc.retain(ptr returned) #0
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/no_alternate_divrem.ll b/llvm/test/Transforms/SLPVectorizer/X86/no_alternate_divrem.ll
index ed0bd3f..cf62fd5 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/no_alternate_divrem.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/no_alternate_divrem.ll
@@ -55,6 +55,54 @@ entry:
ret void
}
+define void @test_add_udiv(ptr %arr1, ptr %arr2, i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: @test_add_udiv(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[GEP1_2:%.*]] = getelementptr i32, ptr [[ARR1:%.*]], i32 2
+; CHECK-NEXT: [[GEP1_3:%.*]] = getelementptr i32, ptr [[ARR1]], i32 3
+; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[GEP1_2]], align 4
+; CHECK-NEXT: [[V3:%.*]] = load i32, ptr [[GEP1_3]], align 4
+; CHECK-NEXT: [[Y2:%.*]] = add nsw i32 [[A2:%.*]], 42
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr [[ARR1]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> <i32 poison, i32 poison, i32 0, i32 poison>, i32 [[A0:%.*]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[A1:%.*]], i32 1
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[A3:%.*]], i32 3
+; CHECK-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> <i32 1146, i32 146, i32 0, i32 0>, [[TMP3]]
+; CHECK-NEXT: [[RES2:%.*]] = udiv i32 [[V2]], [[Y2]]
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> poison, i32 [[RES2]], i32 2
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[V3]], i32 3
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x i32> [[TMP0]], <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> [[TMP7]], <4 x i32> <i32 4, i32 5, i32 2, i32 3>
+; CHECK-NEXT: [[TMP9:%.*]] = add nsw <4 x i32> [[TMP8]], [[TMP4]]
+; CHECK-NEXT: store <4 x i32> [[TMP9]], ptr [[ARR2:%.*]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %gep1.1 = getelementptr i32, ptr %arr1, i32 1
+ %gep1.2 = getelementptr i32, ptr %arr1, i32 2
+ %gep1.3 = getelementptr i32, ptr %arr1, i32 3
+ %gep2.1 = getelementptr i32, ptr %arr2, i32 1
+ %gep2.2 = getelementptr i32, ptr %arr2, i32 2
+ %gep2.3 = getelementptr i32, ptr %arr2, i32 3
+ %v0 = load i32, ptr %arr1
+ %v1 = load i32, ptr %gep1.1
+ %v2 = load i32, ptr %gep1.2
+ %v3 = load i32, ptr %gep1.3
+ %y0 = add nsw i32 %a0, 1146
+ %y1 = add nsw i32 %a1, 146
+ %y2 = add nsw i32 %a2, 42
+ %y3 = add nsw i32 %a3, 0
+ %res0 = add nsw i32 %v0, %y0
+ %res1 = add nsw i32 %v1, %y1
+ %res2 = udiv i32 %v2, %y2
+ %res3 = add nsw i32 %v3, %y3
+ store i32 %res0, ptr %arr2
+ store i32 %res1, ptr %gep2.1
+ store i32 %res2, ptr %gep2.2
+ store i32 %res3, ptr %gep2.3
+ ret void
+}
+
;; Similar test, but now div/rem is main opcode and not the alternate one. Same issue.
define void @test_urem_add(ptr %arr1, ptr %arr2, i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: @test_urem_add(
@@ -114,3 +162,56 @@ entry:
store i32 %res3, ptr %gep2.3
ret void
}
+
+define void @test_srem_add(ptr %arr1, ptr %arr2, i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: @test_srem_add(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[GEP1_1:%.*]] = getelementptr i32, ptr [[ARR1:%.*]], i32 1
+; CHECK-NEXT: [[GEP1_2:%.*]] = getelementptr i32, ptr [[ARR1]], i32 2
+; CHECK-NEXT: [[GEP1_3:%.*]] = getelementptr i32, ptr [[ARR1]], i32 3
+; CHECK-NEXT: [[GEP2_1:%.*]] = getelementptr i32, ptr [[ARR2:%.*]], i32 1
+; CHECK-NEXT: [[GEP2_2:%.*]] = getelementptr i32, ptr [[ARR2]], i32 2
+; CHECK-NEXT: [[GEP2_3:%.*]] = getelementptr i32, ptr [[ARR2]], i32 3
+; CHECK-NEXT: [[V0:%.*]] = load i32, ptr [[ARR1]], align 4
+; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[GEP1_1]], align 4
+; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[GEP1_2]], align 4
+; CHECK-NEXT: [[V3:%.*]] = load i32, ptr [[GEP1_3]], align 4
+; CHECK-NEXT: [[Y0:%.*]] = add nsw i32 [[A0:%.*]], 1146
+; CHECK-NEXT: [[Y1:%.*]] = add nsw i32 [[A1:%.*]], 146
+; CHECK-NEXT: [[Y2:%.*]] = add nsw i32 [[A2:%.*]], 42
+; CHECK-NEXT: [[Y3:%.*]] = add nsw i32 [[A3:%.*]], 0
+; CHECK-NEXT: [[RES0:%.*]] = srem i32 [[V0]], [[Y0]]
+; CHECK-NEXT: [[RES1:%.*]] = srem i32 [[V1]], [[Y1]]
+; CHECK-NEXT: [[RES2:%.*]] = srem i32 [[V2]], [[Y2]]
+; CHECK-NEXT: [[RES3:%.*]] = add nsw i32 [[V3]], [[Y3]]
+; CHECK-NEXT: store i32 [[RES0]], ptr [[ARR2]], align 4
+; CHECK-NEXT: store i32 [[RES1]], ptr [[GEP2_1]], align 4
+; CHECK-NEXT: store i32 [[RES2]], ptr [[GEP2_2]], align 4
+; CHECK-NEXT: store i32 [[RES3]], ptr [[GEP2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %gep1.1 = getelementptr i32, ptr %arr1, i32 1
+ %gep1.2 = getelementptr i32, ptr %arr1, i32 2
+ %gep1.3 = getelementptr i32, ptr %arr1, i32 3
+ %gep2.1 = getelementptr i32, ptr %arr2, i32 1
+ %gep2.2 = getelementptr i32, ptr %arr2, i32 2
+ %gep2.3 = getelementptr i32, ptr %arr2, i32 3
+ %v0 = load i32, ptr %arr1
+ %v1 = load i32, ptr %gep1.1
+ %v2 = load i32, ptr %gep1.2
+ %v3 = load i32, ptr %gep1.3
+ %y0 = add nsw i32 %a0, 1146
+ %y1 = add nsw i32 %a1, 146
+ %y2 = add nsw i32 %a2, 42
+ %y3 = add nsw i32 %a3, 0
+ %res0 = srem i32 %v0, %y0
+ %res1 = srem i32 %v1, %y1
+ %res2 = srem i32 %v2, %y2
+ %res3 = add nsw i32 %v3, %y3
+ store i32 %res0, ptr %arr2
+ store i32 %res1, ptr %gep2.1
+ store i32 %res2, ptr %gep2.2
+ store i32 %res3, ptr %gep2.3
+ ret void
+}