Diffstat (limited to 'llvm/test/CodeGen/SystemZ')
-rw-r--r--  llvm/test/CodeGen/SystemZ/htm-intrinsics.ll              4
-rw-r--r--  llvm/test/CodeGen/SystemZ/inline-asm-flag-output-01.ll   738
-rw-r--r--  llvm/test/CodeGen/SystemZ/inline-asm-flag-output-02.ll  1665
3 files changed, 2405 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll b/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll
index c6ee804..07fbed9 100644
--- a/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll
+++ b/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll
@@ -90,7 +90,7 @@ define i32 @test_tbegin_nofloat4(i32 %pad, ptr %ptr) {
; CHECK: tbegin 0, 65292
; CHECK: ipm %r2
; CHECK: srl %r2, 28
-; CHECK: ciblh %r2, 2, 0(%r14)
+; CHECK: bnhr %r14
; CHECK: mvhi 0(%r3), 0
; CHECK: br %r14
%res = call i32 @llvm.s390.tbegin.nofloat(ptr null, i32 65292)
@@ -219,7 +219,7 @@ define i32 @test_tend2(i32 %pad, ptr %ptr) {
; CHECK: tend
; CHECK: ipm %r2
; CHECK: srl %r2, 28
-; CHECK: ciblh %r2, 2, 0(%r14)
+; CHECK: bnhr %r14
; CHECK: mvhi 0(%r3), 0
; CHECK: br %r14
%res = call i32 @llvm.s390.tend()
diff --git a/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-01.ll b/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-01.ll
new file mode 100644
index 0000000..6b8746e
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-01.ll
@@ -0,0 +1,738 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
+; Test the implementation of combining br_ccmask for a flag output operand,
+; and of optimizing the ipm sequence into conditional branches.
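+;
+; A flag output operand like the ones below would come from C source along
+; these lines (hypothetical source, for illustration only; the "=@cc" output
+; is what lowers to the "={@cc}" constraint in the IR):
+;   int cc;
+;   asm volatile ("alsi %1,-1\n" : "=@cc" (cc), "+QS" (*a) : : "memory");
+;   if (cc == 0)
+;     dummy();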
+
+declare void @dummy()
+
+; Check a case where cc is used as an integer: here only the plain
+; (srl (ipm)) sequence is emitted, with no further optimization.
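+; IPM places the condition code in bits 28-29 of the 32-bit result (with the
+; program mask below it), so the srl by 28 leaves exactly the raw cc value
+; 0-3 in %r2 (e.g. cc == 2 shifts down to 2).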
+define i32 @test(ptr %a) {
+; CHECK-LABEL: test:
+; CHECK: # %bb.0:
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ipm %r2
+; CHECK-NEXT: srl %r2, 28
+; CHECK-NEXT: br %r14
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ ret i32 %cc
+}
+
+; Test-1 (f1_0_*): test all 14 valid CCMask combinations, where cc is used
+; for branching.
+
+; Check (cc == 0).
+define void @f1_0_eq_0(ptr %a) {
+; CHECK-LABEL: f1_0_eq_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jge dummy@PLT
+; CHECK-NEXT: .LBB1_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp eq i32 %cc, 0
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc != 0).
+define void @f1_0_ne_0(ptr %a) {
+; CHECK-LABEL: f1_0_ne_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgne dummy@PLT
+; CHECK-NEXT: .LBB2_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ugt i32 %cc, 0
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 1).
+define void @f1_0_eq_1(ptr %a) {
+; CHECK-LABEL: f1_0_eq_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgl dummy@PLT
+; CHECK-NEXT: .LBB3_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp eq i32 %cc, 1
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc != 1).
+define void @f1_0_ne_1(ptr %a) {
+; CHECK-LABEL: f1_0_ne_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB4_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ne i32 %cc, 1
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 2).
+define void @f1_0_eq_2(ptr %a) {
+; CHECK-LABEL: f1_0_eq_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgh dummy@PLT
+; CHECK-NEXT: .LBB5_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp eq i32 %cc, 2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc != 2).
+define void @f1_0_ne_2(ptr %a) {
+; CHECK-LABEL: f1_0_ne_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnh dummy@PLT
+; CHECK-NEXT: .LBB6_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ne i32 %cc, 2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 3).
+define void @f1_0_eq_3(ptr %a) {
+; CHECK-LABEL: f1_0_eq_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgo dummy@PLT
+; CHECK-NEXT: .LBB7_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp eq i32 %cc, 3
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc != 3).
+define void @f1_0_ne_3(ptr %a) {
+; CHECK-LABEL: f1_0_ne_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgno dummy@PLT
+; CHECK-NEXT: .LBB8_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ult i32 %cc, 3
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 0|1).
+define void @f1_0_01(ptr %a) {
+; CHECK-LABEL: f1_0_01:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgle dummy@PLT
+; CHECK-NEXT: .LBB9_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ult i32 %cc, 2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 0|2).
+define void @f1_0_02(ptr %a) {
+; CHECK-LABEL: f1_0_02:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jghe dummy@PLT
+; CHECK-NEXT: .LBB10_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 0|3).
+define void @f1_0_03(ptr %a) {
+; CHECK-LABEL: f1_0_03:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnlh dummy@PLT
+; CHECK-NEXT: .LBB11_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp0 = icmp ne i32 %cc, 0
+ %cmp3 = icmp ne i32 %cc, 3
+ %cmp.inv = and i1 %cmp0, %cmp3
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 1|2).
+define void @f1_0_12(ptr %a) {
+; CHECK-LABEL: f1_0_12:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jglh dummy@PLT
+; CHECK-NEXT: .LBB12_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq1 = icmp eq i32 %cc, 1
+ %cmpeq2 = icmp eq i32 %cc, 2
+ %cmp = or i1 %cmpeq1, %cmpeq2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 1|3).
+define void @f1_0_13(ptr %a) {
+; CHECK-LABEL: f1_0_13:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnhe dummy@PLT
+; CHECK-NEXT: .LBB13_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq1 = icmp eq i32 %cc, 1
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cmp = or i1 %cmpeq1, %cmpeq3
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 2|3).
+define void @f1_0_23(ptr %a) {
+; CHECK-LABEL: f1_0_23:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnle dummy@PLT
+; CHECK-NEXT: .LBB14_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ugt i32 %cc, 1
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Test-2 (f1_1_*/f1_2_*/f1_3_*/f1_4_*).
+; Test mixed patterns involving binary ops.
+
+; Check 'add' for (cc != 0).
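+; The range check below is encoded as unsigned arithmetic: given the
+; assume(cc u< 4), (cc + -1) u< 3 holds exactly for cc in {1,2,3}, i.e.
+; cc != 0, which folds to the single jgne branch below.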
+define void @f1_1_1(ptr %a) {
+; CHECK-LABEL: f1_1_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgne dummy@PLT
+; CHECK-NEXT: .LBB15_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cmp = icmp ult i32 %add, 3
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'add' for (cc == 1|2).
+define void @f1_1_2(ptr %a) {
+; CHECK-LABEL: f1_1_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jglh dummy@PLT
+; CHECK-NEXT: .LBB16_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cmp = icmp ult i32 %add, 2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'add' for (cc == 1|2), with the branch condition inverted.
+define void @f1_1_3(ptr %a) {
+; CHECK-LABEL: f1_1_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jglh dummy@PLT
+; CHECK-NEXT: .LBB17_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -3
+ %cmp.inv = icmp ult i32 %add, -2
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'and' with one operand cc and the other select_ccmask (cc != 1).
+define void @f1_2_1(ptr %a) {
+; CHECK-LABEL: f1_2_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB18_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpne0 = icmp ne i32 %andcc, 0
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cmp.inv = and i1 %cmpne3, %cmpne0
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'and' with both operands select_ccmask(cc != 2).
+define void @f1_2_2(ptr %a) {
+; CHECK-LABEL: f1_2_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnh dummy@PLT
+; CHECK-NEXT: .LBB19_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %ugt1 = icmp samesign ugt i32 %cc, 1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %and.cond.inv = and i1 %ugt1, %cmpne3
+ br i1 %and.cond.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'and/tm' for (cc == 0|2).
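+; (cc & 1) == 0 holds exactly for the even values cc in {0,2}, i.e. the
+; CC mask 0|2, hence the jghe (high or equal) branch below.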
+define void @f1_2_3(ptr %a) {
+; CHECK-LABEL: f1_2_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jghe dummy@PLT
+; CHECK-NEXT: .LBB20_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'and/tm' for (cc == 1|3).
+define void @f1_2_4(ptr %a) {
+; CHECK-LABEL: f1_2_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnhe dummy@PLT
+; CHECK-NEXT: .LBB21_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'icmp' with one operand 'and' and the other 'select_ccmask' (cc != 1).
+define void @f1_2_5(ptr %a) {
+; CHECK-LABEL: f1_2_5:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB22_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cmp = xor i1 %cmpne3, %trunc
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check nested 'xor' of cc with select_ccmask (cc != 1).
+define void @f1_3_1(ptr %a) {
+; CHECK-LABEL: f1_3_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB23_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq0 = icmp eq i32 %cc, 0
+ %cmpeq2 = icmp eq i32 %cc, 2
+ %xor = xor i1 %cmpeq0, %cmpeq2
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cmp.inv = xor i1 %cmpne3, %xor
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 1).
+define void @f1_3_2(ptr %a) {
+; CHECK-LABEL: f1_3_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB24_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cmp.inv = xor i1 %cmpeq3, %trunc
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 2).
+define void @f1_3_3(ptr %a) {
+; CHECK-LABEL: f1_3_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnh dummy@PLT
+; CHECK-NEXT: .LBB25_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne0 = icmp ne i32 %cc, 0
+ %cmp.cond.inv = xor i1 %cmpne0, %trunc
+ br i1 %cmp.cond.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'or' with both operands select_ccmask, one with TM and the other
+; with ICMP (cc == 1).
+define void @f1_4_1(ptr %a) {
+; CHECK-LABEL: f1_4_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgl dummy@PLT
+; CHECK-NEXT: .LBB26_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpeq0 = icmp eq i32 %andcc, 0
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cmp.cond.inv = or i1 %cmpeq3, %cmpeq0
+ br i1 %cmp.cond.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'or' for (cc == 0|1), with the branch condition inverted.
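+; 'or disjoint %cc, -4' maps cc in {0,1,2,3} to {-4,-3,-2,-1}; an unsigned
+; compare of the result against -3 or -2 then separates cc in {0,1} from
+; cc in {2,3}, i.e. the jgle (low or equal) branch below.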
+define void @f1_4_2(ptr %a) {
+; CHECK-LABEL: f1_4_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgle dummy@PLT
+; CHECK-NEXT: .LBB27_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cmp.inv = icmp samesign ugt i32 %or, -3
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'or' for (cc == 0|1).
+define void @f1_4_3(ptr %a) {
+; CHECK-LABEL: f1_4_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgle dummy@PLT
+; CHECK-NEXT: .LBB28_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cmp = icmp samesign ult i32 %or, -2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-02.ll b/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-02.ll
new file mode 100644
index 0000000..b9b9a4b
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-02.ll
@@ -0,0 +1,1665 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
+; Test the implementation of combining select_ccmask for a flag output
+; operand, and of optimizing the ipm sequence into conditional branches.
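+;
+; The select cases below would correspond to C source along these lines
+; (hypothetical source, for illustration only; the "=@cc" output is what
+; lowers to the "={@cc}" constraint in the IR):
+;   int cc;
+;   asm volatile ("alsi %1,-1\n" : "=@cc" (cc), "+QS" (*a) : : "memory");
+;   return cc == 0 ? x : y;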
+
+; Test-1 (f2_0_*): both TrueVal and FalseVal non-const
+; (14 valid CCMask combinations).
+
+; Check (cc == 0).
+define i64 @f2_0_eq_0(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_eq_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ber %r14
+; CHECK-NEXT: .LBB0_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp eq i32 %cc, 0
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc != 0).
+define i64 @f2_0_ne_0(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_ne_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bner %r14
+; CHECK-NEXT: .LBB1_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ugt i32 %cc, 0
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 1).
+define i64 @f2_0_eq_1(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_eq_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB2_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp eq i32 %cc, 1
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc != 1).
+define i64 @f2_0_ne_1(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_ne_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB3_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ne i32 %cc, 1
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 2).
+define i64 @f2_0_eq_2(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_eq_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB4_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp eq i32 %cc, 2
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc != 2).
+define i64 @f2_0_ne_2(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_ne_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnhr %r14
+; CHECK-NEXT: .LBB5_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ne i32 %cc, 2
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 3).
+define i64 @f2_0_eq_3(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_eq_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bor %r14
+; CHECK-NEXT: .LBB6_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp eq i32 %cc, 3
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc != 3).
+define i64 @f2_0_ne_3(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_ne_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnor %r14
+; CHECK-NEXT: .LBB7_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ult i32 %cc, 3
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 0|1).
+define i64 @f2_0_01(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_01:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bler %r14
+; CHECK-NEXT: .LBB8_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ult i32 %cc, 2
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 0|2).
+define i64 @f2_0_02(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_02:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB9_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %and = and i32 %cc, 1
+ %cond = icmp eq i32 %and, 0
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 0|3).
+define i64 @f2_0_03(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_0_03:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blhr %r14
+; CHECK-NEXT: .LBB10_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cmp0 = icmp ne i32 %cc, 0
+ %cmp3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %cmp0, %cmp3
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check (cc == 1|2).
+define i64 @f2_0_12(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_0_12:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlhr %r14
+; CHECK-NEXT: .LBB11_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %add = add nsw i32 %cc, -3
+ %cond.inv = icmp ult i32 %add, -2
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check (cc == 1|3).
+define i64 @f2_0_13(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_0_13:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB12_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %and = and i32 %cc, 1
+ %cond.inv = icmp eq i32 %and, 0
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check (cc == 2|3).
+define i64 @f2_0_23(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_23:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnler %r14
+; CHECK-NEXT: .LBB13_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ugt i32 %cc, 1
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Test-2 (f2_1_*/f2_2_*/f2_3_*/f2_4_*).
+; Both TrueVal and FalseVal are non-const, with mixed patterns involving
+; binary ops.
+
+; Check 'add' for (cc != 0).
+define i64 @f2_1_1(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_1_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bner %r14
+; CHECK-NEXT: .LBB14_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 3
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2).
+define i64 @f2_1_2(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_1_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blhr %r14
+; CHECK-NEXT: .LBB15_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 2
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2), with the select condition inverted.
+define i64 @f2_1_3(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_1_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlhr %r14
+; CHECK-NEXT: .LBB16_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -3
+ %cond.inv = icmp ult i32 %add, -2
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'and' with one operand cc and the other select_ccmask (cc != 1).
+define i64 @f2_2_1(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_2_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB17_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpne0 = icmp ne i32 %andcc, 0
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %cmpne3, %cmpne0
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'and' with both operands select_ccmask(cc != 2).
+define i64 @f2_2_2(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_2_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB18_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %ugt1 = icmp samesign ugt i32 %cc, 1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %ugt1, %cmpne3
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 0|2).
+define i64 @f2_2_3(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_2_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB19_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond = icmp eq i32 %and, 0
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 1|3).
+define i64 @f2_2_4(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_2_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB20_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond.inv = icmp eq i32 %and, 0
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'icmp' with one operand 'and' and the other 'select_ccmask' (cc != 1).
+define i64 @f2_2_5(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_2_5:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB21_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond = xor i1 %cmpne3, %trunc
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+
+; Check nested 'xor' of cc with select_ccmask (cc != 1).
+define i64 @f2_3_1(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_3_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB22_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq0 = icmp eq i32 %cc, 0
+ %cmpeq2 = icmp eq i32 %cc, 2
+ %xor = xor i1 %cmpeq0, %cmpeq2
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = xor i1 %cmpne3, %xor
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 1).
+define i64 @f2_3_2(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_3_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB23_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = xor i1 %cmpeq3, %trunc
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 2).
+define i64 @f2_3_3(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_3_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB24_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne0 = icmp ne i32 %cc, 0
+ %cond.inv = xor i1 %cmpne0, %trunc
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'or' with both operands select_ccmask, one with TM and the other
+; with ICMP (cc == 1).
+define i64 @f2_4_1(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_4_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB25_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpeq0 = icmp eq i32 %andcc, 0
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = or i1 %cmpeq3, %cmpeq0
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1), with the select condition inverted.
+define i64 @f2_4_2(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_4_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnler %r14
+; CHECK-NEXT: .LBB26_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond.inv = icmp samesign ugt i32 %or, -3
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1).
+define i64 @f2_4_3(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_4_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bler %r14
+; CHECK-NEXT: .LBB27_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond = icmp samesign ult i32 %or, -2
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Test-3 (f3_1_*/f3_2_*/f3_3_*/f3_4_*).
+; TrueVal is non-const and FalseVal is const, with mixed patterns involving
+; binary ops.
+
+; Check 'add' for (cc != 0).
+define i64 @f3_1_1(i64 %x, ptr %a) {
+; CHECK-LABEL: f3_1_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bner %r14
+; CHECK-NEXT: .LBB28_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 3
+ %res = select i1 %cond, i64 %x, i64 5
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2).
+define i64 @f3_1_2(i64 %x, ptr %a) {
+; CHECK-LABEL: f3_1_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blhr %r14
+; CHECK-NEXT: .LBB29_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 2
+ %res = select i1 %cond, i64 %x, i64 5
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2), with the select condition inverted.
+define i64 @f3_1_3(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_1_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bnlhr %r14
+; CHECK-NEXT: .LBB30_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -3
+ %cond.inv = icmp ult i32 %add, -2
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check 'and' with one operand cc and the other select_ccmask (cc != 1).
+define i64 @f3_2_1(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_2_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB31_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpne0 = icmp ne i32 %andcc, 0
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %cmpne3, %cmpne0
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check 'and' with both operands select_ccmask(cc != 2).
+define i64 @f3_2_2(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_2_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB32_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %ugt1 = icmp samesign ugt i32 %cc, 1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %ugt1, %cmpne3
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 0|2).
+define i64 @f3_2_3(i64 %x, ptr %a) {
+; CHECK-LABEL: f3_2_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB33_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond = icmp eq i32 %and, 0
+ %res = select i1 %cond, i64 %x, i64 5
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 1|3).
+define i64 @f3_2_4(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_2_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB34_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond.inv = icmp eq i32 %and, 0
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check 'icmp' with one operand 'and' and the other 'select_ccmask' (cc != 1).
+define i64 @f3_2_5(i64 %x, ptr %a) {
+; CHECK-LABEL: f3_2_5:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB35_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond = xor i1 %cmpne3, %trunc
+ %res = select i1 %cond, i64 %x, i64 5
+ ret i64 %res
+}
+
+
+; Check nested 'xor' of cc with select_ccmask (cc != 1).
+define i64 @f3_3_1(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_3_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB36_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq0 = icmp eq i32 %cc, 0
+ %cmpeq2 = icmp eq i32 %cc, 2
+ %xor = xor i1 %cmpeq0, %cmpeq2
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = xor i1 %cmpne3, %xor
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 1).
+define i64 @f3_3_2(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_3_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB37_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = xor i1 %cmpeq3, %trunc
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 2).
+define i64 @f3_3_3(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_3_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB38_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne0 = icmp ne i32 %cc, 0
+ %cond.inv = xor i1 %cmpne0, %trunc
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check 'or' with both operands select_ccmask, one with TM and the other
+; with ICMP (cc == 1).
+define i64 @f3_4_1(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_4_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB39_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpeq0 = icmp eq i32 %andcc, 0
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = or i1 %cmpeq3, %cmpeq0
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1), with the select condition inverted.
+define i64 @f3_4_2(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_4_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bnler %r14
+; CHECK-NEXT: .LBB40_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond.inv = icmp samesign ugt i32 %or, -3
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1).
+define i64 @f3_4_3(i64 %x, ptr %a) {
+; CHECK-LABEL: f3_4_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bler %r14
+; CHECK-NEXT: .LBB41_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond = icmp samesign ult i32 %or, -2
+ %res = select i1 %cond, i64 %x, i64 5
+ ret i64 %res
+}
+
+
+; Test-4 (f4_1_*/f4_2_*/f4_3_*/f4_4_*).
+; TrueVal is const and FalseVal is non-const, with mixed patterns involving
+; binary ops.
+
+; Check 'add' for (cc != 0).
+define i64 @f4_1_1(ptr %a, i64 %y) {
+; CHECK-LABEL: f4_1_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bner %r14
+; CHECK-NEXT: .LBB42_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 3
+ %res = select i1 %cond, i64 15, i64 %y
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2).
+define i64 @f4_1_2(ptr %a, i64 %y) {
+; CHECK-LABEL: f4_1_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: blhr %r14
+; CHECK-NEXT: .LBB43_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 2
+ %res = select i1 %cond, i64 15, i64 %y
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2), with the select condition inverted.
+define i64 @f4_1_3(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_1_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlhr %r14
+; CHECK-NEXT: .LBB44_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -3
+ %cond.inv = icmp ult i32 %add, -2
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'and' with one operand cc and the other select_ccmask (cc != 1).
+define i64 @f4_2_1(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_2_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB45_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpne0 = icmp ne i32 %andcc, 0
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %cmpne3, %cmpne0
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'and' with both operands select_ccmask(cc != 2).
+define i64 @f4_2_2(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_2_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB46_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %ugt1 = icmp samesign ugt i32 %cc, 1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %ugt1, %cmpne3
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 0|2).
+define i64 @f4_2_3(ptr %a, i64 %y) {
+; CHECK-LABEL: f4_2_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB47_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond = icmp eq i32 %and, 0
+ %res = select i1 %cond, i64 15, i64 %y
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 1|3).
+define i64 @f4_2_4(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_2_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB48_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond.inv = icmp eq i32 %and, 0
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'icmp' with one operand 'and' and the other 'select_ccmask' (cc != 1).
+define i64 @f4_2_5(ptr %a, i64 %y) {
+; CHECK-LABEL: f4_2_5:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB49_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond = xor i1 %cmpne3, %trunc
+ %res = select i1 %cond, i64 15, i64 %y
+ ret i64 %res
+}
+
+
+; Check nested 'xor' of cc comparisons with select_ccmask(cc != 1).
+define i64 @f4_3_1(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_3_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB50_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq0 = icmp eq i32 %cc, 0
+ %cmpeq2 = icmp eq i32 %cc, 2
+ %xor = xor i1 %cmpeq0, %cmpeq2
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = xor i1 %cmpne3, %xor
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask(cc != 1).
+define i64 @f4_3_2(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_3_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB51_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = xor i1 %cmpeq3, %trunc
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask(cc != 2).
+define i64 @f4_3_3(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_3_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB52_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne0 = icmp ne i32 %cc, 0
+ %cond.inv = xor i1 %cmpne0, %trunc
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'or' with both operands select_ccmask, one via TM and one via ICMP (cc == 1).
+define i64 @f4_4_1(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_4_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB53_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpeq0 = icmp eq i32 %andcc, 0
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = or i1 %cmpeq3, %cmpeq0
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1).
+define i64 @f4_4_2(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_4_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnler %r14
+; CHECK-NEXT: .LBB54_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond.inv = icmp samesign ugt i32 %or, -3
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1), testing the condition directly rather than inverted.
+define i64 @f4_4_3(ptr %a, i64 %y) {
+; CHECK-LABEL: f4_4_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bler %r14
+; CHECK-NEXT: .LBB55_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond = icmp samesign ult i32 %or, -2
+ %res = select i1 %cond, i64 15, i64 %y
+ ret i64 %res
+}
+
+; Test-5(f5_1_*/f5_2_*/f5_3_*/f5_4_*).
+; Both TrueVal and FalseVal are constants, with mixed patterns involving
+; binary ops.
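+;
+; Note: in these tests the inline asm sets cc via 'alsi', and the
+; icmp-ult-4/llvm.assume pair tells the optimizer that %cc holds a valid
+; condition code (0-3); this is what allows the select on a cc comparison
+; to be lowered to a conditional branch (e.g. 'bner'/'bher') instead of an
+; ipm/srl/compare sequence.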
+
+
+; Check 'add' for (cc != 0).
+define i64 @f5_1_1(ptr %a) {
+; CHECK-LABEL: f5_1_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bner %r14
+; CHECK-NEXT: .LBB56_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 3
+ %res = select i1 %cond, i64 15, i64 5
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2).
+define i64 @f5_1_2(ptr %a) {
+; CHECK-LABEL: f5_1_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: blhr %r14
+; CHECK-NEXT: .LBB57_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 2
+ %res = select i1 %cond, i64 15, i64 5
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2), with the inverted condition.
+define i64 @f5_1_3(ptr %a) {
+; CHECK-LABEL: f5_1_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bnlhr %r14
+; CHECK-NEXT: .LBB58_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -3
+ %cond.inv = icmp ult i32 %add, -2
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check 'and' with one operand cc and the other select_ccmask(cc != 1).
+define i64 @f5_2_1(ptr %a) {
+; CHECK-LABEL: f5_2_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB59_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpne0 = icmp ne i32 %andcc, 0
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %cmpne3, %cmpne0
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check 'and' with both operands select_ccmask(cc != 2).
+define i64 @f5_2_2(ptr %a) {
+; CHECK-LABEL: f5_2_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB60_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %ugt1 = icmp samesign ugt i32 %cc, 1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %ugt1, %cmpne3
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 0|2).
+define i64 @f5_2_3(ptr %a) {
+; CHECK-LABEL: f5_2_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB61_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond = icmp eq i32 %and, 0
+ %res = select i1 %cond, i64 15, i64 5
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 1|3).
+define i64 @f5_2_4(ptr %a) {
+; CHECK-LABEL: f5_2_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB62_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond.inv = icmp eq i32 %and, 0
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check 'icmp' with one operand 'and' and the other 'select_ccmask'(cc != 1).
+define i64 @f5_2_5(ptr %a) {
+; CHECK-LABEL: f5_2_5:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB63_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond = xor i1 %cmpne3, %trunc
+ %res = select i1 %cond, i64 15, i64 5
+ ret i64 %res
+}
+
+
+; Check nested 'xor' of cc comparisons with select_ccmask(cc != 1).
+define i64 @f5_3_1(ptr %a) {
+; CHECK-LABEL: f5_3_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB64_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq0 = icmp eq i32 %cc, 0
+ %cmpeq2 = icmp eq i32 %cc, 2
+ %xor = xor i1 %cmpeq0, %cmpeq2
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = xor i1 %cmpne3, %xor
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask(cc != 1).
+define i64 @f5_3_2(ptr %a) {
+; CHECK-LABEL: f5_3_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB65_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = xor i1 %cmpeq3, %trunc
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask(cc != 2).
+define i64 @f5_3_3(ptr %a) {
+; CHECK-LABEL: f5_3_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB66_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne0 = icmp ne i32 %cc, 0
+ %cond.inv = xor i1 %cmpne0, %trunc
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check 'or' with both operands select_ccmask, one via TM and one via ICMP (cc == 1).
+define i64 @f5_4_1(ptr %a) {
+; CHECK-LABEL: f5_4_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB67_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpeq0 = icmp eq i32 %andcc, 0
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = or i1 %cmpeq3, %cmpeq0
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1).
+define i64 @f5_4_2(ptr %a) {
+; CHECK-LABEL: f5_4_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bnler %r14
+; CHECK-NEXT: .LBB68_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond.inv = icmp samesign ugt i32 %or, -3
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1), testing the condition directly rather than inverted.
+define i64 @f5_4_3(ptr %a) {
+; CHECK-LABEL: f5_4_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bler %r14
+; CHECK-NEXT: .LBB69_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond = icmp samesign ult i32 %or, -2
+ %res = select i1 %cond, i64 15, i64 5
+ ret i64 %res
+}
+
+; Nested select_ccmask with TrueVal and FalseVal swapped between the selects.
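+; (%res is 15 iff cc == 0|2: when %andcc is 0, %cc cannot be 3, so the
+; inner select always yields 15; otherwise %res is 5.)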
+define i64 @f6_1(ptr %a) {
+; CHECK-LABEL: f6_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB70_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %andcc = and i32 %cc, 1
+ %cmpeq0 = icmp eq i32 %andcc, 0
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %select = select i1 %cmpeq3, i64 5, i64 15
+ %res = select i1 %cmpeq0, i64 %select, i64 5
+ ret i64 %res
+}
+