aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPhoebe Wang <phoebe.wang@intel.com>2022-06-21 08:40:32 +0800
committerPhoebe Wang <phoebe.wang@intel.com>2022-06-21 09:09:27 +0800
commitedcc68e86f784fa6e1514f230a3c89a275a66bb6 (patch)
treedb4f4946b540dbf4faf5b8fe31ce5bea8d9108cd
parent7c5957aedb75f381cd9996f9eba96f3add16a721 (diff)
downloadllvm-edcc68e86f784fa6e1514f230a3c89a275a66bb6.zip
llvm-edcc68e86f784fa6e1514f230a3c89a275a66bb6.tar.gz
llvm-edcc68e86f784fa6e1514f230a3c89a275a66bb6.tar.bz2
[X86] Make sure SF is updated when optimizing for `jg/jge/jl/jle`
This fixes issue #56103. Reviewed By: mingmingl. Differential Revision: https://reviews.llvm.org/D128122
-rw-r--r--llvm/lib/Target/X86/X86InstrInfo.cpp5
-rw-r--r--llvm/test/CodeGen/X86/pr56103.ll65
2 files changed, 70 insertions, 0 deletions
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index f2101ea..ec32ac2 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4462,6 +4462,11 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
return false;
case X86::COND_G: case X86::COND_GE:
case X86::COND_L: case X86::COND_LE:
+ // If SF is used, but the instruction doesn't update the SF, then we
+ // can't do the optimization.
+ if (NoSignFlag)
+ return false;
+ LLVM_FALLTHROUGH;
case X86::COND_O: case X86::COND_NO:
// If OF is used, the instruction needs to clear it like CmpZero does.
if (!ClearsOverflowFlag)
diff --git a/llvm/test/CodeGen/X86/pr56103.ll b/llvm/test/CodeGen/X86/pr56103.ll
new file mode 100644
index 0000000..3d979a0
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr56103.ll
@@ -0,0 +1,65 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-linux-generic | FileCheck %s
+
+@e = global i16 0, align 2
+@a = global i32 0, align 4
+@c = global i32 0, align 4
+@b = global i64 0, align 8
+
+; Check the test instruction won't be optimized by peephole opt.
+
+define dso_local i32 @main() nounwind {
+; CHECK-LABEL: main:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: movq e@GOTPCREL(%rip), %rax
+; CHECK-NEXT: movw $1, (%rax)
+; CHECK-NEXT: movq b@GOTPCREL(%rip), %rax
+; CHECK-NEXT: movq $1, (%rax)
+; CHECK-NEXT: movq a@GOTPCREL(%rip), %rax
+; CHECK-NEXT: movl (%rax), %ecx
+; CHECK-NEXT: movl $-2, %eax
+; CHECK-NEXT: sarl %cl, %eax
+; CHECK-NEXT: movq c@GOTPCREL(%rip), %rdx
+; CHECK-NEXT: movl (%rdx), %edx
+; CHECK-NEXT: decl %edx
+; CHECK-NEXT: movzwl %ax, %eax
+; CHECK-NEXT: decl %eax
+; CHECK-NEXT: xorl %edx, %eax
+; CHECK-NEXT: notl %ecx
+; CHECK-NEXT: andl %eax, %ecx
+; CHECK-NEXT: testq %rcx, %rcx
+; CHECK-NEXT: jle .LBB0_2
+; CHECK-NEXT: # %bb.1: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB0_2: # %if.then
+; CHECK-NEXT: callq abort@PLT
+entry:
+ store i16 1, ptr @e, align 2
+ store i64 1, ptr @b, align 8
+ %0 = load i32, ptr @a, align 4
+ %shr = ashr i32 -2, %0
+ %1 = load i32, ptr @c, align 4
+ %sub = add i32 %1, -1
+ %conv2 = zext i32 %sub to i64
+ %2 = and i32 %shr, 65535
+ %conv3 = zext i32 %2 to i64
+ %sub4 = add nsw i64 %conv3, -1
+ %xor = xor i64 %sub4, %conv2
+ %neg5 = xor i32 %0, -1
+ %conv6 = sext i32 %neg5 to i64
+ %and = and i64 %xor, %conv6
+ %cmp = icmp slt i64 %and, 1
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @abort() #2
+ unreachable
+
+if.end: ; preds = %entry
+ ret i32 0
+}
+
+declare void @abort()