author    Craig Topper <craig.topper@sifive.com>    2021-10-16 13:31:41 -0700
committer Craig Topper <craig.topper@sifive.com>    2021-10-16 13:31:43 -0700
commit    beb7862db520541fea429f27733e1993d254d76f (patch)
tree      8214f846d83fe2a535d72aa728219981e8cad77f
parent    ecbee4804d44c0afdf97fe59e8221c30cbbf3ae7 (diff)
[X86] Add DAG combine for negation of CMOV absolute value pattern.
This patch detects the absolute value pattern on the RHS of a subtract. If we find it, we swap the CMOV true/false values and replace the subtract with an ADD.

There may be a more generic way to do this, but I'm not sure. Targets that don't have legal or custom ISD::ABS already use a generic expansion in the DAG combiner when they see (neg (abs(x))). I haven't checked what happens if the neg is a more general subtract.

Fixes PR50991 for X86.

Reviewed By: RKSimon, spatel

Differential Revision: https://reviews.llvm.org/D111858
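As a rough, standalone illustration of the identity the combine relies on (a minimal C++ sketch, not LLVM code; the helpers absViaCmov and nabsViaCmov are made-up names for this example): the cmov expansion of abs(x) selects between x and -x on the sign of the negate, so swapping the true/false arms produces -abs(x) ("nabs"), which turns (sub Y, abs(X)) into (add Y, nabs(X)).

#include <cassert>
#include <cstdint>

// abs(x) as the cmov expansion computes it: negate, then select the
// non-negative of {x, -x} based on the sign of the negate's result.
static int32_t absViaCmov(int32_t x) {
  int32_t neg = 0 - x;      // X86ISD::SUB (0, X): value in result 0, flags in result 1
  return neg < 0 ? x : neg; // CMOVS: take X when -X is negative
}

// Swapping the cmov true/false arms yields -abs(x), i.e. "nabs", for free.
static int32_t nabsViaCmov(int32_t x) {
  int32_t neg = 0 - x;
  return neg < 0 ? neg : x; // CMOVNS: the same select with arms swapped
}

int main() {
  for (int32_t x : {-7, -1, 0, 1, 42})
    for (int32_t y : {-3, 0, 5})
      // The combine itself: (sub Y, abs(X)) == (add Y, nabs(X)).
      assert(y - absViaCmov(x) == y + nabsViaCmov(x));
  return 0;
}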
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 44
-rw-r--r--  llvm/test/CodeGen/X86/neg-abs.ll         | 37
2 files changed, 59 insertions(+), 22 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2a01266..2282400 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -51425,6 +51425,47 @@ static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
return combineAddOrSubToADCOrSBB(N, DAG);
}
+// Try to fold (sub Y, cmovns X, -X) -> (add Y, cmovns -X, X) if the cmov
+// condition comes from the subtract node that produced -X. This matches the
+// cmov expansion for absolute value. By swapping the operands we convert abs
+// to nabs.
+static SDValue combineSubABS(SDNode *N, SelectionDAG &DAG) {
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+
+  if (N1.getOpcode() != X86ISD::CMOV || !N1.hasOneUse())
+    return SDValue();
+
+  X86::CondCode CC = (X86::CondCode)N1.getConstantOperandVal(2);
+  if (CC != X86::COND_S && CC != X86::COND_NS)
+    return SDValue();
+
+  // Condition should come from a negate operation.
+  SDValue Cond = N1.getOperand(3);
+  if (Cond.getOpcode() != X86ISD::SUB || !isNullConstant(Cond.getOperand(0)))
+    return SDValue();
+  assert(Cond.getResNo() == 1 && "Unexpected result number");
+
+  // Get the X and -X from the negate.
+  SDValue NegX = Cond.getValue(0);
+  SDValue X = Cond.getOperand(1);
+
+  SDValue FalseOp = N1.getOperand(0);
+  SDValue TrueOp = N1.getOperand(1);
+
+  // Cmov operands should be X and NegX. Order doesn't matter.
+  if (!(TrueOp == X && FalseOp == NegX) && !(TrueOp == NegX && FalseOp == X))
+    return SDValue();
+
+  // Build a new CMOV with the operands swapped.
+  SDLoc DL(N);
+  MVT VT = N->getSimpleValueType(0);
+  SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, TrueOp, FalseOp,
+                             N1.getOperand(2), Cond);
+  // Convert sub to add.
+  return DAG.getNode(ISD::ADD, DL, VT, N0, Cmov);
+}
+
static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
@@ -51456,6 +51497,9 @@ static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(ISD::ADD, DL, VT, NewXor, NewAdd);
}
+  if (SDValue V = combineSubABS(N, DAG))
+    return V;
+
// Try to synthesize horizontal subs from subs of shuffles.
if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
return V;
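The updated checks in the test diff below show the effect: the cmovs plus extra negate collapses to a single cmovns, and in the sub_abs cases the subtract becomes an add so the result is built directly in the return register. As a hedged sanity check, here is a plain C++ emulation of the old and new i32 sequences (sketch only; the subAbsOld/subAbsNew names and sampled values are invented for this illustration):

#include <cassert>
#include <cstdint>

// Old lowering: movl, movl, negl, cmovsl, subl (five instructions, two
// registers live). Variable names mirror the registers in the checks.
static int32_t subAbsOld(int32_t x, int32_t y) {
  int32_t eax = y;          // movl %esi, %eax
  int32_t ecx = 0 - x;      // movl %edi, %ecx; negl %ecx (sets SF)
  ecx = ecx < 0 ? x : ecx;  // cmovsl %edi, %ecx   => abs(x)
  return eax - ecx;         // subl %ecx, %eax     => y - abs(x)
}

// New lowering: movl, negl, cmovnsl, addl (one instruction and one
// register fewer; the result lands directly in %eax).
static int32_t subAbsNew(int32_t x, int32_t y) {
  int32_t eax = 0 - x;      // movl %edi, %eax; negl %eax (sets SF)
  eax = eax < 0 ? eax : x;  // cmovnsl %edi, %eax  => -abs(x)
  return eax + y;           // addl %esi, %eax     => y - abs(x)
}

int main() {
  for (int32_t x : {-9, -1, 0, 2, 100})
    for (int32_t y : {-4, 0, 7})
      assert(subAbsOld(x, y) == subAbsNew(x, y));
  return 0;
}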
diff --git a/llvm/test/CodeGen/X86/neg-abs.ll b/llvm/test/CodeGen/X86/neg-abs.ll
index a8320cb..f34f683 100644
--- a/llvm/test/CodeGen/X86/neg-abs.ll
+++ b/llvm/test/CodeGen/X86/neg-abs.ll
@@ -46,9 +46,7 @@ define i16 @neg_abs_i16(i16 %x) nounwind {
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: negw %ax
-; X64-NEXT: cmovsw %di, %ax
-; X64-NEXT: negl %eax
-; X64-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NEXT: cmovnsw %di, %ax
; X64-NEXT: retq
%abs = tail call i16 @llvm.abs.i16(i16 %x, i1 true)
%neg = sub nsw i16 0, %abs
@@ -69,8 +67,7 @@ define i32 @neg_abs_i32(i32 %x) nounwind {
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: negl %eax
-; X64-NEXT: cmovsl %edi, %eax
-; X64-NEXT: negl %eax
+; X64-NEXT: cmovnsl %edi, %eax
; X64-NEXT: retq
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
%neg = sub nsw i32 0, %abs
@@ -97,8 +94,7 @@ define i64 @neg_abs_i64(i64 %x) nounwind {
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: negq %rax
-; X64-NEXT: cmovsq %rdi, %rax
-; X64-NEXT: negq %rax
+; X64-NEXT: cmovnsq %rdi, %rax
; X64-NEXT: retq
%abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
%neg = sub nsw i64 0, %abs
@@ -197,11 +193,10 @@ define i16 @sub_abs_i16(i16 %x, i16 %y) nounwind {
;
; X64-LABEL: sub_abs_i16:
; X64: # %bb.0:
-; X64-NEXT: movl %esi, %eax
-; X64-NEXT: movl %edi, %ecx
-; X64-NEXT: negw %cx
-; X64-NEXT: cmovsw %di, %cx
-; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: negw %ax
+; X64-NEXT: cmovnsw %di, %ax
+; X64-NEXT: addl %esi, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%abs = tail call i16 @llvm.abs.i16(i16 %x, i1 false)
@@ -223,11 +218,10 @@ define i32 @sub_abs_i32(i32 %x, i32 %y) nounwind {
;
; X64-LABEL: sub_abs_i32:
; X64: # %bb.0:
-; X64-NEXT: movl %esi, %eax
-; X64-NEXT: movl %edi, %ecx
-; X64-NEXT: negl %ecx
-; X64-NEXT: cmovsl %edi, %ecx
-; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: negl %eax
+; X64-NEXT: cmovnsl %edi, %eax
+; X64-NEXT: addl %esi, %eax
; X64-NEXT: retq
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 false)
%neg = sub i32 %y, %abs
@@ -257,11 +251,10 @@ define i64 @sub_abs_i64(i64 %x, i64 %y) nounwind {
;
; X64-LABEL: sub_abs_i64:
; X64: # %bb.0:
-; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: movq %rdi, %rcx
-; X64-NEXT: negq %rcx
-; X64-NEXT: cmovsq %rdi, %rcx
-; X64-NEXT: subq %rcx, %rax
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: negq %rax
+; X64-NEXT: cmovnsq %rdi, %rax
+; X64-NEXT: addq %rsi, %rax
; X64-NEXT: retq
%abs = tail call i64 @llvm.abs.i64(i64 %x, i1 false)
%neg = sub i64 %y, %abs