aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib
diff options
context:
space:
mode:
authorBradley Smith <bradley.smith@arm.com>2021-06-25 12:11:33 +0100
committerBradley Smith <bradley.smith@arm.com>2021-06-28 15:06:06 +0100
commitc089e29aa47f8833d4370ac1a87a17f7b3a585cf (patch)
tree9bc597e7f6635249c8605afa738e8d3b7548746f /llvm/lib
parent8d5c0b8768f729d48e25251755ec12cfd785c934 (diff)
downloadllvm-c089e29aa47f8833d4370ac1a87a17f7b3a585cf.zip
llvm-c089e29aa47f8833d4370ac1a87a17f7b3a585cf.tar.gz
llvm-c089e29aa47f8833d4370ac1a87a17f7b3a585cf.tar.bz2
[AArch64][SVE] DAG combine SETCC_MERGE_ZERO of a SETCC_MERGE_ZERO
This helps remove extra comparisons when generating masks for fixed-length masked operations.

Differential Revision: https://reviews.llvm.org/D104910
Diffstat (limited to 'llvm/lib')
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.cpp23
1 file changed, 23 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 9886d63..16bb7eb 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -15508,6 +15508,27 @@ static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
+static SDValue performSetccMergeZeroCombine(SDNode *N, SelectionDAG &DAG) {
+ assert(N->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
+ "Unexpected opcode!");
+
+ SDValue Pred = N->getOperand(0);
+ SDValue LHS = N->getOperand(1);
+ SDValue RHS = N->getOperand(2);
+ ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get();
+
+ // setcc_merge_zero pred (sign_extend (setcc_merge_zero ... pred ...)), 0, ne
+ // => inner setcc_merge_zero
+ if (Cond == ISD::SETNE && isZerosVector(RHS.getNode()) &&
+ LHS->getOpcode() == ISD::SIGN_EXTEND &&
+ LHS->getOperand(0)->getValueType(0) == N->getValueType(0) &&
+ LHS->getOperand(0)->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
+ LHS->getOperand(0)->getOperand(0) == Pred)
+ return LHS->getOperand(0);
+
+ return SDValue();
+}
+
// Optimize some simple tbz/tbnz cases. Returns the new operand and bit to test
// as well as whether the test should be inverted. This code is required to
// catch these cases (as opposed to standard dag combines) because
@@ -16366,6 +16387,8 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
return performSpliceCombine(N, DAG);
case AArch64ISD::UZP1:
return performUzpCombine(N, DAG);
+ case AArch64ISD::SETCC_MERGE_ZERO:
+ return performSetccMergeZeroCombine(N, DAG);
case AArch64ISD::GLD1_MERGE_ZERO:
case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
case AArch64ISD::GLD1_UXTW_MERGE_ZERO: