author     Yatao Wang <ningxinr@live.cn>  2025-09-02 10:32:31 -0700
committer  GitHub <noreply@github.com>    2025-09-02 17:32:31 +0000
commit     9b2c6052a8668711fb18844f557e229adb0306a1
tree       8bc09987a9e92f11dbef4f7bf6d87f0c91aa0688
parent     d6a72cb300f1d4131eee4fdb101741fb2be1f780
Reland "[AArch64] AArch64TargetLowering::computeKnownBitsForTargetNode - add support for AArch64ISD::MOV/MVN constants" (#155696)
Reland #154039
Per @davemgreen's suggestion, mask the shift amount so it can never exceed
the bit width. This change is confirmed to fix the test failures on the
x86 and aarch64 sanitizer bots.
Fixes: https://github.com/llvm/llvm-project/issues/153159
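
To see the masking fix in isolation: shifting an N-bit integer by N or more is undefined behaviour in C++, which is what the sanitizer bots tripped on. Below is a minimal standalone sketch, not the LLVM code itself; maskedShift is a hypothetical helper (the patch instead routes the operand through AArch64_AM::getShiftValue(), which masks the encoded amount), and it assumes the bit width is a power of two.

#include <cstdint>
#include <iostream>

// Hypothetical helper: clamp the encoded shift amount into [0, BitWidth)
// before shifting, so the shift below can never be >= BitWidth.
// Assumes BitWidth is a power of two.
uint64_t maskedShift(uint64_t Imm, unsigned ShiftEncoding, unsigned BitWidth) {
  unsigned ShiftAmt = ShiftEncoding & (BitWidth - 1); // always < BitWidth
  return Imm << ShiftAmt;
}

int main() {
  // An out-of-range encoding such as 64 wraps to 0 instead of producing
  // an undefined shift of a 64-bit value by 64 bits.
  std::cout << std::hex << maskedShift(0xFF, 64, 64) << "\n"; // prints ff
}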
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 36
1 file changed, 36 insertions, 0 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 3a51305..f1e8fb7 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2664,6 +2664,32 @@ void AArch64TargetLowering::computeKnownBitsForTargetNode(
                  << Op->getConstantOperandVal(1)));
     break;
   }
+  case AArch64ISD::MOVImsl: {
+    unsigned ShiftAmt = AArch64_AM::getShiftValue(Op->getConstantOperandVal(1));
+    Known = KnownBits::makeConstant(APInt(
+        Known.getBitWidth(), ~(~Op->getConstantOperandVal(0) << ShiftAmt)));
+    break;
+  }
+  case AArch64ISD::MOVIedit: {
+    Known = KnownBits::makeConstant(APInt(
+        Known.getBitWidth(),
+        AArch64_AM::decodeAdvSIMDModImmType10(Op->getConstantOperandVal(0))));
+    break;
+  }
+  case AArch64ISD::MVNIshift: {
+    Known = KnownBits::makeConstant(
+        APInt(Known.getBitWidth(),
+              ~(Op->getConstantOperandVal(0) << Op->getConstantOperandVal(1)),
+              /*isSigned*/ false, /*implicitTrunc*/ true));
+    break;
+  }
+  case AArch64ISD::MVNImsl: {
+    unsigned ShiftAmt = AArch64_AM::getShiftValue(Op->getConstantOperandVal(1));
+    Known = KnownBits::makeConstant(
+        APInt(Known.getBitWidth(), (~Op->getConstantOperandVal(0) << ShiftAmt),
+              /*isSigned*/ false, /*implicitTrunc*/ true));
+    break;
+  }
   case AArch64ISD::LOADgot:
   case AArch64ISD::ADDlow: {
     if (!Subtarget->isTargetILP32())
@@ -30805,6 +30831,16 @@ bool AArch64TargetLowering::isTargetCanonicalConstantNode(SDValue Op) const {
   return Op.getOpcode() == AArch64ISD::DUP ||
          Op.getOpcode() == AArch64ISD::MOVI ||
          Op.getOpcode() == AArch64ISD::MOVIshift ||
+         Op.getOpcode() == AArch64ISD::MOVImsl ||
+         Op.getOpcode() == AArch64ISD::MOVIedit ||
+         Op.getOpcode() == AArch64ISD::MVNIshift ||
+         Op.getOpcode() == AArch64ISD::MVNImsl ||
+         // Ignoring fneg(movi(0)), because if it is folded to FPConstant(-0.0),
+         // ISel will select fmov(mov i64 0x8000000000000000), resulting in a
+         // fmov from fpr to gpr, which is more expensive than fneg(movi(0))
+         (Op.getOpcode() == ISD::FNEG &&
+          Op.getOperand(0).getOpcode() == AArch64ISD::MOVIedit &&
+          Op.getOperand(0).getConstantOperandVal(0) == 0) ||
          (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
           Op.getOperand(0).getOpcode() == AArch64ISD::DUP) ||
          TargetLowering::isTargetCanonicalConstantNode(Op);
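
For a concrete feel of the MOVImsl arithmetic in the first hunk: an MSL ("shift ones") modified immediate fills the vacated low bits with ones rather than zeros, and ~(~Imm << ShiftAmt) reproduces that with ordinary integer operations. Here is a minimal standalone sketch using plain uint64_t in place of APInt; the moviMsl name is invented for illustration.

#include <cassert>
#include <cstdint>

// ~(~Imm << ShiftAmt): shifting the complement fills the low bits with
// zeros, so complementing back leaves them as ones -- exactly the MSL
// "shift ones" semantics the patch models for MOVImsl.
uint64_t moviMsl(uint64_t Imm, unsigned ShiftAmt) {
  return ~(~Imm << ShiftAmt);
}

int main() {
  // movi v0.4s, #0xab, msl #8 puts 0x0000abff in every 32-bit lane.
  assert(moviMsl(0xab, 8) == 0xabff);
  // movi v0.4s, #0xab, msl #16 puts 0x00abffff in every lane.
  assert(moviMsl(0xab, 16) == 0xabffff);
  return 0;
}

MVNImsl is then just the bitwise complement of this value, which is why the patch computes (~Imm << ShiftAmt) directly, with implicit truncation to the lane width.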