Diffstat (limited to 'llvm/lib/Target/RISCV')
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 70
 llvm/lib/Target/RISCV/RISCVInstrInfo.cpp    |  2
 llvm/lib/Target/RISCV/RISCVInstrInfo.td     |  4
 llvm/lib/Target/RISCV/RISCVInstrInfoD.td    |  1
 llvm/lib/Target/RISCV/RISCVInstrInfoF.td    |  1
 llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp | 31
6 files changed, 61 insertions, 48 deletions
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 56881f7..e0cf739 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -19794,7 +19794,9 @@ legalizeScatterGatherIndexType(SDLoc DL, SDValue &Index,
     // LLVM's legalization take care of the splitting.
     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
     Index = DAG.getNode(ISD::SIGN_EXTEND, DL,
-                        IndexVT.changeVectorElementType(XLenVT), Index);
+                        EVT::getVectorVT(*DAG.getContext(), XLenVT,
+                                         IndexVT.getVectorElementCount()),
+                        Index);
   }
   IndexType = ISD::UNSIGNED_SCALED;
   return true;
@@ -23944,7 +23946,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                         .Case("{t0}", RISCV::X5)
                         .Case("{t1}", RISCV::X6)
                         .Case("{t2}", RISCV::X7)
-                        .Cases("{s0}", "{fp}", RISCV::X8)
+                        .Cases({"{s0}", "{fp}"}, RISCV::X8)
                         .Case("{s1}", RISCV::X9)
                         .Case("{a0}", RISCV::X10)
                         .Case("{a1}", RISCV::X11)
@@ -23981,38 +23983,38 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
   // use the ABI names in register constraint lists.
   if (Subtarget.hasStdExtF()) {
     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
-                        .Cases("{f0}", "{ft0}", RISCV::F0_F)
-                        .Cases("{f1}", "{ft1}", RISCV::F1_F)
-                        .Cases("{f2}", "{ft2}", RISCV::F2_F)
-                        .Cases("{f3}", "{ft3}", RISCV::F3_F)
-                        .Cases("{f4}", "{ft4}", RISCV::F4_F)
-                        .Cases("{f5}", "{ft5}", RISCV::F5_F)
-                        .Cases("{f6}", "{ft6}", RISCV::F6_F)
-                        .Cases("{f7}", "{ft7}", RISCV::F7_F)
-                        .Cases("{f8}", "{fs0}", RISCV::F8_F)
-                        .Cases("{f9}", "{fs1}", RISCV::F9_F)
-                        .Cases("{f10}", "{fa0}", RISCV::F10_F)
-                        .Cases("{f11}", "{fa1}", RISCV::F11_F)
-                        .Cases("{f12}", "{fa2}", RISCV::F12_F)
-                        .Cases("{f13}", "{fa3}", RISCV::F13_F)
-                        .Cases("{f14}", "{fa4}", RISCV::F14_F)
-                        .Cases("{f15}", "{fa5}", RISCV::F15_F)
-                        .Cases("{f16}", "{fa6}", RISCV::F16_F)
-                        .Cases("{f17}", "{fa7}", RISCV::F17_F)
-                        .Cases("{f18}", "{fs2}", RISCV::F18_F)
-                        .Cases("{f19}", "{fs3}", RISCV::F19_F)
-                        .Cases("{f20}", "{fs4}", RISCV::F20_F)
-                        .Cases("{f21}", "{fs5}", RISCV::F21_F)
-                        .Cases("{f22}", "{fs6}", RISCV::F22_F)
-                        .Cases("{f23}", "{fs7}", RISCV::F23_F)
-                        .Cases("{f24}", "{fs8}", RISCV::F24_F)
-                        .Cases("{f25}", "{fs9}", RISCV::F25_F)
-                        .Cases("{f26}", "{fs10}", RISCV::F26_F)
-                        .Cases("{f27}", "{fs11}", RISCV::F27_F)
-                        .Cases("{f28}", "{ft8}", RISCV::F28_F)
-                        .Cases("{f29}", "{ft9}", RISCV::F29_F)
-                        .Cases("{f30}", "{ft10}", RISCV::F30_F)
-                        .Cases("{f31}", "{ft11}", RISCV::F31_F)
+                        .Cases({"{f0}", "{ft0}"}, RISCV::F0_F)
+                        .Cases({"{f1}", "{ft1}"}, RISCV::F1_F)
+                        .Cases({"{f2}", "{ft2}"}, RISCV::F2_F)
+                        .Cases({"{f3}", "{ft3}"}, RISCV::F3_F)
+                        .Cases({"{f4}", "{ft4}"}, RISCV::F4_F)
+                        .Cases({"{f5}", "{ft5}"}, RISCV::F5_F)
+                        .Cases({"{f6}", "{ft6}"}, RISCV::F6_F)
+                        .Cases({"{f7}", "{ft7}"}, RISCV::F7_F)
+                        .Cases({"{f8}", "{fs0}"}, RISCV::F8_F)
+                        .Cases({"{f9}", "{fs1}"}, RISCV::F9_F)
+                        .Cases({"{f10}", "{fa0}"}, RISCV::F10_F)
+                        .Cases({"{f11}", "{fa1}"}, RISCV::F11_F)
+                        .Cases({"{f12}", "{fa2}"}, RISCV::F12_F)
+                        .Cases({"{f13}", "{fa3}"}, RISCV::F13_F)
+                        .Cases({"{f14}", "{fa4}"}, RISCV::F14_F)
+                        .Cases({"{f15}", "{fa5}"}, RISCV::F15_F)
+                        .Cases({"{f16}", "{fa6}"}, RISCV::F16_F)
+                        .Cases({"{f17}", "{fa7}"}, RISCV::F17_F)
+                        .Cases({"{f18}", "{fs2}"}, RISCV::F18_F)
+                        .Cases({"{f19}", "{fs3}"}, RISCV::F19_F)
+                        .Cases({"{f20}", "{fs4}"}, RISCV::F20_F)
+                        .Cases({"{f21}", "{fs5}"}, RISCV::F21_F)
+                        .Cases({"{f22}", "{fs6}"}, RISCV::F22_F)
+                        .Cases({"{f23}", "{fs7}"}, RISCV::F23_F)
+                        .Cases({"{f24}", "{fs8}"}, RISCV::F24_F)
+                        .Cases({"{f25}", "{fs9}"}, RISCV::F25_F)
+                        .Cases({"{f26}", "{fs10}"}, RISCV::F26_F)
+                        .Cases({"{f27}", "{fs11}"}, RISCV::F27_F)
+                        .Cases({"{f28}", "{ft8}"}, RISCV::F28_F)
+                        .Cases({"{f29}", "{ft9}"}, RISCV::F29_F)
+                        .Cases({"{f30}", "{ft10}"}, RISCV::F30_F)
+                        .Cases({"{f31}", "{ft11}"}, RISCV::F31_F)
                         .Default(RISCV::NoRegister);
     if (FReg != RISCV::NoRegister) {
       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
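Note: the getRegForInlineAsmConstraint hunks above are mechanical, switching StringSwitch::Cases from the variadic form to the initializer-list form that this commit adopts. A minimal standalone sketch of that overload (placeholder register numbers, not the real RISCV::X* enumerators) might look like:

// Sketch only: maps inline-asm constraint spellings to a register number.
// Requires LLVM's ADT headers; the values below are illustrative placeholders.
#include "llvm/ADT/StringSwitch.h"
#include <cstdio>

static unsigned lookupGPRNumber(llvm::StringRef Constraint) {
  return llvm::StringSwitch<unsigned>(Constraint)
      .Case("{zero}", 0)
      .Cases({"{s0}", "{fp}"}, 8) // two ABI spellings, one register
      .Case("{s1}", 9)
      .Default(~0u);
}

int main() {
  // Both spellings resolve to the same number, mirroring the .Cases entry.
  std::printf("%u %u\n", lookupGPRNumber("{fp}"), lookupGPRNumber("{s0}"));
  return 0;
}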
.Cases({"{f24}", "{fs8}"}, RISCV::F24_F) + .Cases({"{f25}", "{fs9}"}, RISCV::F25_F) + .Cases({"{f26}", "{fs10}"}, RISCV::F26_F) + .Cases({"{f27}", "{fs11}"}, RISCV::F27_F) + .Cases({"{f28}", "{ft8}"}, RISCV::F28_F) + .Cases({"{f29}", "{ft9}"}, RISCV::F29_F) + .Cases({"{f30}", "{ft10}"}, RISCV::F30_F) + .Cases({"{f31}", "{ft11}"}, RISCV::F31_F) .Default(RISCV::NoRegister); if (FReg != RISCV::NoRegister) { assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg"); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp index 3a7013d..c9df787 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -869,7 +869,7 @@ std::optional<unsigned> getFoldedOpcode(MachineFunction &MF, MachineInstr &MI, } } -// This is the version used during inline spilling +// This is the version used during InlineSpiller::spillAroundUses MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl( MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS, diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td index 7c89686..9cb53fb 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td @@ -768,7 +768,7 @@ def BGE : BranchCC_rri<0b101, "bge">; def BLTU : BranchCC_rri<0b110, "bltu">; def BGEU : BranchCC_rri<0b111, "bgeu">; -let IsSignExtendingOpW = 1 in { +let IsSignExtendingOpW = 1, canFoldAsLoad = 1 in { def LB : Load_ri<0b000, "lb">, Sched<[WriteLDB, ReadMemBase]>; def LH : Load_ri<0b001, "lh">, Sched<[WriteLDH, ReadMemBase]>; def LW : Load_ri<0b010, "lw">, Sched<[WriteLDW, ReadMemBase]>; @@ -889,8 +889,10 @@ def CSRRCI : CSR_ii<0b111, "csrrci">; /// RV64I instructions let Predicates = [IsRV64] in { +let canFoldAsLoad = 1 in { def LWU : Load_ri<0b110, "lwu">, Sched<[WriteLDW, ReadMemBase]>; def LD : Load_ri<0b011, "ld">, Sched<[WriteLDD, ReadMemBase]>; +} def SD : Store_rri<0b011, "sd">, Sched<[WriteSTD, ReadStoreData, ReadMemBase]>; let IsSignExtendingOpW = 1 in { diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td index afac37d..4ffe3e6 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td @@ -71,6 +71,7 @@ defvar DExtsRV64 = [DExt, ZdinxExt]; //===----------------------------------------------------------------------===// let Predicates = [HasStdExtD] in { +let canFoldAsLoad = 1 in def FLD : FPLoad_r<0b011, "fld", FPR64, WriteFLD64>; // Operands for stores are in the order srcreg, base, offset rather than diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td index 6571d99..b30f8ec 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td @@ -330,6 +330,7 @@ class PseudoFROUND<DAGOperand Ty, ValueType vt, ValueType intvt = XLenVT> //===----------------------------------------------------------------------===// let Predicates = [HasStdExtF] in { +let canFoldAsLoad = 1 in def FLW : FPLoad_r<0b010, "flw", FPR32, WriteFLD32>; // Operands for stores are in the order srcreg, base, offset rather than diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp index e9f43b9..84bb294 100644 --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp @@ -438,18 +438,19 @@ void 
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index e9f43b9..84bb294 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -438,18 +438,19 @@ void RISCVRegisterInfo::lowerSegmentSpillReload(MachineBasicBlock::iterator II,
   TypeSize VRegSize = OldLoc.getValue().divideCoefficientBy(NumRegs);
   Register VLENB = 0;
-  unsigned PreHandledNum = 0;
+  unsigned VLENBShift = 0;
+  unsigned PrevHandledNum = 0;
   unsigned I = 0;
   while (I != NumRegs) {
     auto [LMulHandled, RegClass, Opcode] =
         getSpillReloadInfo(NumRegs - I, RegEncoding, IsSpill);
     auto [RegNumHandled, _] = RISCVVType::decodeVLMUL(LMulHandled);
     bool IsLast = I + RegNumHandled == NumRegs;
-    if (PreHandledNum) {
+    if (PrevHandledNum) {
       Register Step;
       // Optimize for constant VLEN.
       if (auto VLEN = STI.getRealVLen()) {
-        int64_t Offset = *VLEN / 8 * PreHandledNum;
+        int64_t Offset = *VLEN / 8 * PrevHandledNum;
         Step = MRI.createVirtualRegister(&RISCV::GPRRegClass);
         STI.getInstrInfo()->movImm(MBB, II, DL, Step, Offset);
       } else {
@@ -457,15 +458,21 @@ void RISCVRegisterInfo::lowerSegmentSpillReload(MachineBasicBlock::iterator II,
         VLENB = MRI.createVirtualRegister(&RISCV::GPRRegClass);
         BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VLENB);
       }
-      uint32_t ShiftAmount = Log2_32(PreHandledNum);
-      if (ShiftAmount == 0)
-        Step = VLENB;
-      else {
-        Step = MRI.createVirtualRegister(&RISCV::GPRRegClass);
-        BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), Step)
-            .addReg(VLENB, getKillRegState(IsLast))
-            .addImm(ShiftAmount);
+      uint32_t ShiftAmount = Log2_32(PrevHandledNum);
+      // To avoid using an extra register, we shift the VLENB register and
+      // remember how much it has been shifted. We can then use relative
+      // shifts to adjust to the desired shift amount.
+      if (VLENBShift > ShiftAmount) {
+        BuildMI(MBB, II, DL, TII->get(RISCV::SRLI), VLENB)
+            .addReg(VLENB, RegState::Kill)
+            .addImm(VLENBShift - ShiftAmount);
+      } else if (VLENBShift < ShiftAmount) {
+        BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VLENB)
+            .addReg(VLENB, RegState::Kill)
+            .addImm(ShiftAmount - VLENBShift);
       }
+      VLENBShift = ShiftAmount;
+      Step = VLENB;
     }
 
     BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
@@ -489,7 +496,7 @@ void RISCVRegisterInfo::lowerSegmentSpillReload(MachineBasicBlock::iterator II,
     if (IsSpill)
       MIB.addReg(Reg, RegState::Implicit);
 
-    PreHandledNum = RegNumHandled;
+    PrevHandledNum = RegNumHandled;
     RegEncoding += RegNumHandled;
     I += RegNumHandled;
   }
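Note: the lowerSegmentSpillReload change keeps a single VLENB copy live and records how far it has already been shifted (VLENBShift), so each subsequent step only needs a relative SRLI/SLLI rather than a fresh scratch register. A standalone sketch of that bookkeeping (hypothetical helper using plain integers instead of MachineInstr building) might look like:

// Sketch only: models the VLENBShift bookkeeping from the hunks above.
#include <cstdint>
#include <cstdio>

static uint32_t log2u32(uint32_t X) { // stand-in for llvm::Log2_32
  uint32_t L = 0;
  while (X >>= 1)
    ++L;
  return L;
}

int main() {
  uint32_t VLENBShift = 0; // how far the live vlenb copy is currently shifted
  for (uint32_t PrevHandledNum : {8u, 4u, 2u, 1u}) {
    uint32_t ShiftAmount = log2u32(PrevHandledNum);
    if (VLENBShift > ShiftAmount)
      std::printf("srli vlenb, vlenb, %u\n", VLENBShift - ShiftAmount);
    else if (VLENBShift < ShiftAmount)
      std::printf("slli vlenb, vlenb, %u\n", ShiftAmount - VLENBShift);
    else
      std::printf("reuse vlenb unchanged\n");
    VLENBShift = ShiftAmount;
    // vlenb now holds VLENB << ShiftAmount, i.e. VLEN/8 * PrevHandledNum.
  }
  return 0;
}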
