Diffstat (limited to 'llvm/lib/Target/RISCV')
 llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp | 125
 llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h       |   1
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp              |  16
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp              |  66
 llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp           |   2
 llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td               |   1
 6 files changed, 164 insertions(+), 47 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 282cf5d..3d5a55c 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -95,7 +95,8 @@ private:
   void addVectorLoadStoreOperands(MachineInstr &I,
                                   SmallVectorImpl<SrcOp> &SrcOps,
                                   unsigned &CurOp, bool IsMasked,
-                                  bool IsStrided) const;
+                                  bool IsStridedOrIndexed,
+                                  LLT *IndexVT = nullptr) const;

   bool selectIntrinsicWithSideEffects(MachineInstr &I,
                                       MachineIRBuilder &MIB) const;
@@ -722,15 +723,17 @@ static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {

 void RISCVInstructionSelector::addVectorLoadStoreOperands(
     MachineInstr &I, SmallVectorImpl<SrcOp> &SrcOps, unsigned &CurOp,
-    bool IsMasked, bool IsStrided) const {
+    bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
   // Base Pointer
   auto PtrReg = I.getOperand(CurOp++).getReg();
   SrcOps.push_back(PtrReg);

-  // Stride
-  if (IsStrided) {
+  // Stride or Index
+  if (IsStridedOrIndexed) {
     auto StrideReg = I.getOperand(CurOp++).getReg();
     SrcOps.push_back(StrideReg);
+    if (IndexVT)
+      *IndexVT = MRI->getType(StrideReg);
   }

   // Mask
@@ -805,6 +808,70 @@ bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
     I.eraseFromParent();
     return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
   }
+  case Intrinsic::riscv_vloxei:
+  case Intrinsic::riscv_vloxei_mask:
+  case Intrinsic::riscv_vluxei:
+  case Intrinsic::riscv_vluxei_mask: {
+    bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
+                    IntrinID == Intrinsic::riscv_vluxei_mask;
+    bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
+                     IntrinID == Intrinsic::riscv_vloxei_mask;
+    LLT VT = MRI->getType(I.getOperand(0).getReg());
+    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+
+    // Result vector
+    const Register DstReg = I.getOperand(0).getReg();
+
+    // Sources
+    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
+    unsigned CurOp = 2;
+    SmallVector<SrcOp, 4> SrcOps; // Source registers.
+
+    // Passthru
+    if (HasPassthruOperand) {
+      auto PassthruReg = I.getOperand(CurOp++).getReg();
+      SrcOps.push_back(PassthruReg);
+    } else {
+      // Use NoRegister if there is no specified passthru.
+      SrcOps.push_back(Register());
+    }
+    LLT IndexVT;
+    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);
+
+    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
+    RISCVVType::VLMUL IndexLMUL =
+        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
+    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+      reportFatalUsageError("The V extension does not support EEW=64 for index "
+                            "values when XLEN=32");
+    }
+    const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
+        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
+        static_cast<unsigned>(IndexLMUL));
+
+    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);
+
+    // Select VL
+    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
+    for (auto &RenderFn : *VLOpFn)
+      RenderFn(PseudoMI);
+
+    // SEW
+    PseudoMI.addImm(Log2SEW);
+
+    // Policy
+    uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
+    if (IsMasked)
+      Policy = I.getOperand(CurOp++).getImm();
+    PseudoMI.addImm(Policy);
+
+    // Memref
+    PseudoMI.cloneMemRefs(I);
+
+    I.eraseFromParent();
+    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
+  }
   case Intrinsic::riscv_vsm:
   case Intrinsic::riscv_vse:
   case Intrinsic::riscv_vse_mask:
@@ -847,6 +914,56 @@ bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
     I.eraseFromParent();
     return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
   }
+  case Intrinsic::riscv_vsoxei:
+  case Intrinsic::riscv_vsoxei_mask:
+  case Intrinsic::riscv_vsuxei:
+  case Intrinsic::riscv_vsuxei_mask: {
+    bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
+                    IntrinID == Intrinsic::riscv_vsuxei_mask;
+    bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
+                     IntrinID == Intrinsic::riscv_vsoxei_mask;
+    LLT VT = MRI->getType(I.getOperand(1).getReg());
+    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+
+    // Sources
+    unsigned CurOp = 1;
+    SmallVector<SrcOp, 4> SrcOps; // Source registers.
+
+    // Store value
+    auto PassthruReg = I.getOperand(CurOp++).getReg();
+    SrcOps.push_back(PassthruReg);
+
+    LLT IndexVT;
+    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);
+
+    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
+    RISCVVType::VLMUL IndexLMUL =
+        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
+    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+      reportFatalUsageError("The V extension does not support EEW=64 for index "
+                            "values when XLEN=32");
+    }
+    const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
+        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
+        static_cast<unsigned>(IndexLMUL));
+
+    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);
+
+    // Select VL
+    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
+    for (auto &RenderFn : *VLOpFn)
+      RenderFn(PseudoMI);
+
+    // SEW
+    PseudoMI.addImm(Log2SEW);
+
+    // Memref
+    PseudoMI.cloneMemRefs(I);
+
+    I.eraseFromParent();
+    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
+  }
   }
 }

diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index e75dfe3..5b8cfb2 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -407,7 +407,6 @@ enum OperandType : unsigned {
   OPERAND_SIMM5_PLUS1,
   OPERAND_SIMM6,
   OPERAND_SIMM6_NONZERO,
-  OPERAND_SIMM8,
   OPERAND_SIMM8_UNSIGNED,
   OPERAND_SIMM10,
   OPERAND_SIMM10_LSB0000_NONZERO,
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index b25a054..9078335 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -371,8 +371,8 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked,
   RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
   if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
-    report_fatal_error("The V extension does not support EEW=64 for index "
-                       "values when XLEN=32");
+    reportFatalUsageError("The V extension does not support EEW=64 for index "
+                          "values when XLEN=32");
   }
   const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
       NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
@@ -444,8 +444,8 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked,
   RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
   if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
-    report_fatal_error("The V extension does not support EEW=64 for index "
-                       "values when XLEN=32");
+    reportFatalUsageError("The V extension does not support EEW=64 for index "
+                          "values when XLEN=32");
   }
   const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
       NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
@@ -2223,8 +2223,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
-        report_fatal_error("The V extension does not support EEW=64 for index "
-                           "values when XLEN=32");
+        reportFatalUsageError("The V extension does not support EEW=64 for "
+                              "index values when XLEN=32");
       }
       const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
           IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
@@ -2457,8 +2457,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
-        report_fatal_error("The V extension does not support EEW=64 for index "
-                           "values when XLEN=32");
+        reportFatalUsageError("The V extension does not support EEW=64 for "
+                              "index values when XLEN=32");
       }
       const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
           IsMasked, IsOrdered, IndexLog2EEW,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c6a8b84..e0cf739 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -23946,7 +23946,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                .Case("{t0}", RISCV::X5)
                                .Case("{t1}", RISCV::X6)
                                .Case("{t2}", RISCV::X7)
-                               .Cases("{s0}", "{fp}", RISCV::X8)
+                               .Cases({"{s0}", "{fp}"}, RISCV::X8)
                                .Case("{s1}", RISCV::X9)
                                .Case("{a0}", RISCV::X10)
                                .Case("{a1}", RISCV::X11)
@@ -23983,38 +23983,38 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
   // use the ABI names in register constraint lists.
   if (Subtarget.hasStdExtF()) {
     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
-                        .Cases("{f0}", "{ft0}", RISCV::F0_F)
-                        .Cases("{f1}", "{ft1}", RISCV::F1_F)
-                        .Cases("{f2}", "{ft2}", RISCV::F2_F)
-                        .Cases("{f3}", "{ft3}", RISCV::F3_F)
-                        .Cases("{f4}", "{ft4}", RISCV::F4_F)
-                        .Cases("{f5}", "{ft5}", RISCV::F5_F)
-                        .Cases("{f6}", "{ft6}", RISCV::F6_F)
-                        .Cases("{f7}", "{ft7}", RISCV::F7_F)
-                        .Cases("{f8}", "{fs0}", RISCV::F8_F)
-                        .Cases("{f9}", "{fs1}", RISCV::F9_F)
-                        .Cases("{f10}", "{fa0}", RISCV::F10_F)
-                        .Cases("{f11}", "{fa1}", RISCV::F11_F)
-                        .Cases("{f12}", "{fa2}", RISCV::F12_F)
-                        .Cases("{f13}", "{fa3}", RISCV::F13_F)
-                        .Cases("{f14}", "{fa4}", RISCV::F14_F)
-                        .Cases("{f15}", "{fa5}", RISCV::F15_F)
-                        .Cases("{f16}", "{fa6}", RISCV::F16_F)
-                        .Cases("{f17}", "{fa7}", RISCV::F17_F)
-                        .Cases("{f18}", "{fs2}", RISCV::F18_F)
-                        .Cases("{f19}", "{fs3}", RISCV::F19_F)
-                        .Cases("{f20}", "{fs4}", RISCV::F20_F)
-                        .Cases("{f21}", "{fs5}", RISCV::F21_F)
-                        .Cases("{f22}", "{fs6}", RISCV::F22_F)
-                        .Cases("{f23}", "{fs7}", RISCV::F23_F)
-                        .Cases("{f24}", "{fs8}", RISCV::F24_F)
-                        .Cases("{f25}", "{fs9}", RISCV::F25_F)
-                        .Cases("{f26}", "{fs10}", RISCV::F26_F)
-                        .Cases("{f27}", "{fs11}", RISCV::F27_F)
-                        .Cases("{f28}", "{ft8}", RISCV::F28_F)
-                        .Cases("{f29}", "{ft9}", RISCV::F29_F)
-                        .Cases("{f30}", "{ft10}", RISCV::F30_F)
-                        .Cases("{f31}", "{ft11}", RISCV::F31_F)
+                        .Cases({"{f0}", "{ft0}"}, RISCV::F0_F)
+                        .Cases({"{f1}", "{ft1}"}, RISCV::F1_F)
+                        .Cases({"{f2}", "{ft2}"}, RISCV::F2_F)
+                        .Cases({"{f3}", "{ft3}"}, RISCV::F3_F)
+                        .Cases({"{f4}", "{ft4}"}, RISCV::F4_F)
+                        .Cases({"{f5}", "{ft5}"}, RISCV::F5_F)
+                        .Cases({"{f6}", "{ft6}"}, RISCV::F6_F)
+                        .Cases({"{f7}", "{ft7}"}, RISCV::F7_F)
+                        .Cases({"{f8}", "{fs0}"}, RISCV::F8_F)
+                        .Cases({"{f9}", "{fs1}"}, RISCV::F9_F)
+                        .Cases({"{f10}", "{fa0}"}, RISCV::F10_F)
+                        .Cases({"{f11}", "{fa1}"}, RISCV::F11_F)
+                        .Cases({"{f12}", "{fa2}"}, RISCV::F12_F)
+                        .Cases({"{f13}", "{fa3}"}, RISCV::F13_F)
+                        .Cases({"{f14}", "{fa4}"}, RISCV::F14_F)
+                        .Cases({"{f15}", "{fa5}"}, RISCV::F15_F)
+                        .Cases({"{f16}", "{fa6}"}, RISCV::F16_F)
+                        .Cases({"{f17}", "{fa7}"}, RISCV::F17_F)
+                        .Cases({"{f18}", "{fs2}"}, RISCV::F18_F)
+                        .Cases({"{f19}", "{fs3}"}, RISCV::F19_F)
+                        .Cases({"{f20}", "{fs4}"}, RISCV::F20_F)
+                        .Cases({"{f21}", "{fs5}"}, RISCV::F21_F)
+                        .Cases({"{f22}", "{fs6}"}, RISCV::F22_F)
+                        .Cases({"{f23}", "{fs7}"}, RISCV::F23_F)
+                        .Cases({"{f24}", "{fs8}"}, RISCV::F24_F)
+                        .Cases({"{f25}", "{fs9}"}, RISCV::F25_F)
+                        .Cases({"{f26}", "{fs10}"}, RISCV::F26_F)
+                        .Cases({"{f27}", "{fs11}"}, RISCV::F27_F)
+                        .Cases({"{f28}", "{ft8}"}, RISCV::F28_F)
+                        .Cases({"{f29}", "{ft9}"}, RISCV::F29_F)
+                        .Cases({"{f30}", "{ft10}"}, RISCV::F30_F)
+                        .Cases({"{f31}", "{ft11}"}, RISCV::F31_F)
                         .Default(RISCV::NoRegister);
     if (FReg != RISCV::NoRegister) {
       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
index a1c8e23..c58a5c0 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -48,7 +48,7 @@ class VXRMInfo {
   } State = Uninitialized;

 public:
-  VXRMInfo() {}
+  VXRMInfo() = default;

   static VXRMInfo getUnknown() {
     VXRMInfo Info;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
index c31713e..1c6a5af 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -90,6 +90,7 @@ defvar ZfhminDExts = [ZfhminDExt, ZhinxminZdinxExt, ZhinxminZdinx32Ext];
 //===----------------------------------------------------------------------===//

 let Predicates = [HasHalfFPLoadStoreMove] in {
+let canFoldAsLoad = 1 in
 def FLH : FPLoad_r<0b001, "flh", FPR16, WriteFLD16>;

 // Operands for stores are in the order srcreg, base, offset rather than
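Note on the new GlobalISel cases in RISCVInstructionSelector.cpp: selecting an indexed load/store intrinsic reduces to a table lookup keyed on five properties of the access (masked or not, ordered or not, index EEW, data LMUL, index LMUL). The sketch below is a stand-alone illustration of that keying scheme only; it is not the TableGen-generated getVLXPseudo/getVSXPseudo tables, and every name and value in it is hypothetical.

```cpp
#include <cstdint>
#include <map>

// Hypothetical stand-in for the TableGen-generated pseudo tables: each
// combination of the five properties maps to exactly one pseudo opcode.
struct VLXKey {
  bool Masked;
  bool Ordered;         // vloxei/vsoxei are ordered; vluxei/vsuxei are not
  uint8_t IndexLog2EEW; // 3..6, i.e. index EEW of 8/16/32/64 bits
  uint8_t DataLMUL;     // encoded register-group multiplier of the data
  uint8_t IndexLMUL;    // encoded register-group multiplier of the index
  auto operator<=>(const VLXKey &) const = default;
};

int main() {
  std::map<VLXKey, unsigned> PseudoTable; // value: pseudo opcode id
  // Register one made-up entry: unmasked, ordered, EEW=32 indices.
  PseudoTable[{false, true, /*IndexLog2EEW=*/5, /*DataLMUL=*/0,
               /*IndexLMUL=*/0}] = 1;
  // Selection fetches the unique pseudo for the observed combination.
  return PseudoTable.at({false, true, 5, 0, 0}) == 1 ? 0 : 1;
}
```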

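Note on the StringSwitch changes in RISCVISelLowering.cpp: the patch moves from the variadic `Cases("a", "b", Value)` form to the initializer-list overload, where every key in the braced list maps to the same result value. A minimal usage sketch follows; the helper name and return values are made up for illustration, not part of the patch.

```cpp
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

// Made-up helper demonstrating the initializer-list overload of
// StringSwitch::Cases: both ABI spellings resolve to the same encoding.
static unsigned matchGPRConstraint(llvm::StringRef Constraint) {
  return llvm::StringSwitch<unsigned>(Constraint)
      .Cases({"{s0}", "{fp}"}, 8) // x8 answers to both names
      .Case("{s1}", 9)
      .Default(0); // unknown constraint
}
```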