Diffstat (limited to 'llvm/lib/Target/RISCV')
19 files changed, 291 insertions, 360 deletions
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index a143d85..d71c42c 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -3849,9 +3849,14 @@ bool RISCVAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
   switch (Inst.getOpcode()) {
   default:
     break;
-  case RISCV::PseudoC_ADDI_NOP:
-    emitToStreamer(Out, MCInstBuilder(RISCV::C_NOP));
+  case RISCV::PseudoC_ADDI_NOP: {
+    if (Inst.getOperand(2).getImm() == 0)
+      emitToStreamer(Out, MCInstBuilder(RISCV::C_NOP));
+    else
+      emitToStreamer(
+          Out, MCInstBuilder(RISCV::C_NOP_HINT).addOperand(Inst.getOperand(2)));
     return false;
+  }
   case RISCV::PseudoLLAImm:
   case RISCV::PseudoLAImm:
   case RISCV::PseudoLI: {
diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
index fa7bcfa..67cc01e 100644
--- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -193,21 +193,19 @@ static DecodeStatus DecodeFPR128RegisterClass(MCInst &Inst, uint32_t RegNo,
 static DecodeStatus DecodeGPRNoX0RegisterClass(MCInst &Inst, uint32_t RegNo,
                                                uint64_t Address,
                                                const MCDisassembler *Decoder) {
-  if (RegNo == 0) {
+  if (RegNo == 0)
     return MCDisassembler::Fail;
-  }
 
   return DecodeGPRRegisterClass(Inst, RegNo, Address, Decoder);
 }
 
-static DecodeStatus
-DecodeGPRNoX0X2RegisterClass(MCInst &Inst, uint64_t RegNo, uint32_t Address,
-                             const MCDisassembler *Decoder) {
-  if (RegNo == 2) {
+static DecodeStatus DecodeGPRNoX2RegisterClass(MCInst &Inst, uint64_t RegNo,
+                                               uint32_t Address,
+                                               const MCDisassembler *Decoder) {
+  if (RegNo == 2)
     return MCDisassembler::Fail;
-  }
-  return DecodeGPRNoX0RegisterClass(Inst, RegNo, Address, Decoder);
+
+  return DecodeGPRRegisterClass(Inst, RegNo, Address, Decoder);
 }
 
 static DecodeStatus DecodeGPRNoX31RegisterClass(MCInst &Inst, uint32_t RegNo,
@@ -536,41 +534,26 @@ static DecodeStatus decodeRTZArg(MCInst &Inst, uint32_t Imm, int64_t Address,
   return MCDisassembler::Success;
 }
 
-static DecodeStatus decodeRVCInstrRdRs1ImmZero(MCInst &Inst, uint32_t Insn,
-                                               uint64_t Address,
-                                               const MCDisassembler *Decoder);
-
-static DecodeStatus decodeRVCInstrRdSImm6(MCInst &Inst, uint32_t Insn,
-                                          uint64_t Address,
-                                          const MCDisassembler *Decoder);
-
-static DecodeStatus decodeRVCInstrRdCLUIImm(MCInst &Inst, uint32_t Insn,
-                                            uint64_t Address,
-                                            const MCDisassembler *Decoder);
-
-static DecodeStatus
-decodeRVCInstrRdRs1UImmLog2XLenNonZero(MCInst &Inst, uint32_t Insn,
-                                       uint64_t Address,
-                                       const MCDisassembler *Decoder);
-
-static DecodeStatus decodeRVCInstrRdRs2(MCInst &Inst, uint32_t Insn,
-                                        uint64_t Address,
-                                        const MCDisassembler *Decoder);
-
-static DecodeStatus decodeRVCInstrRdRs1Rs2(MCInst &Inst, uint32_t Insn,
-                                           uint64_t Address,
-                                           const MCDisassembler *Decoder);
-
-static DecodeStatus decodeXTHeadMemPair(MCInst &Inst, uint32_t Insn,
-                                        uint64_t Address,
-                                        const MCDisassembler *Decoder);
-
 static DecodeStatus decodeZcmpRlist(MCInst &Inst, uint32_t Imm,
                                     uint64_t Address,
-                                    const MCDisassembler *Decoder);
+                                    const MCDisassembler *Decoder) {
+  bool IsRVE = Decoder->getSubtargetInfo().hasFeature(RISCV::FeatureStdExtE);
+  if (Imm < RISCVZC::RA || (IsRVE && Imm >= RISCVZC::RA_S0_S2))
+    return MCDisassembler::Fail;
+  Inst.addOperand(MCOperand::createImm(Imm));
+  return MCDisassembler::Success;
+}
 
 static DecodeStatus decodeXqccmpRlistS0(MCInst &Inst, uint32_t Imm,
                                         uint64_t Address,
-                                        const MCDisassembler *Decoder);
+                                        const MCDisassembler *Decoder) {
+  if (Imm < RISCVZC::RA_S0)
+    return MCDisassembler::Fail;
+  return decodeZcmpRlist(Inst, Imm, Address, Decoder);
+}
+
+static DecodeStatus decodeXTHeadMemPair(MCInst &Inst, uint32_t Insn,
+                                        uint64_t Address,
+                                        const MCDisassembler *Decoder);
 
 static DecodeStatus decodeCSSPushPopchk(MCInst &Inst, uint32_t Insn,
@@ -579,18 +562,6 @@ static DecodeStatus decodeCSSPushPopchk(MCInst &Inst, uint32_t Insn,
 
 #include "RISCVGenDisassemblerTables.inc"
 
-static DecodeStatus decodeRVCInstrRdRs1ImmZero(MCInst &Inst, uint32_t Insn,
-                                               uint64_t Address,
-                                               const MCDisassembler *Decoder) {
-  DecodeStatus S = MCDisassembler::Success;
-  uint32_t Rd = fieldFromInstruction(Insn, 7, 5);
-  if (!Check(S, DecodeGPRNoX0RegisterClass(Inst, Rd, Address, Decoder)))
-    return MCDisassembler::Fail;
-  Inst.addOperand(Inst.getOperand(0));
-  Inst.addOperand(MCOperand::createImm(0));
-  return S;
-}
-
 static DecodeStatus decodeCSSPushPopchk(MCInst &Inst, uint32_t Insn,
                                         uint64_t Address,
                                         const MCDisassembler *Decoder) {
@@ -601,66 +572,6 @@ static DecodeStatus decodeCSSPushPopchk(MCInst &Inst, uint32_t Insn,
   return MCDisassembler::Success;
 }
 
-static DecodeStatus decodeRVCInstrRdSImm6(MCInst &Inst, uint32_t Insn,
-                                          uint64_t Address,
-                                          const MCDisassembler *Decoder) {
-  Inst.addOperand(MCOperand::createReg(RISCV::X0));
-  uint32_t Imm =
-      fieldFromInstruction(Insn, 12, 1) << 5 | fieldFromInstruction(Insn, 2, 5);
-  [[maybe_unused]] DecodeStatus Result =
-      decodeSImmOperand<6>(Inst, Imm, Address, Decoder);
-  assert(Result == MCDisassembler::Success && "Invalid immediate");
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus decodeRVCInstrRdCLUIImm(MCInst &Inst, uint32_t Insn,
-                                            uint64_t Address,
-                                            const MCDisassembler *Decoder) {
-  Inst.addOperand(MCOperand::createReg(RISCV::X0));
-  uint32_t Imm =
-      fieldFromInstruction(Insn, 12, 1) << 5 | fieldFromInstruction(Insn, 2, 5);
-  return decodeCLUIImmOperand(Inst, Imm, Address, Decoder);
-}
-
-static DecodeStatus
-decodeRVCInstrRdRs1UImmLog2XLenNonZero(MCInst &Inst, uint32_t Insn,
-                                       uint64_t Address,
-                                       const MCDisassembler *Decoder) {
-  Inst.addOperand(MCOperand::createReg(RISCV::X0));
-  Inst.addOperand(Inst.getOperand(0));
-
-  uint32_t UImm6 =
-      fieldFromInstruction(Insn, 12, 1) << 5 | fieldFromInstruction(Insn, 2, 5);
-  return decodeUImmLog2XLenNonZeroOperand(Inst, UImm6, Address, Decoder);
-}
-
-static DecodeStatus decodeRVCInstrRdRs2(MCInst &Inst, uint32_t Insn,
-                                        uint64_t Address,
-                                        const MCDisassembler *Decoder) {
-  DecodeStatus S = MCDisassembler::Success;
-  uint32_t Rd = fieldFromInstruction(Insn, 7, 5);
-  uint32_t Rs2 = fieldFromInstruction(Insn, 2, 5);
-  if (!Check(S, DecodeGPRRegisterClass(Inst, Rd, Address, Decoder)))
-    return MCDisassembler::Fail;
-  if (!Check(S, DecodeGPRRegisterClass(Inst, Rs2, Address, Decoder)))
-    return MCDisassembler::Fail;
-  return S;
-}
-
-static DecodeStatus decodeRVCInstrRdRs1Rs2(MCInst &Inst, uint32_t Insn,
-                                           uint64_t Address,
-                                           const MCDisassembler *Decoder) {
-  DecodeStatus S = MCDisassembler::Success;
-  uint32_t Rd = fieldFromInstruction(Insn, 7, 5);
-  uint32_t Rs2 = fieldFromInstruction(Insn, 2, 5);
-  if (!Check(S, DecodeGPRRegisterClass(Inst, Rd, Address, Decoder)))
-    return MCDisassembler::Fail;
-  Inst.addOperand(Inst.getOperand(0));
-  if (!Check(S, DecodeGPRRegisterClass(Inst, Rs2, Address, Decoder)))
-    return MCDisassembler::Fail;
-  return S;
-}
-
 static DecodeStatus decodeXTHeadMemPair(MCInst &Inst, uint32_t Insn,
                                         uint64_t Address,
                                         const MCDisassembler *Decoder) {
@@ -691,24 +602,6 @@ static DecodeStatus decodeXTHeadMemPair(MCInst &Inst, uint32_t Insn,
   return S;
 }
 
-static DecodeStatus decodeZcmpRlist(MCInst &Inst, uint32_t Imm,
-                                    uint64_t Address,
-                                    const MCDisassembler *Decoder) {
-  bool IsRVE = Decoder->getSubtargetInfo().hasFeature(RISCV::FeatureStdExtE);
-  if (Imm < RISCVZC::RA || (IsRVE && Imm >= RISCVZC::RA_S0_S2))
-    return MCDisassembler::Fail;
-  Inst.addOperand(MCOperand::createImm(Imm));
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus decodeXqccmpRlistS0(MCInst &Inst, uint32_t Imm,
-                                        uint64_t Address,
-                                        const MCDisassembler *Decoder) {
-  if (Imm < RISCVZC::RA_S0)
-    return MCDisassembler::Fail;
-  return decodeZcmpRlist(Inst, Imm, Address, Decoder);
-}
-
 // Add implied SP operand for C.*SP compressed instructions. The SP operand
 // isn't explicitly encoded in the instruction.
 void RISCVDisassembler::addSPOperands(MCInst &MI) const {
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
index 2c37c3b..82e3b5c 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -320,6 +320,7 @@ bool RISCVAsmBackend::relaxAlign(MCFragment &F, unsigned &Size) {
   MCFixup Fixup =
       MCFixup::create(0, Expr, FirstLiteralRelocationKind + ELF::R_RISCV_ALIGN);
   F.setVarFixups({Fixup});
+  F.setLinkerRelaxable();
   F.getParent()->setLinkerRelaxable();
   return true;
 }
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index 7ad5d5f..bddea43 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -330,7 +330,6 @@ enum OperandType : unsigned {
   OPERAND_UIMM32,
   OPERAND_UIMM48,
   OPERAND_UIMM64,
-  OPERAND_ZERO,
   OPERAND_THREE,
   OPERAND_FOUR,
   OPERAND_SIMM5,
diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
index d4f5d8f..2f32e2a 100644
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -293,7 +293,7 @@ void RISCVAsmPrinter::emitNTLHint(const MachineInstr *MI) {
 
   MCInst Hint;
   if (STI->hasStdExtZca())
-    Hint.setOpcode(RISCV::C_ADD_HINT);
+    Hint.setOpcode(RISCV::C_ADD);
   else
     Hint.setOpcode(RISCV::ADD);
 
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.td b/llvm/lib/Target/RISCV/RISCVCallingConv.td
index 4c303a9..da6b95d 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.td
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.td
@@ -95,3 +95,7 @@ def CSR_XLEN_F32_V_Interrupt_RVE: CalleeSavedRegs<(sub CSR_XLEN_F32_V_Interrupt,
 // Same as CSR_XLEN_F64_V_Interrupt, but excluding X16-X31.
 def CSR_XLEN_F64_V_Interrupt_RVE: CalleeSavedRegs<(sub CSR_XLEN_F64_V_Interrupt,
                                                    (sequence "X%u", 16, 31))>;
+
+def CSR_RT_MostRegs : CalleeSavedRegs<(sub CSR_Interrupt, X6, X7, X28)>;
+def CSR_RT_MostRegs_RVE : CalleeSavedRegs<(sub CSR_RT_MostRegs,
+                                           (sequence "X%u", 16, 31))>;
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index b1ab76a..9fc0d81 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -1581,7 +1581,8 @@ void RISCVFrameLowering::determineCalleeSaves(MachineFunction &MF,
     // Set the register and all its subregisters.
     if (!MRI.def_empty(CSReg) || MRI.getUsedPhysRegsMask().test(CSReg)) {
       SavedRegs.set(CSReg);
-      llvm::for_each(SubRegs, [&](unsigned Reg) { return SavedRegs.set(Reg); });
+      for (unsigned Reg : SubRegs)
+        SavedRegs.set(Reg);
     }
 
     // Combine to super register if all of its subregisters are marked.
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 34910b7..f223fdbe 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -634,7 +634,7 @@ bool RISCVDAGToDAGISel::trySignedBitfieldExtract(SDNode *Node) {
   // Transform (sra (shl X, C1) C2) with C1 < C2
   //        -> (SignedBitfieldExtract X, msb, lsb)
   if (N0.getOpcode() == ISD::SHL) {
-    auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
+    auto *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
     if (!N01C)
       return false;
 
@@ -750,7 +750,7 @@ bool RISCVDAGToDAGISel::trySignedBitfieldInsertInSign(SDNode *Node) {
   // Transform (sra (shl X, C1) C2) with C1 > C2
   //        -> (NDS.BFOS X, lsb, msb)
   if (N0.getOpcode() == ISD::SHL) {
-    auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
+    auto *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
     if (!N01C)
       return false;
 
@@ -1191,7 +1191,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       // Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C)
       // where C2 has 32 leading zeros and C3 trailing zeros.
       SDNode *SRLIW = CurDAG->getMachineNode(
-          RISCV::SRLIW, DL, VT, N0->getOperand(0),
+          RISCV::SRLIW, DL, VT, N0.getOperand(0),
          CurDAG->getTargetConstant(TrailingZeros, DL, VT));
       SDNode *SLLI = CurDAG->getMachineNode(
           RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
@@ -1210,7 +1210,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
      //   - without Zba a tablegen pattern applies the very same
      //     transform as we would have done here
       SDNode *SLLI = CurDAG->getMachineNode(
-          RISCV::SLLI, DL, VT, N0->getOperand(0),
+          RISCV::SLLI, DL, VT, N0.getOperand(0),
          CurDAG->getTargetConstant(LeadingZeros, DL, VT));
       SDNode *SRLI = CurDAG->getMachineNode(
           RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
@@ -1239,7 +1239,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       unsigned TrailingZeros = llvm::countr_zero(Mask);
       if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
         SDNode *SRLIW = CurDAG->getMachineNode(
-            RISCV::SRLIW, DL, VT, N0->getOperand(0),
+            RISCV::SRLIW, DL, VT, N0.getOperand(0),
            CurDAG->getTargetConstant(TrailingZeros, DL, VT));
         SDNode *SLLI = CurDAG->getMachineNode(
             RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
@@ -1266,7 +1266,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       if (TrailingOnes == 32) {
         SDNode *SRLI = CurDAG->getMachineNode(
             Subtarget->is64Bit() ? RISCV::SRLIW : RISCV::SRLI, DL, VT,
-            N0->getOperand(0), CurDAG->getTargetConstant(ShAmt, DL, VT));
+            N0.getOperand(0), CurDAG->getTargetConstant(ShAmt, DL, VT));
         ReplaceNode(Node, SRLI);
         return;
       }
@@ -1279,19 +1279,19 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       if (HasBitTest && ShAmt + 1 == TrailingOnes) {
         SDNode *BEXTI = CurDAG->getMachineNode(
             Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST, DL, VT,
-            N0->getOperand(0), CurDAG->getTargetConstant(ShAmt, DL, VT));
+            N0.getOperand(0), CurDAG->getTargetConstant(ShAmt, DL, VT));
         ReplaceNode(Node, BEXTI);
         return;
       }
 
       const unsigned Msb = TrailingOnes - 1;
       const unsigned Lsb = ShAmt;
-      if (tryUnsignedBitfieldExtract(Node, DL, VT, N0->getOperand(0), Msb, Lsb))
+      if (tryUnsignedBitfieldExtract(Node, DL, VT, N0.getOperand(0), Msb, Lsb))
         return;
 
       unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
       SDNode *SLLI =
-          CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
+          CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
                                  CurDAG->getTargetConstant(LShAmt, DL, VT));
       SDNode *SRLI = CurDAG->getMachineNode(
           RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
@@ -1328,7 +1328,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
         break;
       unsigned LShAmt = Subtarget->getXLen() - ExtSize;
       SDNode *SLLI =
-          CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
+          CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
                                  CurDAG->getTargetConstant(LShAmt, DL, VT));
       SDNode *SRAI = CurDAG->getMachineNode(
           RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
@@ -2942,8 +2942,8 @@ bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base,
 /// Similar to SelectAddrRegImm, except that the offset is restricted to uimm9.
 bool RISCVDAGToDAGISel::SelectAddrRegImm9(SDValue Addr, SDValue &Base,
                                           SDValue &Offset) {
-  // FIXME: Support FrameIndex. Need to teach eliminateFrameIndex that only
-  // a 9-bit immediate can be folded.
+  if (SelectAddrFrameIndex(Addr, Base, Offset))
+    return true;
 
   SDLoc DL(Addr);
   MVT VT = Addr.getSimpleValueType();
@@ -2953,8 +2953,8 @@ bool RISCVDAGToDAGISel::SelectAddrRegImm9(SDValue Addr, SDValue &Base,
     if (isUInt<9>(CVal)) {
       Base = Addr.getOperand(0);
-      // FIXME: Support FrameIndex. Need to teach eliminateFrameIndex that only
-      // a 9-bit immediate can be folded.
+      if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
+        Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
       Offset = CurDAG->getSignedTargetConstant(CVal, DL, VT);
       return true;
     }
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 54845e5..c0ada51 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20730,6 +20730,53 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
         return DAG.getAllOnesConstant(DL, VT);
       return DAG.getConstant(0, DL, VT);
     }
+    case Intrinsic::riscv_vsseg2_mask:
+    case Intrinsic::riscv_vsseg3_mask:
+    case Intrinsic::riscv_vsseg4_mask:
+    case Intrinsic::riscv_vsseg5_mask:
+    case Intrinsic::riscv_vsseg6_mask:
+    case Intrinsic::riscv_vsseg7_mask:
+    case Intrinsic::riscv_vsseg8_mask: {
+      SDValue Tuple = N->getOperand(2);
+      unsigned NF = Tuple.getValueType().getRISCVVectorTupleNumFields();
+
+      if (Subtarget.hasOptimizedSegmentLoadStore(NF) || !Tuple.hasOneUse() ||
+          Tuple.getOpcode() != RISCVISD::TUPLE_INSERT ||
+          !Tuple.getOperand(0).isUndef())
+        return SDValue();
+
+      SDValue Val = Tuple.getOperand(1);
+      unsigned Idx = Tuple.getConstantOperandVal(2);
+
+      unsigned SEW = Val.getValueType().getScalarSizeInBits();
+      assert(Log2_64(SEW) == N->getConstantOperandVal(6) &&
+             "Type mismatch without bitcast?");
+      unsigned Stride = SEW / 8 * NF;
+      unsigned Offset = SEW / 8 * Idx;
+
+      SDValue Ops[] = {
+          /*Chain=*/N->getOperand(0),
+          /*IntID=*/
+          DAG.getTargetConstant(Intrinsic::riscv_vsse_mask, DL, XLenVT),
+          /*StoredVal=*/Val,
+          /*Ptr=*/
+          DAG.getNode(ISD::ADD, DL, XLenVT, N->getOperand(3),
+                      DAG.getConstant(Offset, DL, XLenVT)),
+          /*Stride=*/DAG.getConstant(Stride, DL, XLenVT),
+          /*Mask=*/N->getOperand(4),
+          /*VL=*/N->getOperand(5)};
+
+      auto *OldMemSD = cast<MemIntrinsicSDNode>(N);
+      // Match getTgtMemIntrinsic for non-unit stride case
+      EVT MemVT = OldMemSD->getMemoryVT().getScalarType();
+      MachineFunction &MF = DAG.getMachineFunction();
+      MachineMemOperand *MMO = MF.getMachineMemOperand(
+          OldMemSD->getMemOperand(), Offset, MemoryLocation::UnknownSize);
+
+      SDVTList VTs = DAG.getVTList(MVT::Other);
+      return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, VTs, Ops, MemVT,
+                                     MMO);
+    }
     }
   }
   case ISD::EXPERIMENTAL_VP_REVERSE:
@@ -20822,6 +20869,68 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     }
     break;
   }
+  case RISCVISD::TUPLE_EXTRACT: {
+    EVT VT = N->getValueType(0);
+    SDValue Tuple = N->getOperand(0);
+    unsigned Idx = N->getConstantOperandVal(1);
+    if (!Tuple.hasOneUse() || Tuple.getOpcode() != ISD::INTRINSIC_W_CHAIN)
+      break;
+
+    unsigned NF = 0;
+    switch (Tuple.getConstantOperandVal(1)) {
+    default:
+      break;
+    case Intrinsic::riscv_vlseg2_mask:
+    case Intrinsic::riscv_vlseg3_mask:
+    case Intrinsic::riscv_vlseg4_mask:
+    case Intrinsic::riscv_vlseg5_mask:
+    case Intrinsic::riscv_vlseg6_mask:
+    case Intrinsic::riscv_vlseg7_mask:
+    case Intrinsic::riscv_vlseg8_mask:
+      NF = Tuple.getValueType().getRISCVVectorTupleNumFields();
+      break;
+    }
+
+    if (!NF || Subtarget.hasOptimizedSegmentLoadStore(NF))
+      break;
+
+    unsigned SEW = VT.getScalarSizeInBits();
+    assert(Log2_64(SEW) == Tuple.getConstantOperandVal(7) &&
+           "Type mismatch without bitcast?");
+    unsigned Stride = SEW / 8 * NF;
+    unsigned Offset = SEW / 8 * Idx;
+
+    SDValue Ops[] = {
+        /*Chain=*/Tuple.getOperand(0),
+        /*IntID=*/DAG.getTargetConstant(Intrinsic::riscv_vlse_mask, DL, XLenVT),
+        /*Passthru=*/Tuple.getOperand(2),
+        /*Ptr=*/
+        DAG.getNode(ISD::ADD, DL, XLenVT, Tuple.getOperand(3),
+                    DAG.getConstant(Offset, DL, XLenVT)),
+        /*Stride=*/DAG.getConstant(Stride, DL, XLenVT),
+        /*Mask=*/Tuple.getOperand(4),
+        /*VL=*/Tuple.getOperand(5),
+        /*Policy=*/Tuple.getOperand(6)};
+
+    auto *TupleMemSD = cast<MemIntrinsicSDNode>(Tuple);
+    // Match getTgtMemIntrinsic for non-unit stride case
+    EVT MemVT = TupleMemSD->getMemoryVT().getScalarType();
+    MachineFunction &MF = DAG.getMachineFunction();
+    MachineMemOperand *MMO = MF.getMachineMemOperand(
+        TupleMemSD->getMemOperand(), Offset, MemoryLocation::UnknownSize);
+
+    SDVTList VTs = DAG.getVTList({VT, MVT::Other});
+    SDValue Result = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs,
+                                             Ops, MemVT, MMO);
+    DAG.ReplaceAllUsesOfValueWith(Tuple.getValue(1), Result.getValue(1));
+    return Result.getValue(0);
+  }
+  case RISCVISD::TUPLE_INSERT: {
+    // tuple_insert tuple, undef, idx -> tuple
+    if (N->getOperand(1).isUndef())
+      return N->getOperand(0);
+    break;
+  }
   }
 
   return SDValue();
@@ -22346,6 +22455,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
   case CallingConv::C:
   case CallingConv::Fast:
   case CallingConv::SPIR_KERNEL:
+  case CallingConv::PreserveMost:
   case CallingConv::GRAAL:
   case CallingConv::RISCV_VectorCall:
 #define CC_VLS_CASE(ABI_VLEN) case CallingConv::RISCV_VLSCall_##ABI_VLEN:
@@ -22615,8 +22725,14 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
   bool IsVarArg = CLI.IsVarArg;
   EVT PtrVT = getPointerTy(DAG.getDataLayout());
   MVT XLenVT = Subtarget.getXLenVT();
+  const CallBase *CB = CLI.CB;
 
   MachineFunction &MF = DAG.getMachineFunction();
+  MachineFunction::CallSiteInfo CSInfo;
+
+  // Set type id for call site info.
+  if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
+    CSInfo = MachineFunction::CallSiteInfo(*CB);
 
   // Analyze the operands of the call, assigning locations to each operand.
   SmallVector<CCValAssign, 16> ArgLocs;
@@ -22874,6 +22990,9 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
     if (CLI.CFIType)
       Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue());
     DAG.addNoMergeSiteInfo(Ret.getNode(), CLI.NoMerge);
+    if (MF.getTarget().Options.EmitCallGraphSection && CB &&
+        CB->isIndirectCall())
+      DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
     return Ret;
   }
 
@@ -22881,6 +23000,10 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
   Chain = DAG.getNode(CallOpc, DL, NodeTys, Ops);
   if (CLI.CFIType)
     Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue());
+
+  if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
+    DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
+
   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
   Glue = Chain.getValue(1);
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 64f9e3e..085064e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -2859,9 +2859,6 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
         case RISCVOp::OPERAND_UIMM16_NONZERO:
           Ok = isUInt<16>(Imm) && (Imm != 0);
           break;
-        case RISCVOp::OPERAND_ZERO:
-          Ok = Imm == 0;
-          break;
         case RISCVOp::OPERAND_THREE:
           Ok = Imm == 3;
           break;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoC.td b/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
index 8252a9b..c5551fb 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
@@ -57,12 +57,6 @@ def simm6nonzero : RISCVOp,
   }];
 }
 
-def immzero : RISCVOp,
-              ImmLeaf<XLenVT, [{return (Imm == 0);}]> {
-  let ParserMatchClass = ImmZeroAsmOperand;
-  let OperandType = "OPERAND_ZERO";
-}
-
 def CLUIImmAsmOperand : AsmOperandClass {
   let Name = "CLUIImm";
   let RenderMethod = "addImmOperands";
@@ -272,7 +266,7 @@ class Bcz<bits<3> funct3, string OpcodeStr>
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
 class Shift_right<bits<2> funct2, string OpcodeStr>
     : RVInst16CB<0b100, 0b01, (outs GPRC:$rd),
-                 (ins GPRC:$rs1, uimmlog2xlennonzero:$imm),
+                 (ins GPRC:$rs1, uimmlog2xlen:$imm),
                  OpcodeStr, "$rs1, $imm"> {
   let Constraints = "$rs1 = $rd";
   let Inst{12} = imm{5};
@@ -402,17 +396,19 @@ def C_NOP : RVInst16CI<0b000, 0b01, (outs), (ins), "c.nop", "">,
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
 def C_ADDI : RVInst16CI<0b000, 0b01, (outs GPRNoX0:$rd_wb),
-                        (ins GPRNoX0:$rd, simm6nonzero:$imm),
+                        (ins GPRNoX0:$rd, simm6:$imm),
                         "c.addi", "$rd, $imm">,
              Sched<[WriteIALU, ReadIALU]> {
   let Constraints = "$rd = $rd_wb";
 }
 
-// Alternate syntax for c.nop. Converted to C_NOP by the assembler.
+// Alternate syntax for c.nop. Converted to C_NOP/C_NOP_HINT by the assembler.
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0,
     isAsmParserOnly = 1 in
-def PseudoC_ADDI_NOP : Pseudo<(outs GPRX0:$rd), (ins GPRX0:$rs1, immzero:$imm),
-                              [], "c.addi", "$rd, $imm">;
+def PseudoC_ADDI_NOP : Pseudo<(outs GPRX0:$rd), (ins GPRX0:$rs1, simm6:$imm),
+                              [], "c.addi", "$rd, $imm"> {
+  let Constraints = "$rs1 = $rd";
+}
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCall = 1,
     DecoderNamespace = "RV32Only", Defs = [X1],
@@ -430,7 +426,7 @@ def C_ADDIW : RVInst16CI<0b001, 0b01, (outs GPRNoX0:$rd_wb),
 }
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
-def C_LI : RVInst16CI<0b010, 0b01, (outs GPRNoX0:$rd), (ins simm6:$imm),
+def C_LI : RVInst16CI<0b010, 0b01, (outs GPR:$rd), (ins simm6:$imm),
                       "c.li", "$rd, $imm">,
            Sched<[WriteIALU]>;
 
@@ -449,7 +445,7 @@ def C_ADDI16SP : RVInst16CI<0b011, 0b01, (outs SP:$rd_wb),
 }
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
-def C_LUI : RVInst16CI<0b011, 0b01, (outs GPRNoX0X2:$rd),
+def C_LUI : RVInst16CI<0b011, 0b01, (outs GPRNoX2:$rd),
                        (ins c_lui_imm:$imm),
                        "c.lui", "$rd, $imm">,
             Sched<[WriteIALU]>;
@@ -497,8 +493,8 @@ def C_BEQZ : Bcz<0b110, "c.beqz">, Sched<[WriteJmp, ReadJmp]>;
 def C_BNEZ : Bcz<0b111, "c.bnez">, Sched<[WriteJmp, ReadJmp]>;
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
-def C_SLLI : RVInst16CI<0b000, 0b10, (outs GPRNoX0:$rd_wb),
-                        (ins GPRNoX0:$rd, uimmlog2xlennonzero:$imm),
+def C_SLLI : RVInst16CI<0b000, 0b10, (outs GPR:$rd_wb),
+                        (ins GPR:$rd, uimmlog2xlen:$imm),
                         "c.slli", "$rd, $imm">,
              Sched<[WriteShiftImm, ReadShiftImm]> {
   let Constraints = "$rd = $rd_wb";
@@ -544,7 +540,7 @@ def C_JR : RVInst16CR<0b1000, 0b10, (outs), (ins GPRNoX0:$rs1),
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isMoveReg = 1,
     isAsCheapAsAMove = 1 in
-def C_MV : RVInst16CR<0b1000, 0b10, (outs GPRNoX0:$rs1), (ins GPRNoX0:$rs2),
+def C_MV : RVInst16CR<0b1000, 0b10, (outs GPR:$rs1), (ins GPRNoX0:$rs2),
                       "c.mv", "$rs1, $rs2">,
            Sched<[WriteIALU, ReadIALU]>;
 
@@ -557,8 +553,8 @@ def C_JALR : RVInst16CR<0b1001, 0b10, (outs), (ins GPRNoX0:$rs1),
                         "c.jalr", "$rs1">, Sched<[WriteJalr, ReadJalr]>;
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
-def C_ADD : RVInst16CR<0b1001, 0b10, (outs GPRNoX0:$rd),
-                       (ins GPRNoX0:$rs1, GPRNoX0:$rs2),
+def C_ADD : RVInst16CR<0b1001, 0b10, (outs GPR:$rd),
+                       (ins GPR:$rs1, GPRNoX0:$rs2),
                        "c.add", "$rs1, $rs2">,
             Sched<[WriteIALU, ReadIALU, ReadIALU]> {
   let Constraints = "$rs1 = $rd";
@@ -616,81 +612,6 @@ def C_NOP_HINT : RVInst16CI<0b000, 0b01, (outs), (ins simm6nonzero:$imm),
   let rd = 0;
 }
 
-def C_ADDI_HINT_IMM_ZERO : RVInst16CI<0b000, 0b01, (outs GPRNoX0:$rd_wb),
-                                      (ins GPRNoX0:$rd, immzero:$imm),
-                                      "c.addi", "$rd, $imm">,
-                           Sched<[WriteIALU, ReadIALU]> {
-  let Constraints = "$rd = $rd_wb";
-  let imm = 0;
-  let DecoderMethod = "decodeRVCInstrRdRs1ImmZero";
-}
-
-def C_LI_HINT : RVInst16CI<0b010, 0b01, (outs GPRX0:$rd), (ins simm6:$imm),
-                           "c.li", "$rd, $imm">,
-                Sched<[WriteIALU]> {
-  let Inst{11-7} = 0;
-  let DecoderMethod = "decodeRVCInstrRdSImm6";
-}
-
-def C_LUI_HINT : RVInst16CI<0b011, 0b01, (outs GPRX0:$rd),
-                            (ins c_lui_imm:$imm),
-                            "c.lui", "$rd, $imm">,
-                 Sched<[WriteIALU]> {
-  let Inst{11-7} = 0;
-  let DecoderMethod = "decodeRVCInstrRdCLUIImm";
-}
-
-def C_MV_HINT : RVInst16CR<0b1000, 0b10, (outs GPRX0:$rs1), (ins GPRNoX0:$rs2),
-                           "c.mv", "$rs1, $rs2">, Sched<[WriteIALU, ReadIALU]> {
-  let Inst{11-7} = 0;
-  let DecoderMethod = "decodeRVCInstrRdRs2";
-}
-
-def C_ADD_HINT : RVInst16CR<0b1001, 0b10, (outs GPRX0:$rd),
-                            (ins GPRX0:$rs1, GPRNoX0:$rs2),
-                            "c.add", "$rs1, $rs2">,
-                 Sched<[WriteIALU, ReadIALU, ReadIALU]> {
-  let Constraints = "$rs1 = $rd";
-  let Inst{11-7} = 0;
-  let DecoderMethod = "decodeRVCInstrRdRs1Rs2";
-}
-
-def C_SLLI_HINT : RVInst16CI<0b000, 0b10, (outs GPRX0:$rd_wb),
-                             (ins GPRX0:$rd, uimmlog2xlennonzero:$imm),
-                             "c.slli", "$rd, $imm">,
-                  Sched<[WriteShiftImm, ReadShiftImm]> {
-  let Constraints = "$rd = $rd_wb";
-  let Inst{11-7} = 0;
-  let DecoderMethod = "decodeRVCInstrRdRs1UImmLog2XLenNonZero";
-}
-
-def C_SLLI64_HINT : RVInst16CI<0b000, 0b10, (outs GPR:$rd_wb), (ins GPR:$rd),
-                               "c.slli64", "$rd">,
-                    Sched<[WriteShiftImm, ReadShiftImm]> {
-  let Constraints = "$rd = $rd_wb";
-  let imm = 0;
-}
-
-def C_SRLI64_HINT : RVInst16CB<0b100, 0b01, (outs GPRC:$rd),
-                               (ins GPRC:$rs1),
-                               "c.srli64", "$rs1">,
-                    Sched<[WriteShiftImm, ReadShiftImm]> {
-  let Constraints = "$rs1 = $rd";
-  let Inst{6-2} = 0;
-  let Inst{11-10} = 0b00;
-  let Inst{12} = 0;
-}
-
-def C_SRAI64_HINT : RVInst16CB<0b100, 0b01, (outs GPRC:$rd),
-                               (ins GPRC:$rs1),
-                               "c.srai64", "$rs1">,
-                    Sched<[WriteShiftImm, ReadShiftImm]> {
-  let Constraints = "$rs1 = $rd";
-  let Inst{6-2} = 0;
-  let Inst{11-10} = 0b01;
-  let Inst{12} = 0;
-}
-
 } // Predicates = [HasStdExtZca], hasSideEffects = 0, mayLoad = 0,
   // mayStore = 0
 
@@ -699,15 +620,17 @@ def C_SRAI64_HINT : RVInst16CB<0b100, 0b01, (outs GPRC:$rd),
 //===----------------------------------------------------------------------===//
 
 let Predicates = [HasStdExtZca] in {
-// Just a different syntax for the c.nop hint: c.addi x0, simm6 vs c.nop simm6.
-def : InstAlias<"c.addi x0, $imm", (C_NOP_HINT simm6nonzero:$imm), 0>;
+// Legacy aliases.
+def : InstAlias<"c.slli64 $rd", (C_SLLI GPR:$rd, 0), 0>;
+def : InstAlias<"c.srli64 $rs1", (C_SRLI GPRC:$rs1, 0), 0>;
+def : InstAlias<"c.srai64 $rs1", (C_SRAI GPRC:$rs1, 0), 0>;
 }
 
 let Predicates = [HasStdExtC, HasStdExtZihintntl] in {
-def : InstAlias<"c.ntl.p1", (C_ADD_HINT X0, X2)>;
-def : InstAlias<"c.ntl.pall", (C_ADD_HINT X0, X3)>;
-def : InstAlias<"c.ntl.s1", (C_ADD_HINT X0, X4)>;
-def : InstAlias<"c.ntl.all", (C_ADD_HINT X0, X5)>;
+def : InstAlias<"c.ntl.p1", (C_ADD X0, X2)>;
+def : InstAlias<"c.ntl.pall", (C_ADD X0, X3)>;
+def : InstAlias<"c.ntl.s1", (C_ADD X0, X4)>;
+def : InstAlias<"c.ntl.all", (C_ADD X0, X5)>;
 } // Predicates = [HasStdExtC, HasStdExtZihintntl]
 
 let EmitPriority = 0 in {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
index dd365cf..8297d50 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
@@ -136,6 +136,7 @@ class RVPUnary_ri<bits<2> w, bits<5> uf, string opcodestr>
 //===----------------------------------------------------------------------===//
 
 let Predicates = [HasStdExtP] in {
+let IsSignExtendingOpW = 1 in
 def CLS : Unary_r<0b011000000011, 0b001, "cls">;
 def ABS : Unary_r<0b011000000111, 0b001, "abs">;
 } // Predicates = [HasStdExtP]
@@ -146,8 +147,10 @@ let Predicates = [HasStdExtP, IsRV64] in {
 def REV16 : Unary_r<0b011010110000, 0b101, "rev16">;
 def REV_RV64 : Unary_r<0b011010111111, 0b101, "rev">;
 
+let IsSignExtendingOpW = 1 in {
 def CLSW : UnaryW_r<0b011000000011, 0b001, "clsw">;
 def ABSW : UnaryW_r<0b011000000111, 0b001, "absw">;
+}
 } // Predicates = [HasStdExtP, IsRV64]
 
 let Predicates = [HasStdExtP] in {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 6afc942d..03e6f43 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1510,21 +1510,6 @@ class VPseudoTiedBinaryCarryIn<VReg RetClass,
   let VLMul = MInfo.value;
 }
 
-class VPseudoTernaryNoMask<VReg RetClass,
-                           RegisterClass Op1Class,
-                           DAGOperand Op2Class,
-                           string Constraint> :
-      RISCVVPseudo<(outs RetClass:$rd),
-                   (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
-                        AVL:$vl, sew:$sew)> {
-  let mayLoad = 0;
-  let mayStore = 0;
-  let hasSideEffects = 0;
-  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
-  let HasVLOp = 1;
-  let HasSEWOp = 1;
-}
-
 class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
                                      RegisterClass Op1Class,
                                      DAGOperand Op2Class,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
index f391300..5265613 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
@@ -1120,27 +1120,11 @@ let Predicates = [HasVendorXqcisync, IsRV32] in {
   def QC_C_SYNCWF : QCIRVInst16CBSYNC<0b100, "qc.c.syncwf">;
   def QC_C_SYNCWL : QCIRVInst16CBSYNC<0b101, "qc.c.syncwl">;
 
-  let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in
-  def QC_C_DELAY : RVInst16CI<0b000, 0b10, (outs),
-                              (ins uimm5nonzero:$imm),
-                              "qc.c.delay", "$imm"> {
-    let Inst{12} = 0;
-    let Inst{11-7} = 0;
-    let Inst{6-2} = imm{4-0};
-  }
+  // qc.c.delay implemented as an alias, below
 } // Predicates = [HasVendorXqcisync, IsRV32]
 
 let Predicates = [HasVendorXqcisim, IsRV32] in {
 let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in {
-  def QC_PSYSCALLI : RVInstI<0b010, OPC_OP_IMM, (outs), (ins uimm10:$imm10),
-                             "qc.psyscalli", "$imm10"> {
-    bits<10> imm10;
-
-    let rs1 = 0;
-    let rd = 0;
-    let imm12 = {0b00, imm10};
-  }
-
   def QC_PPUTCI : RVInstI<0b010, OPC_OP_IMM, (outs), (ins uimm8:$imm8),
                           "qc.pputci", "$imm8"> {
     bits<8> imm8;
@@ -1150,18 +1134,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in {
     let imm12 = {0b0100, imm8};
   }
 
-  def QC_PCOREDUMP : QCISim_NONE<0b0110, "qc.pcoredump">;
-  def QC_PPREGS : QCISim_NONE<0b0111, "qc.ppregs">;
-  def QC_PPREG : QCISim_RS1<0b1000, "qc.ppreg">;
-  def QC_PPUTC : QCISim_RS1<0b1001, "qc.pputc">;
-  def QC_PPUTS : QCISim_RS1<0b1010, "qc.pputs">;
-  def QC_PEXIT : QCISim_RS1<0b1011, "qc.pexit">;
-  def QC_PSYSCALL : QCISim_RS1<0b1100, "qc.psyscall">;
-
-  def QC_C_PTRACE : RVInst16CI<0b000, 0b10, (outs), (ins), "qc.c.ptrace", ""> {
-    let rd = 0;
-    let imm = 0;
-  }
+  // The other instructions are all implemented as aliases, below
 } // mayLoad = 0, mayStore = 0, hasSideEffects = 1
 } // Predicates = [HasVendorXqcisim, IsRV32]
 
@@ -1218,6 +1191,27 @@ let EmitPriority = 0 in {
 } // EmitPriority = 0
 } // Predicates = [HasVendorXqcilo, IsRV32]
 
+let Predicates = [HasVendorXqcisim, IsRV32] in {
+let EmitPriority = 1 in {
+  def : InstAlias<"qc.c.ptrace", (C_SLLI X0, 0)>;
+
+  def : InstAlias<"qc.psyscalli $imm", (SLTI X0, X0, uimm10:$imm)>;
+  def : InstAlias<"qc.pcoredump", (SLTI X0, X0, 1536)>;
+  def : InstAlias<"qc.ppregs", (SLTI X0, X0, 1792)>;
+  def : InstAlias<"qc.ppreg $rs1", (SLTI X0, GPR:$rs1, -2048)>;
+  def : InstAlias<"qc.pputc $rs1", (SLTI X0, GPR:$rs1, -1792)>;
+  def : InstAlias<"qc.pputs $rs1", (SLTI X0, GPR:$rs1, -1536)>;
+  def : InstAlias<"qc.pexit $rs1", (SLTI X0, GPR:$rs1, -1280)>;
+  def : InstAlias<"qc.psyscall $rs1", (SLTI X0, GPR:$rs1, -1024)>;
+} // EmitPriority = 1
+} // Predicates = [HasVendorXqcisim, IsRV32]
+
+let Predicates = [HasVendorXqcisync, IsRV32] in {
+let EmitPriority = 1 in {
+  def : InstAlias<"qc.c.delay $imm", (C_SLLI X0, uimm5nonzero:$imm)>;
+}
+} // Predicates = [HasVendorXqcisync, IsRV32]
+
 //===----------------------------------------------------------------------===//
 // Pseudo-instructions
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
index 30d8f85..726920e 100644
--- a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
@@ -216,29 +216,6 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
   if (!isLegalInterleavedAccessType(VTy, Factor, Alignment, AS, DL))
     return false;
 
-  // If the segment load is going to be performed segment at a time anyways
-  // and there's only one element used, use a strided load instead. This
-  // will be equally fast, and create less vector register pressure.
-  if (Indices.size() == 1 && !Subtarget.hasOptimizedSegmentLoadStore(Factor)) {
-    unsigned ScalarSizeInBytes = DL.getTypeStoreSize(VTy->getElementType());
-    Value *Stride = ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes);
-    Value *Offset = ConstantInt::get(XLenTy, Indices[0] * ScalarSizeInBytes);
-    Value *BasePtr = Builder.CreatePtrAdd(Ptr, Offset);
-    // For rv64, need to truncate i64 to i32 to match signature. As VL is at most
-    // the number of active lanes (which is bounded by i32) this is safe.
-    VL = Builder.CreateTrunc(VL, Builder.getInt32Ty());
-
-    CallInst *CI =
-        Builder.CreateIntrinsic(Intrinsic::experimental_vp_strided_load,
-                                {VTy, BasePtr->getType(), Stride->getType()},
-                                {BasePtr, Stride, Mask, VL});
-    Alignment = commonAlignment(Alignment, Indices[0] * ScalarSizeInBytes);
-    CI->addParamAttr(0,
-                     Attribute::getWithAlignment(CI->getContext(), Alignment));
-    Shuffles[0]->replaceAllUsesWith(CI);
-    return true;
-  };
-
   CallInst *VlsegN = Builder.CreateIntrinsic(
       FixedVlsegIntrIds[Factor - 2], {VTy, PtrTy, XLenTy}, {Ptr, Mask, VL});
 
@@ -289,33 +266,6 @@ bool RISCVTargetLowering::lowerInterleavedStore(Instruction *Store,
   if (!isLegalInterleavedAccessType(VTy, Factor, Alignment, AS, DL))
     return false;
 
-  unsigned Index;
-  // If the segment store only has one active lane (i.e. the interleave is
-  // just a spread shuffle), we can use a strided store instead. This will
-  // be equally fast, and create less vector register pressure.
-  if (!Subtarget.hasOptimizedSegmentLoadStore(Factor) &&
-      isSpreadMask(Mask, Factor, Index)) {
-    unsigned ScalarSizeInBytes =
-        DL.getTypeStoreSize(ShuffleVTy->getElementType());
-    Value *Data = SVI->getOperand(0);
-    Data = Builder.CreateExtractVector(VTy, Data, uint64_t(0));
-    Value *Stride = ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes);
-    Value *Offset = ConstantInt::get(XLenTy, Index * ScalarSizeInBytes);
-    Value *BasePtr = Builder.CreatePtrAdd(Ptr, Offset);
-    // For rv64, need to truncate i64 to i32 to match signature. As VL is at
-    // most the number of active lanes (which is bounded by i32) this is safe.
-    VL = Builder.CreateTrunc(VL, Builder.getInt32Ty());
-
-    CallInst *CI =
-        Builder.CreateIntrinsic(Intrinsic::experimental_vp_strided_store,
-                                {VTy, BasePtr->getType(), Stride->getType()},
-                                {Data, BasePtr, Stride, LaneMask, VL});
-    Alignment = commonAlignment(Alignment, Index * ScalarSizeInBytes);
-    CI->addParamAttr(1,
-                     Attribute::getWithAlignment(CI->getContext(), Alignment));
-    return true;
-  }
-
   Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
       Store->getModule(), FixedVssegIntrIds[Factor - 2], {VTy, PtrTy, XLenTy});
 
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 5404123..7e58b6f 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -68,6 +68,9 @@ RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
   auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
   if (MF->getFunction().getCallingConv() == CallingConv::GHC)
     return CSR_NoRegs_SaveList;
+  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
+    return Subtarget.hasStdExtE() ? CSR_RT_MostRegs_RVE_SaveList
+                                  : CSR_RT_MostRegs_SaveList;
   if (MF->getFunction().hasFnAttribute("interrupt")) {
     if (Subtarget.hasVInstructions()) {
       if (Subtarget.hasStdExtD())
@@ -573,6 +576,7 @@ bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
     int64_t Val = Offset.getFixed();
     int64_t Lo12 = SignExtend64<12>(Val);
     unsigned Opc = MI.getOpcode();
+
     if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
       // We chose to emit the canonical immediate sequence rather than folding
      // the offset into the using add under the theory that doing so doesn't
@@ -585,6 +589,9 @@ bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                (Lo12 & 0b11111) != 0) {
       // Prefetch instructions require the offset to be 32 byte aligned.
       MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
+    } else if (Opc == RISCV::MIPS_PREFETCH && !isUInt<9>(Val)) {
+      // MIPS Prefetch instructions require the offset to be 9 bits encoded.
+      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
     } else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
                 Opc == RISCV::PseudoRV32ZdinxSD) &&
                Lo12 >= 2044) {
@@ -811,7 +818,13 @@ RISCVRegisterInfo::getCallPreservedMask(const MachineFunction & MF,
   if (CC == CallingConv::GHC)
     return CSR_NoRegs_RegMask;
 
-  switch (Subtarget.getTargetABI()) {
+  RISCVABI::ABI ABI = Subtarget.getTargetABI();
+  if (CC == CallingConv::PreserveMost) {
+    if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
+      return CSR_RT_MostRegs_RVE_RegMask;
+    return CSR_RT_MostRegs_RegMask;
+  }
+  switch (ABI) {
   default:
     llvm_unreachable("Unrecognized ABI");
   case RISCVABI::ABI_ILP32E:
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index e87f452..ccb39e8 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -268,6 +268,11 @@ def GPRNoX0 : GPRRegisterClass<(sub GPR, X0)> {
   let DiagnosticString = "register must be a GPR excluding zero (x0)";
 }
 
+def GPRNoX2 : GPRRegisterClass<(sub GPR, X2)> {
+  let DiagnosticType = "InvalidRegClassGPRNoX2";
+  let DiagnosticString = "register must be a GPR excluding sp (x2)";
+}
+
 def GPRNoX0X2 : GPRRegisterClass<(sub GPR, X0, X2)> {
   let DiagnosticType = "InvalidRegClassGPRNoX0X2";
   let DiagnosticString = "register must be a GPR excluding zero (x0) and sp (x2)";
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index fd634b5..61dbd06 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -1191,9 +1191,6 @@ static const CostTblEntry VectorIntrinsicCostTable[]{
     {Intrinsic::roundeven, MVT::f64, 9},
     {Intrinsic::rint, MVT::f32, 7},
    {Intrinsic::rint, MVT::f64, 7},
-    {Intrinsic::lrint, MVT::i32, 1},
-    {Intrinsic::lrint, MVT::i64, 1},
-    {Intrinsic::llrint, MVT::i64, 1},
    {Intrinsic::nearbyint, MVT::f32, 9},
    {Intrinsic::nearbyint, MVT::f64, 9},
    {Intrinsic::bswap, MVT::i16, 3},
@@ -1251,11 +1248,48 @@ RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
   switch (ICA.getID()) {
   case Intrinsic::lrint:
   case Intrinsic::llrint:
-    // We can't currently lower half or bfloat vector lrint/llrint.
-    if (auto *VecTy = dyn_cast<VectorType>(ICA.getArgTypes()[0]);
-        VecTy && VecTy->getElementType()->is16bitFPTy())
-      return InstructionCost::getInvalid();
-    [[fallthrough]];
+  case Intrinsic::lround:
+  case Intrinsic::llround: {
+    auto LT = getTypeLegalizationCost(RetTy);
+    Type *SrcTy = ICA.getArgTypes().front();
+    auto SrcLT = getTypeLegalizationCost(SrcTy);
+    if (ST->hasVInstructions() && LT.second.isVector()) {
+      SmallVector<unsigned, 2> Ops;
+      unsigned SrcEltSz = DL.getTypeSizeInBits(SrcTy->getScalarType());
+      unsigned DstEltSz = DL.getTypeSizeInBits(RetTy->getScalarType());
+      if (LT.second.getVectorElementType() == MVT::bf16) {
+        if (!ST->hasVInstructionsBF16Minimal())
+          return InstructionCost::getInvalid();
+        if (DstEltSz == 32)
+          Ops = {RISCV::VFWCVTBF16_F_F_V, RISCV::VFCVT_X_F_V};
+        else
+          Ops = {RISCV::VFWCVTBF16_F_F_V, RISCV::VFWCVT_X_F_V};
+      } else if (LT.second.getVectorElementType() == MVT::f16 &&
+                 !ST->hasVInstructionsF16()) {
+        if (!ST->hasVInstructionsF16Minimal())
+          return InstructionCost::getInvalid();
+        if (DstEltSz == 32)
+          Ops = {RISCV::VFWCVT_F_F_V, RISCV::VFCVT_X_F_V};
+        else
+          Ops = {RISCV::VFWCVT_F_F_V, RISCV::VFWCVT_X_F_V};
+
+      } else if (SrcEltSz > DstEltSz) {
+        Ops = {RISCV::VFNCVT_X_F_W};
+      } else if (SrcEltSz < DstEltSz) {
+        Ops = {RISCV::VFWCVT_X_F_V};
+      } else {
+        Ops = {RISCV::VFCVT_X_F_V};
+      }
+
+      // We need to use the source LMUL in the case of a narrowing op, and the
+      // destination LMUL otherwise.
+      if (SrcEltSz > DstEltSz)
+        return SrcLT.first *
+               getRISCVInstructionCost(Ops, SrcLT.second, CostKind);
+      return LT.first * getRISCVInstructionCost(Ops, LT.second, CostKind);
+    }
+    break;
+  }
   case Intrinsic::ceil:
   case Intrinsic::floor:
   case Intrinsic::trunc:
diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
index c1cc19b..050de3d 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -646,8 +646,7 @@ bool RISCVVectorPeephole::foldVMV_V_V(MachineInstr &MI) {
   if (!Src || Src->hasUnmodeledSideEffects() ||
       Src->getParent() != MI.getParent() ||
       !RISCVII::isFirstDefTiedToFirstUse(Src->getDesc()) ||
-      !RISCVII::hasVLOp(Src->getDesc().TSFlags) ||
-      !RISCVII::hasVecPolicyOp(Src->getDesc().TSFlags))
+      !RISCVII::hasVLOp(Src->getDesc().TSFlags))
     return false;
 
   // Src's dest needs to have the same EEW as MI's input.
@@ -681,12 +680,14 @@ bool RISCVVectorPeephole::foldVMV_V_V(MachineInstr &MI) {
                                    *Src->getParent()->getParent()));
   }
 
-  // If MI was tail agnostic and the VL didn't increase, preserve it.
-  int64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
-  if ((MI.getOperand(5).getImm() & RISCVVType::TAIL_AGNOSTIC) &&
-      RISCV::isVLKnownLE(MI.getOperand(3), SrcVL))
-    Policy |= RISCVVType::TAIL_AGNOSTIC;
-  Src->getOperand(RISCVII::getVecPolicyOpNum(Src->getDesc())).setImm(Policy);
+  if (RISCVII::hasVecPolicyOp(Src->getDesc().TSFlags)) {
+    // If MI was tail agnostic and the VL didn't increase, preserve it.
+    int64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+    if ((MI.getOperand(5).getImm() & RISCVVType::TAIL_AGNOSTIC) &&
+        RISCV::isVLKnownLE(MI.getOperand(3), SrcVL))
+      Policy |= RISCVVType::TAIL_AGNOSTIC;
+    Src->getOperand(RISCVII::getVecPolicyOpNum(Src->getDesc())).setImm(Policy);
+  }
 
   MRI->constrainRegClass(Src->getOperand(0).getReg(),
                          MRI->getRegClass(MI.getOperand(0).getReg()));
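Illustrative sketch (not part of the commit): with the separate *_HINT definitions above folded into the regular Zca instructions, hint encodings are now matched by the ordinary C_* definitions and the new aliases. Assuming an assembler built from this revision, forms like the following should be accepted (e.g. via llvm-mc -triple=riscv32 -mattr=+zca):

    c.addi x0, 5     # nonzero imm with rd=x0: emitted as the c.nop hint (C_NOP_HINT)
    c.li   x0, 3     # rd = x0 accepted because C_LI's output class is now GPR
    c.mv   x0, a1    # hint encoding accepted via C_MV's GPR output class
    c.slli64 a0      # kept as a legacy alias for c.slli a0, 0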