Diffstat (limited to 'llvm/lib/Target/RISCV')
-rw-r--r--  llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp   38
-rw-r--r--  llvm/lib/Target/RISCV/RISCVCallingConv.td                    4
-rw-r--r--  llvm/lib/Target/RISCV/RISCVFrameLowering.cpp                 3
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp                 28
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp                110
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoP.td                     3
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td                 52
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp            50
-rw-r--r--  llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp                 15
-rw-r--r--  llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp          50
10 files changed, 227 insertions, 126 deletions
diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
index 5e54b82..67cc01e 100644
--- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -534,16 +534,26 @@ static DecodeStatus decodeRTZArg(MCInst &Inst, uint32_t Imm, int64_t Address,
return MCDisassembler::Success;
}
-static DecodeStatus decodeXTHeadMemPair(MCInst &Inst, uint32_t Insn,
- uint64_t Address,
- const MCDisassembler *Decoder);
-
static DecodeStatus decodeZcmpRlist(MCInst &Inst, uint32_t Imm,
uint64_t Address,
- const MCDisassembler *Decoder);
+ const MCDisassembler *Decoder) {
+ bool IsRVE = Decoder->getSubtargetInfo().hasFeature(RISCV::FeatureStdExtE);
+ if (Imm < RISCVZC::RA || (IsRVE && Imm >= RISCVZC::RA_S0_S2))
+ return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::createImm(Imm));
+ return MCDisassembler::Success;
+}
static DecodeStatus decodeXqccmpRlistS0(MCInst &Inst, uint32_t Imm,
uint64_t Address,
+ const MCDisassembler *Decoder) {
+ if (Imm < RISCVZC::RA_S0)
+ return MCDisassembler::Fail;
+ return decodeZcmpRlist(Inst, Imm, Address, Decoder);
+}
+
+static DecodeStatus decodeXTHeadMemPair(MCInst &Inst, uint32_t Insn,
+ uint64_t Address,
const MCDisassembler *Decoder);
static DecodeStatus decodeCSSPushPopchk(MCInst &Inst, uint32_t Insn,
@@ -592,24 +602,6 @@ static DecodeStatus decodeXTHeadMemPair(MCInst &Inst, uint32_t Insn,
return S;
}
-static DecodeStatus decodeZcmpRlist(MCInst &Inst, uint32_t Imm,
- uint64_t Address,
- const MCDisassembler *Decoder) {
- bool IsRVE = Decoder->getSubtargetInfo().hasFeature(RISCV::FeatureStdExtE);
- if (Imm < RISCVZC::RA || (IsRVE && Imm >= RISCVZC::RA_S0_S2))
- return MCDisassembler::Fail;
- Inst.addOperand(MCOperand::createImm(Imm));
- return MCDisassembler::Success;
-}
-
-static DecodeStatus decodeXqccmpRlistS0(MCInst &Inst, uint32_t Imm,
- uint64_t Address,
- const MCDisassembler *Decoder) {
- if (Imm < RISCVZC::RA_S0)
- return MCDisassembler::Fail;
- return decodeZcmpRlist(Inst, Imm, Address, Decoder);
-}
-
// Add implied SP operand for C.*SP compressed instructions. The SP operand
// isn't explicitly encoded in the instruction.
void RISCVDisassembler::addSPOperands(MCInst &MI) const {
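The relocated decodeZcmpRlist body is also where the register-list validity rules live: encodings below {ra} are reserved, and on RVE anything from {ra, s0-s2} upwards is rejected because s2-s11 do not exist there; the Xqccmp variant further requires at least {ra, s0}. A standalone sketch of the same checks follows; the enumerator values are assumed from the Zcmp rlist encoding (4 = {ra} ... 15 = {ra, s0-s11}), not copied from RISCVBaseInfo.h.

#include <cstdint>

// Assumed rlist encodings per the Zcmp spec; values 0-3 are reserved.
enum : uint32_t { RLIST_RA = 4, RLIST_RA_S0 = 5, RLIST_RA_S0_S2 = 7 };

// Mirrors the new decodeZcmpRlist check: reject reserved encodings and, on
// RVE, any list that would include s2 or higher.
static bool isValidZcmpRlist(uint32_t Imm, bool IsRVE) {
  if (Imm < RLIST_RA)
    return false;
  if (IsRVE && Imm >= RLIST_RA_S0_S2)
    return false;
  return true;
}

// Mirrors decodeXqccmpRlistS0: the Xqccmp push/pop forms additionally require
// the list to contain at least {ra, s0}.
static bool isValidXqccmpRlistS0(uint32_t Imm, bool IsRVE) {
  return Imm >= RLIST_RA_S0 && isValidZcmpRlist(Imm, IsRVE);
}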
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.td b/llvm/lib/Target/RISCV/RISCVCallingConv.td
index 4c303a9..da6b95d 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.td
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.td
@@ -95,3 +95,7 @@ def CSR_XLEN_F32_V_Interrupt_RVE: CalleeSavedRegs<(sub CSR_XLEN_F32_V_Interrupt,
// Same as CSR_XLEN_F64_V_Interrupt, but excluding X16-X31.
def CSR_XLEN_F64_V_Interrupt_RVE: CalleeSavedRegs<(sub CSR_XLEN_F64_V_Interrupt,
(sequence "X%u", 16, 31))>;
+
+def CSR_RT_MostRegs : CalleeSavedRegs<(sub CSR_Interrupt, X6, X7, X28)>;
+def CSR_RT_MostRegs_RVE : CalleeSavedRegs<(sub CSR_RT_MostRegs,
+ (sequence "X%u", 16, 31))>;
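CSR_RT_MostRegs backs the PreserveMost calling convention wired up later in this series (see the RISCVISelLowering.cpp and RISCVRegisterInfo.cpp hunks below): it is CSR_Interrupt minus x6/x7/x28 (t1-t3), with an RVE variant that also drops x16-x31. From C or C++ the convention is reachable through Clang's preserve_most attribute; a minimal usage sketch, assuming a Clang build that carries this patch, with a hypothetical cold-path helper:

// Cold-path helper: with preserve_most, the callee saves nearly all
// registers, so this call barely disturbs the caller's register state.
__attribute__((preserve_most)) void log_rare_event(int id);

void process(int *data, int n) {
  for (int i = 0; i < n; ++i) {
    data[i] *= 2;                  // hot work; values stay in registers
    if (__builtin_expect(data[i] < 0, 0))
      log_rare_event(i);           // cold call on the slow path
  }
}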
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index b1ab76a..9fc0d81 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -1581,7 +1581,8 @@ void RISCVFrameLowering::determineCalleeSaves(MachineFunction &MF,
// Set the register and all its subregisters.
if (!MRI.def_empty(CSReg) || MRI.getUsedPhysRegsMask().test(CSReg)) {
SavedRegs.set(CSReg);
- llvm::for_each(SubRegs, [&](unsigned Reg) { return SavedRegs.set(Reg); });
+ for (unsigned Reg : SubRegs)
+ SavedRegs.set(Reg);
}
// Combine to super register if all of its subregisters are marked.
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 34910b7..f223fdbe 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -634,7 +634,7 @@ bool RISCVDAGToDAGISel::trySignedBitfieldExtract(SDNode *Node) {
// Transform (sra (shl X, C1) C2) with C1 < C2
// -> (SignedBitfieldExtract X, msb, lsb)
if (N0.getOpcode() == ISD::SHL) {
- auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
+ auto *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!N01C)
return false;
@@ -750,7 +750,7 @@ bool RISCVDAGToDAGISel::trySignedBitfieldInsertInSign(SDNode *Node) {
// Transform (sra (shl X, C1) C2) with C1 > C2
// -> (NDS.BFOS X, lsb, msb)
if (N0.getOpcode() == ISD::SHL) {
- auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
+ auto *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!N01C)
return false;
@@ -1191,7 +1191,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C)
// where C2 has 32 leading zeros and C3 trailing zeros.
SDNode *SRLIW = CurDAG->getMachineNode(
- RISCV::SRLIW, DL, VT, N0->getOperand(0),
+ RISCV::SRLIW, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(TrailingZeros, DL, VT));
SDNode *SLLI = CurDAG->getMachineNode(
RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
@@ -1210,7 +1210,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// - without Zba a tablegen pattern applies the very same
// transform as we would have done here
SDNode *SLLI = CurDAG->getMachineNode(
- RISCV::SLLI, DL, VT, N0->getOperand(0),
+ RISCV::SLLI, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(LeadingZeros, DL, VT));
SDNode *SRLI = CurDAG->getMachineNode(
RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
@@ -1239,7 +1239,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
unsigned TrailingZeros = llvm::countr_zero(Mask);
if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
SDNode *SRLIW = CurDAG->getMachineNode(
- RISCV::SRLIW, DL, VT, N0->getOperand(0),
+ RISCV::SRLIW, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(TrailingZeros, DL, VT));
SDNode *SLLI = CurDAG->getMachineNode(
RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
@@ -1266,7 +1266,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
if (TrailingOnes == 32) {
SDNode *SRLI = CurDAG->getMachineNode(
Subtarget->is64Bit() ? RISCV::SRLIW : RISCV::SRLI, DL, VT,
- N0->getOperand(0), CurDAG->getTargetConstant(ShAmt, DL, VT));
+ N0.getOperand(0), CurDAG->getTargetConstant(ShAmt, DL, VT));
ReplaceNode(Node, SRLI);
return;
}
@@ -1279,19 +1279,19 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
if (HasBitTest && ShAmt + 1 == TrailingOnes) {
SDNode *BEXTI = CurDAG->getMachineNode(
Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST, DL, VT,
- N0->getOperand(0), CurDAG->getTargetConstant(ShAmt, DL, VT));
+ N0.getOperand(0), CurDAG->getTargetConstant(ShAmt, DL, VT));
ReplaceNode(Node, BEXTI);
return;
}
const unsigned Msb = TrailingOnes - 1;
const unsigned Lsb = ShAmt;
- if (tryUnsignedBitfieldExtract(Node, DL, VT, N0->getOperand(0), Msb, Lsb))
+ if (tryUnsignedBitfieldExtract(Node, DL, VT, N0.getOperand(0), Msb, Lsb))
return;
unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
SDNode *SLLI =
- CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
+ CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(LShAmt, DL, VT));
SDNode *SRLI = CurDAG->getMachineNode(
RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
@@ -1328,7 +1328,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
break;
unsigned LShAmt = Subtarget->getXLen() - ExtSize;
SDNode *SLLI =
- CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
+ CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(LShAmt, DL, VT));
SDNode *SRAI = CurDAG->getMachineNode(
RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
@@ -2942,8 +2942,8 @@ bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base,
/// Similar to SelectAddrRegImm, except that the offset is restricted to uimm9.
bool RISCVDAGToDAGISel::SelectAddrRegImm9(SDValue Addr, SDValue &Base,
SDValue &Offset) {
- // FIXME: Support FrameIndex. Need to teach eliminateFrameIndex that only
- // a 9-bit immediate can be folded.
+ if (SelectAddrFrameIndex(Addr, Base, Offset))
+ return true;
SDLoc DL(Addr);
MVT VT = Addr.getSimpleValueType();
@@ -2953,8 +2953,8 @@ bool RISCVDAGToDAGISel::SelectAddrRegImm9(SDValue Addr, SDValue &Base,
if (isUInt<9>(CVal)) {
Base = Addr.getOperand(0);
- // FIXME: Support FrameIndex. Need to teach eliminateFrameIndex that only
- // a 9-bit immediate can be folded.
+ if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
Offset = CurDAG->getSignedTargetConstant(CVal, DL, VT);
return true;
}
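SelectAddrRegImm9 can now hand out frame indices because eliminateFrameIndex (see the RISCVRegisterInfo.cpp hunk below) resets the immediate whenever the resolved offset no longer fits the unsigned 9-bit field used by the MIPS prefetch instruction. A standalone model of that split; the helper names are illustrative, not the LLVM isUInt<9> template:

#include <cstdint>
#include <utility>

static bool fitsUImm9(int64_t V) { return V >= 0 && V < (int64_t)1 << 9; }

// Returns {immediate kept in the instruction, remainder that must be folded
// into the base register}, mirroring the "reset to 0 if it doesn't fit"
// policy in eliminateFrameIndex.
static std::pair<int64_t, int64_t> splitUImm9Offset(int64_t Offset) {
  if (fitsUImm9(Offset))
    return {Offset, 0};
  return {0, Offset};
}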
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 54845e5..b47d89b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20730,6 +20730,53 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
return DAG.getAllOnesConstant(DL, VT);
return DAG.getConstant(0, DL, VT);
}
+ case Intrinsic::riscv_vsseg2_mask:
+ case Intrinsic::riscv_vsseg3_mask:
+ case Intrinsic::riscv_vsseg4_mask:
+ case Intrinsic::riscv_vsseg5_mask:
+ case Intrinsic::riscv_vsseg6_mask:
+ case Intrinsic::riscv_vsseg7_mask:
+ case Intrinsic::riscv_vsseg8_mask: {
+ SDValue Tuple = N->getOperand(2);
+ unsigned NF = Tuple.getValueType().getRISCVVectorTupleNumFields();
+
+ if (Subtarget.hasOptimizedSegmentLoadStore(NF) || !Tuple.hasOneUse() ||
+ Tuple.getOpcode() != RISCVISD::TUPLE_INSERT ||
+ !Tuple.getOperand(0).isUndef())
+ return SDValue();
+
+ SDValue Val = Tuple.getOperand(1);
+ unsigned Idx = Tuple.getConstantOperandVal(2);
+
+ unsigned SEW = Val.getValueType().getScalarSizeInBits();
+ assert(Log2_64(SEW) == N->getConstantOperandVal(6) &&
+ "Type mismatch without bitcast?");
+ unsigned Stride = SEW / 8 * NF;
+ unsigned Offset = SEW / 8 * Idx;
+
+ SDValue Ops[] = {
+ /*Chain=*/N->getOperand(0),
+ /*IntID=*/
+ DAG.getTargetConstant(Intrinsic::riscv_vsse_mask, DL, XLenVT),
+ /*StoredVal=*/Val,
+ /*Ptr=*/
+ DAG.getNode(ISD::ADD, DL, XLenVT, N->getOperand(3),
+ DAG.getConstant(Offset, DL, XLenVT)),
+ /*Stride=*/DAG.getConstant(Stride, DL, XLenVT),
+ /*Mask=*/N->getOperand(4),
+ /*VL=*/N->getOperand(5)};
+
+ auto *OldMemSD = cast<MemIntrinsicSDNode>(N);
+ // Match getTgtMemIntrinsic for non-unit stride case
+ EVT MemVT = OldMemSD->getMemoryVT().getScalarType();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ OldMemSD->getMemOperand(), Offset, MemoryLocation::UnknownSize);
+
+ SDVTList VTs = DAG.getVTList(MVT::Other);
+ return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, VTs, Ops, MemVT,
+ MMO);
+ }
}
}
case ISD::EXPERIMENTAL_VP_REVERSE:
@@ -20822,6 +20869,68 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
}
break;
}
+ case RISCVISD::TUPLE_EXTRACT: {
+ EVT VT = N->getValueType(0);
+ SDValue Tuple = N->getOperand(0);
+ unsigned Idx = N->getConstantOperandVal(1);
+ if (!Tuple.hasOneUse() || Tuple.getOpcode() != ISD::INTRINSIC_W_CHAIN)
+ break;
+
+ unsigned NF = 0;
+ switch (Tuple.getConstantOperandVal(1)) {
+ default:
+ break;
+ case Intrinsic::riscv_vlseg2_mask:
+ case Intrinsic::riscv_vlseg3_mask:
+ case Intrinsic::riscv_vlseg4_mask:
+ case Intrinsic::riscv_vlseg5_mask:
+ case Intrinsic::riscv_vlseg6_mask:
+ case Intrinsic::riscv_vlseg7_mask:
+ case Intrinsic::riscv_vlseg8_mask:
+ NF = Tuple.getValueType().getRISCVVectorTupleNumFields();
+ break;
+ }
+
+ if (!NF || Subtarget.hasOptimizedSegmentLoadStore(NF))
+ break;
+
+ unsigned SEW = VT.getScalarSizeInBits();
+ assert(Log2_64(SEW) == Tuple.getConstantOperandVal(7) &&
+ "Type mismatch without bitcast?");
+ unsigned Stride = SEW / 8 * NF;
+ unsigned Offset = SEW / 8 * Idx;
+
+ SDValue Ops[] = {
+ /*Chain=*/Tuple.getOperand(0),
+ /*IntID=*/DAG.getTargetConstant(Intrinsic::riscv_vlse_mask, DL, XLenVT),
+ /*Passthru=*/Tuple.getOperand(2),
+ /*Ptr=*/
+ DAG.getNode(ISD::ADD, DL, XLenVT, Tuple.getOperand(3),
+ DAG.getConstant(Offset, DL, XLenVT)),
+ /*Stride=*/DAG.getConstant(Stride, DL, XLenVT),
+ /*Mask=*/Tuple.getOperand(4),
+ /*VL=*/Tuple.getOperand(5),
+ /*Policy=*/Tuple.getOperand(6)};
+
+ auto *TupleMemSD = cast<MemIntrinsicSDNode>(Tuple);
+ // Match getTgtMemIntrinsic for non-unit stride case
+ EVT MemVT = TupleMemSD->getMemoryVT().getScalarType();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ TupleMemSD->getMemOperand(), Offset, MemoryLocation::UnknownSize);
+
+ SDVTList VTs = DAG.getVTList({VT, MVT::Other});
+ SDValue Result = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs,
+ Ops, MemVT, MMO);
+ DAG.ReplaceAllUsesOfValueWith(Tuple.getValue(1), Result.getValue(1));
+ return Result.getValue(0);
+ }
+ case RISCVISD::TUPLE_INSERT: {
+ // tuple_insert tuple, undef, idx -> tuple
+ if (N->getOperand(1).isUndef())
+ return N->getOperand(0);
+ break;
+ }
}
return SDValue();
@@ -22346,6 +22455,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
case CallingConv::C:
case CallingConv::Fast:
case CallingConv::SPIR_KERNEL:
+ case CallingConv::PreserveMost:
case CallingConv::GRAAL:
case CallingConv::RISCV_VectorCall:
#define CC_VLS_CASE(ABI_VLEN) case CallingConv::RISCV_VLSCall_##ABI_VLEN:
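The new vsseg*_mask combine fires when the stored tuple is a single TUPLE_INSERT into undef, i.e. only one of the NF fields carries data; the segment store is then replaced by a masked strided store (riscv_vsse_mask) whose stride is the segment size and whose base is offset to the live field. The matching vlseg*_mask case under TUPLE_EXTRACT does the same for loads via riscv_vlse_mask. A scalar reference model of the address arithmetic, in plain C++ for illustration only:

#include <cstdint>
#include <cstring>

// Stores VL copies of an SEWBytes-wide field at position Idx of an NF-field
// segment layout starting at Base: exactly the bytes a strided store with
// Stride = NF * SEWBytes from Base + Idx * SEWBytes would write.
static void storeOneSegmentField(uint8_t *Base, const uint8_t *Field,
                                 unsigned NF, unsigned Idx, unsigned SEWBytes,
                                 unsigned VL) {
  const unsigned Stride = NF * SEWBytes; // distance between segments
  const unsigned Offset = Idx * SEWBytes; // field position within a segment
  for (unsigned I = 0; I < VL; ++I)
    std::memcpy(Base + Offset + I * Stride, Field + I * SEWBytes, SEWBytes);
}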
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
index dd365cf..8297d50 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
@@ -136,6 +136,7 @@ class RVPUnary_ri<bits<2> w, bits<5> uf, string opcodestr>
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtP] in {
+let IsSignExtendingOpW = 1 in
def CLS : Unary_r<0b011000000011, 0b001, "cls">;
def ABS : Unary_r<0b011000000111, 0b001, "abs">;
} // Predicates = [HasStdExtP]
@@ -146,8 +147,10 @@ let Predicates = [HasStdExtP, IsRV64] in {
def REV16 : Unary_r<0b011010110000, 0b101, "rev16">;
def REV_RV64 : Unary_r<0b011010111111, 0b101, "rev">;
+let IsSignExtendingOpW = 1 in {
def CLSW : UnaryW_r<0b011000000011, 0b001, "clsw">;
def ABSW : UnaryW_r<0b011000000111, 0b001, "absw">;
+}
} // Predicates = [HasStdExtP, IsRV64]
let Predicates = [HasStdExtP] in {
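IsSignExtendingOpW tells RISCVOptWInstrs that an instruction's 64-bit result already equals the sign-extension of its low 32 bits, so a following sext.w can be removed. For CLSW that holds because the result is at most 31; ABSW, like other W-suffix instructions, presumably sign-extends its 32-bit result by definition. A scalar model of clsw, assuming the usual "count redundant leading sign bits" definition (as in ARM's CLS), makes the CLSW range obvious:

#include <cassert>
#include <cstdint>

// Count how many bits below the sign bit of the low 32 bits equal that sign
// bit. The result is always 0..31, so the 64-bit register value is already
// sign-extended from bit 31, which is what IsSignExtendingOpW asserts.
static int64_t clswModel(uint64_t Reg) {
  int32_t X = (int32_t)(uint32_t)Reg; // only the low 32 bits matter
  int Count = 0;
  for (int Bit = 30; Bit >= 0 && (((X >> Bit) & 1) == ((X >> 31) & 1)); --Bit)
    ++Count;
  return Count;
}

int main() {
  assert(clswModel(0) == 31);
  assert(clswModel(UINT64_C(0xffffffffffffffff)) == 31);
  assert(clswModel(1) == 30);
  assert(clswModel(0x80000000u) == 0); // sign bit set, bit 30 clear
}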
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
index f391300..5265613 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
@@ -1120,27 +1120,11 @@ let Predicates = [HasVendorXqcisync, IsRV32] in {
def QC_C_SYNCWF : QCIRVInst16CBSYNC<0b100, "qc.c.syncwf">;
def QC_C_SYNCWL : QCIRVInst16CBSYNC<0b101, "qc.c.syncwl">;
- let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in
- def QC_C_DELAY : RVInst16CI<0b000, 0b10, (outs),
- (ins uimm5nonzero:$imm),
- "qc.c.delay", "$imm"> {
- let Inst{12} = 0;
- let Inst{11-7} = 0;
- let Inst{6-2} = imm{4-0};
- }
+ // qc.c.delay implemented as an alias, below
} // Predicates = [HasVendorXqcisync, IsRV32]
let Predicates = [HasVendorXqcisim, IsRV32] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in {
- def QC_PSYSCALLI : RVInstI<0b010, OPC_OP_IMM, (outs), (ins uimm10:$imm10),
- "qc.psyscalli", "$imm10"> {
- bits<10> imm10;
-
- let rs1 = 0;
- let rd = 0;
- let imm12 = {0b00, imm10};
- }
-
def QC_PPUTCI : RVInstI<0b010, OPC_OP_IMM, (outs), (ins uimm8:$imm8),
"qc.pputci", "$imm8"> {
bits<8> imm8;
@@ -1150,18 +1134,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in {
let imm12 = {0b0100, imm8};
}
- def QC_PCOREDUMP : QCISim_NONE<0b0110, "qc.pcoredump">;
- def QC_PPREGS : QCISim_NONE<0b0111, "qc.ppregs">;
- def QC_PPREG : QCISim_RS1<0b1000, "qc.ppreg">;
- def QC_PPUTC : QCISim_RS1<0b1001, "qc.pputc">;
- def QC_PPUTS : QCISim_RS1<0b1010, "qc.pputs">;
- def QC_PEXIT : QCISim_RS1<0b1011, "qc.pexit">;
- def QC_PSYSCALL : QCISim_RS1<0b1100, "qc.psyscall">;
-
- def QC_C_PTRACE : RVInst16CI<0b000, 0b10, (outs), (ins), "qc.c.ptrace", ""> {
- let rd = 0;
- let imm = 0;
- }
+ // The other instructions are all implemented as aliases, below
} // mayLoad = 0, mayStore = 0, hasSideEffects = 1
} // Predicates = [HasVendorXqcisim, IsRV32]
@@ -1218,6 +1191,27 @@ let EmitPriority = 0 in {
} // EmitPriority = 0
} // Predicates = [HasVendorXqcilo, IsRV32]
+let Predicates = [HasVendorXqcisim, IsRV32] in {
+let EmitPriority = 1 in {
+ def : InstAlias<"qc.c.ptrace", (C_SLLI X0, 0)>;
+
+ def : InstAlias<"qc.psyscalli $imm", (SLTI X0, X0, uimm10:$imm)>;
+ def : InstAlias<"qc.pcoredump", (SLTI X0, X0, 1536)>;
+ def : InstAlias<"qc.ppregs", (SLTI X0, X0, 1792)>;
+ def : InstAlias<"qc.ppreg $rs1", (SLTI X0, GPR:$rs1, -2048)>;
+ def : InstAlias<"qc.pputc $rs1", (SLTI X0, GPR:$rs1, -1792)>;
+ def : InstAlias<"qc.pputs $rs1", (SLTI X0, GPR:$rs1, -1536)>;
+ def : InstAlias<"qc.pexit $rs1", (SLTI X0, GPR:$rs1, -1280)>;
+ def : InstAlias<"qc.psyscall $rs1", (SLTI X0, GPR:$rs1, -1024)>;
+} // EmitPriority = 1
+} // Predicates = [HasVendorXqcisim, IsRV32]
+
+let Predicates = [HasVendorXqcisync, IsRV32] in {
+let EmitPriority = 1 in {
+ def : InstAlias<"qc.c.delay $imm", (C_SLLI X0, uimm5nonzero:$imm)>;
+}
+} // Predicates = [HasVendorXqcisync, IsRV32]
+
//===----------------------------------------------------------------------===//
// Pseudo-instructions
//===----------------------------------------------------------------------===//
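The alias immediates come straight from the retired encodings: the Xqcisim instructions sat in SLTI-with-rd=x0 space, with the immediate built from a selector nibble plus a payload (the field widths below are an assumption read off the deleted QCISim_* defs). Sign-extending that 12-bit pattern reproduces each constant used in the InstAliases above:

#include <cassert>
#include <cstdint>

// Reassemble imm12 = {funct4, payload8} and sign-extend from 12 bits, the way
// the SLTI immediate field is interpreted.
static int32_t simImm12(uint32_t Funct4, uint32_t Payload8) {
  int32_t Imm = (int32_t)(((Funct4 & 0xf) << 8) | (Payload8 & 0xff));
  return (Imm & 0x800) ? Imm - 0x1000 : Imm;
}

int main() {
  assert(simImm12(0b0110, 0) == 1536);  // qc.pcoredump
  assert(simImm12(0b0111, 0) == 1792);  // qc.ppregs
  assert(simImm12(0b1000, 0) == -2048); // qc.ppreg
  assert(simImm12(0b1001, 0) == -1792); // qc.pputc
  assert(simImm12(0b1010, 0) == -1536); // qc.pputs
  assert(simImm12(0b1011, 0) == -1280); // qc.pexit
  assert(simImm12(0b1100, 0) == -1024); // qc.psyscall
}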
diff --git a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
index 30d8f85..726920e 100644
--- a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
@@ -216,29 +216,6 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
if (!isLegalInterleavedAccessType(VTy, Factor, Alignment, AS, DL))
return false;
- // If the segment load is going to be performed segment at a time anyways
- // and there's only one element used, use a strided load instead. This
- // will be equally fast, and create less vector register pressure.
- if (Indices.size() == 1 && !Subtarget.hasOptimizedSegmentLoadStore(Factor)) {
- unsigned ScalarSizeInBytes = DL.getTypeStoreSize(VTy->getElementType());
- Value *Stride = ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes);
- Value *Offset = ConstantInt::get(XLenTy, Indices[0] * ScalarSizeInBytes);
- Value *BasePtr = Builder.CreatePtrAdd(Ptr, Offset);
- // For rv64, need to truncate i64 to i32 to match signature. As VL is at most
- // the number of active lanes (which is bounded by i32) this is safe.
- VL = Builder.CreateTrunc(VL, Builder.getInt32Ty());
-
- CallInst *CI =
- Builder.CreateIntrinsic(Intrinsic::experimental_vp_strided_load,
- {VTy, BasePtr->getType(), Stride->getType()},
- {BasePtr, Stride, Mask, VL});
- Alignment = commonAlignment(Alignment, Indices[0] * ScalarSizeInBytes);
- CI->addParamAttr(0,
- Attribute::getWithAlignment(CI->getContext(), Alignment));
- Shuffles[0]->replaceAllUsesWith(CI);
- return true;
- };
-
CallInst *VlsegN = Builder.CreateIntrinsic(
FixedVlsegIntrIds[Factor - 2], {VTy, PtrTy, XLenTy}, {Ptr, Mask, VL});
@@ -289,33 +266,6 @@ bool RISCVTargetLowering::lowerInterleavedStore(Instruction *Store,
if (!isLegalInterleavedAccessType(VTy, Factor, Alignment, AS, DL))
return false;
- unsigned Index;
- // If the segment store only has one active lane (i.e. the interleave is
- // just a spread shuffle), we can use a strided store instead. This will
- // be equally fast, and create less vector register pressure.
- if (!Subtarget.hasOptimizedSegmentLoadStore(Factor) &&
- isSpreadMask(Mask, Factor, Index)) {
- unsigned ScalarSizeInBytes =
- DL.getTypeStoreSize(ShuffleVTy->getElementType());
- Value *Data = SVI->getOperand(0);
- Data = Builder.CreateExtractVector(VTy, Data, uint64_t(0));
- Value *Stride = ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes);
- Value *Offset = ConstantInt::get(XLenTy, Index * ScalarSizeInBytes);
- Value *BasePtr = Builder.CreatePtrAdd(Ptr, Offset);
- // For rv64, need to truncate i64 to i32 to match signature. As VL is at
- // most the number of active lanes (which is bounded by i32) this is safe.
- VL = Builder.CreateTrunc(VL, Builder.getInt32Ty());
-
- CallInst *CI =
- Builder.CreateIntrinsic(Intrinsic::experimental_vp_strided_store,
- {VTy, BasePtr->getType(), Stride->getType()},
- {Data, BasePtr, Stride, LaneMask, VL});
- Alignment = commonAlignment(Alignment, Index * ScalarSizeInBytes);
- CI->addParamAttr(1,
- Attribute::getWithAlignment(CI->getContext(), Alignment));
- return true;
- }
-
Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
Store->getModule(), FixedVssegIntrIds[Factor - 2], {VTy, PtrTy, XLenTy});
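Both removed fast paths are superseded by the DAG combines added in RISCVISelLowering.cpp above, which perform the same strided-access rewrite directly on the segment intrinsics. For reference, the deleted store path only fired on a "spread" shuffle, where a single field of each segment carries data; a simplified model of that mask shape (not the in-tree isSpreadMask, which may differ in details):

#include <vector>

// Builds the shuffle mask of an interleaved store that populates only field
// `Index` of each Factor-wide segment: source element G lands in lane
// G * Factor + Index and every other lane is undef (-1).
static std::vector<int> makeSpreadMask(unsigned NumSegments, unsigned Factor,
                                       unsigned Index) {
  std::vector<int> Mask(NumSegments * Factor, -1);
  for (unsigned G = 0; G < NumSegments; ++G)
    Mask[G * Factor + Index] = (int)G;
  return Mask;
}
// e.g. makeSpreadMask(4, 3, 1) == {-1,0,-1, -1,1,-1, -1,2,-1, -1,3,-1}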
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 5404123..7e58b6f 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -68,6 +68,9 @@ RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
if (MF->getFunction().getCallingConv() == CallingConv::GHC)
return CSR_NoRegs_SaveList;
+ if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
+ return Subtarget.hasStdExtE() ? CSR_RT_MostRegs_RVE_SaveList
+ : CSR_RT_MostRegs_SaveList;
if (MF->getFunction().hasFnAttribute("interrupt")) {
if (Subtarget.hasVInstructions()) {
if (Subtarget.hasStdExtD())
@@ -573,6 +576,7 @@ bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int64_t Val = Offset.getFixed();
int64_t Lo12 = SignExtend64<12>(Val);
unsigned Opc = MI.getOpcode();
+
if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
// We chose to emit the canonical immediate sequence rather than folding
// the offset into the using add under the theory that doing so doesn't
@@ -585,6 +589,9 @@ bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
(Lo12 & 0b11111) != 0) {
// Prefetch instructions require the offset to be 32 byte aligned.
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
+ } else if (Opc == RISCV::MIPS_PREFETCH && !isUInt<9>(Val)) {
+    // MIPS prefetch offsets must fit in an unsigned 9-bit immediate.
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
} else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
Opc == RISCV::PseudoRV32ZdinxSD) &&
Lo12 >= 2044) {
@@ -811,7 +818,13 @@ RISCVRegisterInfo::getCallPreservedMask(const MachineFunction & MF,
if (CC == CallingConv::GHC)
return CSR_NoRegs_RegMask;
- switch (Subtarget.getTargetABI()) {
+ RISCVABI::ABI ABI = Subtarget.getTargetABI();
+ if (CC == CallingConv::PreserveMost) {
+ if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
+ return CSR_RT_MostRegs_RVE_RegMask;
+ return CSR_RT_MostRegs_RegMask;
+ }
+ switch (ABI) {
default:
llvm_unreachable("Unrecognized ABI");
case RISCVABI::ABI_ILP32E:
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index fd634b5..61dbd06 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -1191,9 +1191,6 @@ static const CostTblEntry VectorIntrinsicCostTable[]{
{Intrinsic::roundeven, MVT::f64, 9},
{Intrinsic::rint, MVT::f32, 7},
{Intrinsic::rint, MVT::f64, 7},
- {Intrinsic::lrint, MVT::i32, 1},
- {Intrinsic::lrint, MVT::i64, 1},
- {Intrinsic::llrint, MVT::i64, 1},
{Intrinsic::nearbyint, MVT::f32, 9},
{Intrinsic::nearbyint, MVT::f64, 9},
{Intrinsic::bswap, MVT::i16, 3},
@@ -1251,11 +1248,48 @@ RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
switch (ICA.getID()) {
case Intrinsic::lrint:
case Intrinsic::llrint:
- // We can't currently lower half or bfloat vector lrint/llrint.
- if (auto *VecTy = dyn_cast<VectorType>(ICA.getArgTypes()[0]);
- VecTy && VecTy->getElementType()->is16bitFPTy())
- return InstructionCost::getInvalid();
- [[fallthrough]];
+ case Intrinsic::lround:
+ case Intrinsic::llround: {
+ auto LT = getTypeLegalizationCost(RetTy);
+ Type *SrcTy = ICA.getArgTypes().front();
+ auto SrcLT = getTypeLegalizationCost(SrcTy);
+ if (ST->hasVInstructions() && LT.second.isVector()) {
+ SmallVector<unsigned, 2> Ops;
+ unsigned SrcEltSz = DL.getTypeSizeInBits(SrcTy->getScalarType());
+ unsigned DstEltSz = DL.getTypeSizeInBits(RetTy->getScalarType());
+ if (LT.second.getVectorElementType() == MVT::bf16) {
+ if (!ST->hasVInstructionsBF16Minimal())
+ return InstructionCost::getInvalid();
+ if (DstEltSz == 32)
+ Ops = {RISCV::VFWCVTBF16_F_F_V, RISCV::VFCVT_X_F_V};
+ else
+ Ops = {RISCV::VFWCVTBF16_F_F_V, RISCV::VFWCVT_X_F_V};
+ } else if (LT.second.getVectorElementType() == MVT::f16 &&
+ !ST->hasVInstructionsF16()) {
+ if (!ST->hasVInstructionsF16Minimal())
+ return InstructionCost::getInvalid();
+ if (DstEltSz == 32)
+ Ops = {RISCV::VFWCVT_F_F_V, RISCV::VFCVT_X_F_V};
+ else
+ Ops = {RISCV::VFWCVT_F_F_V, RISCV::VFWCVT_X_F_V};
+
+ } else if (SrcEltSz > DstEltSz) {
+ Ops = {RISCV::VFNCVT_X_F_W};
+ } else if (SrcEltSz < DstEltSz) {
+ Ops = {RISCV::VFWCVT_X_F_V};
+ } else {
+ Ops = {RISCV::VFCVT_X_F_V};
+ }
+
+ // We need to use the source LMUL in the case of a narrowing op, and the
+ // destination LMUL otherwise.
+ if (SrcEltSz > DstEltSz)
+ return SrcLT.first *
+ getRISCVInstructionCost(Ops, SrcLT.second, CostKind);
+ return LT.first * getRISCVInstructionCost(Ops, LT.second, CostKind);
+ }
+ break;
+ }
case Intrinsic::ceil:
case Intrinsic::floor:
case Intrinsic::trunc:
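The cost model now prices vector lrint/llrint/lround/llround from the conversion instructions they lower to: which opcodes are needed depends on the relative element widths, with an extra widening FP convert for bf16 (Zvfbfmin) and for f16 when only Zvfhmin is available, and the count is scaled by the source type's LMUL for narrowing conversions and the result type's otherwise. A condensed model of the opcode selection; the strings stand in for the RISCV::VF*CVT* enums and the LMUL scaling is left out:

#include <string>
#include <vector>

static std::vector<std::string> pickCvtOps(unsigned SrcEltBits,
                                           unsigned DstEltBits,
                                           bool SrcIs16BitFPMinimalOnly) {
  if (SrcIs16BitFPMinimalOnly) {
    // Widen bf16/f16 to f32 first, then convert that to the integer type.
    if (DstEltBits == 32)
      return {"VFWCVT_F_F_V", "VFCVT_X_F_V"};  // 16-bit FP -> f32 -> i32
    return {"VFWCVT_F_F_V", "VFWCVT_X_F_V"};   // 16-bit FP -> f32 -> i64
  }
  if (SrcEltBits > DstEltBits)
    return {"VFNCVT_X_F_W"};                   // narrowing, e.g. f64 -> i32
  if (SrcEltBits < DstEltBits)
    return {"VFWCVT_X_F_V"};                   // widening, e.g. f32 -> i64
  return {"VFCVT_X_F_V"};                      // same width, e.g. f32 -> i32
}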