aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp')
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp276
1 files changed, 227 insertions, 49 deletions
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index a5bf0e5..9854c73 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -22,6 +22,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/IRBuilder.h"
@@ -126,6 +127,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, GRLenVT, Expand);
+ setOperationAction(ISD::BRCOND, MVT::Other, Custom);
setOperationAction(ISD::SELECT_CC, GRLenVT, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, GRLenVT, Expand);
@@ -339,6 +341,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
{MVT::v16i8, MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v8i16, MVT::v4i16,
MVT::v2i16, MVT::v4i32, MVT::v2i32, MVT::v2i64}) {
setOperationAction(ISD::TRUNCATE, VT, Custom);
+ setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
}
}
@@ -376,6 +379,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
setOperationAction(ISD::ABDS, VT, Legal);
setOperationAction(ISD::ABDU, VT, Legal);
+ setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
}
for (MVT VT : {MVT::v32i8, MVT::v16i16, MVT::v8i32})
setOperationAction(ISD::BITREVERSE, VT, Custom);
@@ -513,6 +517,8 @@ SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
return lowerPREFETCH(Op, DAG);
case ISD::SELECT:
return lowerSELECT(Op, DAG);
+ case ISD::BRCOND:
+ return lowerBRCOND(Op, DAG);
case ISD::FP_TO_FP16:
return lowerFP_TO_FP16(Op, DAG);
case ISD::FP16_TO_FP:
@@ -521,10 +527,62 @@ SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
return lowerFP_TO_BF16(Op, DAG);
case ISD::BF16_TO_FP:
return lowerBF16_TO_FP(Op, DAG);
+ case ISD::VECREDUCE_ADD:
+ return lowerVECREDUCE_ADD(Op, DAG);
}
return SDValue();
}
+// Lower vecreduce_add using vhaddw instructions.
+// For example:
+// call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a)
+// can be lowered to:
+// VHADDW_D_W vr0, vr0, vr0
+// VHADDW_Q_D vr0, vr0, vr0
+// VPICKVE2GR_D a0, vr0, 0
+// ADDI_W a0, a0, 0
+SDValue LoongArchTargetLowering::lowerVECREDUCE_ADD(SDValue Op,
+ SelectionDAG &DAG) const {
+
+ SDLoc DL(Op);
+ MVT OpVT = Op.getSimpleValueType();
+ SDValue Val = Op.getOperand(0);
+
+ unsigned NumEles = Val.getSimpleValueType().getVectorNumElements();
+ unsigned EleBits = Val.getSimpleValueType().getScalarSizeInBits();
+
+ unsigned LegalVecSize = 128;
+ bool isLASX256Vector =
+ Subtarget.hasExtLASX() && Val.getValueSizeInBits() == 256;
+
+  // Widen the operand vector until its type is legal for the target.
+ while (!isTypeLegal(Val.getSimpleValueType())) {
+ Val = DAG.WidenVector(Val, DL);
+ }
+
+  // NumEles determines the iteration count; v4i32 (LSX) and v8i32 (LASX)
+  // should require the same number of iterations.
+ if (isLASX256Vector) {
+ NumEles /= 2;
+ LegalVecSize = 256;
+ }
+
+ for (unsigned i = 1; i < NumEles; i *= 2, EleBits *= 2) {
+ MVT IntTy = MVT::getIntegerVT(EleBits);
+ MVT VecTy = MVT::getVectorVT(IntTy, LegalVecSize / EleBits);
+ Val = DAG.getNode(LoongArchISD::VHADDW, DL, VecTy, Val, Val);
+ }
+
+ if (isLASX256Vector) {
+ SDValue Tmp = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, Val,
+ DAG.getConstant(2, DL, MVT::i64));
+ Val = DAG.getNode(ISD::ADD, DL, MVT::v4i64, Tmp, Val);
+ }
+
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT, Val,
+ DAG.getConstant(0, DL, Subtarget.getGRLenVT()));
+}
+
SDValue LoongArchTargetLowering::lowerPREFETCH(SDValue Op,
SelectionDAG &DAG) const {
unsigned IsData = Op.getConstantOperandVal(4);
@@ -858,6 +916,35 @@ SDValue LoongArchTargetLowering::lowerSELECT(SDValue Op,
return DAG.getNode(LoongArchISD::SELECT_CC, DL, VT, Ops);
}
+SDValue LoongArchTargetLowering::lowerBRCOND(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue CondV = Op.getOperand(1);
+ SDLoc DL(Op);
+ MVT GRLenVT = Subtarget.getGRLenVT();
+
+ if (CondV.getOpcode() == ISD::SETCC) {
+ if (CondV.getOperand(0).getValueType() == GRLenVT) {
+ SDValue LHS = CondV.getOperand(0);
+ SDValue RHS = CondV.getOperand(1);
+ ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
+
+ translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
+
+ SDValue TargetCC = DAG.getCondCode(CCVal);
+ return DAG.getNode(LoongArchISD::BR_CC, DL, Op.getValueType(),
+ Op.getOperand(0), LHS, RHS, TargetCC,
+ Op.getOperand(2));
+ } else if (CondV.getOperand(0).getValueType().isFloatingPoint()) {
+ return DAG.getNode(LoongArchISD::BRCOND, DL, Op.getValueType(),
+ Op.getOperand(0), CondV, Op.getOperand(2));
+ }
+ }
+
+ return DAG.getNode(LoongArchISD::BR_CC, DL, Op.getValueType(),
+ Op.getOperand(0), CondV, DAG.getConstant(0, DL, GRLenVT),
+ DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
+}
+
SDValue
LoongArchTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
@@ -2786,7 +2873,7 @@ SDValue LoongArchTargetLowering::lowerUINT_TO_FP(SDValue Op,
EVT RetVT = Op.getValueType();
RTLIB::Libcall LC = RTLIB::getUINTTOFP(OpVT, RetVT);
MakeLibCallOptions CallOptions;
- CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
+ CallOptions.setTypeListBeforeSoften(OpVT, RetVT);
SDValue Chain = SDValue();
SDValue Result;
std::tie(Result, Chain) =
@@ -2811,7 +2898,7 @@ SDValue LoongArchTargetLowering::lowerSINT_TO_FP(SDValue Op,
EVT RetVT = Op.getValueType();
RTLIB::Libcall LC = RTLIB::getSINTTOFP(OpVT, RetVT);
MakeLibCallOptions CallOptions;
- CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
+ CallOptions.setTypeListBeforeSoften(OpVT, RetVT);
SDValue Chain = SDValue();
SDValue Result;
std::tie(Result, Chain) =
@@ -3037,10 +3124,7 @@ SDValue LoongArchTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
// Prepare argument list to generate call.
ArgListTy Args;
- ArgListEntry Entry;
- Entry.Node = Load;
- Entry.Ty = CallTy;
- Args.push_back(Entry);
+ Args.emplace_back(Load, CallTy);
// Setup call to __tls_get_addr.
TargetLowering::CallLoweringInfo CLI(DAG);
@@ -4107,7 +4191,7 @@ void LoongArchTargetLowering::ReplaceNodeResults(
LC = RTLIB::getFPTOSINT(Src.getValueType(), VT);
MakeLibCallOptions CallOptions;
EVT OpVT = Src.getValueType();
- CallOptions.setTypeListBeforeSoften(OpVT, VT, true);
+ CallOptions.setTypeListBeforeSoften(OpVT, VT);
SDValue Chain = SDValue();
SDValue Result;
std::tie(Result, Chain) =
@@ -4360,7 +4444,7 @@ void LoongArchTargetLowering::ReplaceNodeResults(
RTLIB::Libcall LC =
OpVT == MVT::f64 ? RTLIB::LROUND_F64 : RTLIB::LROUND_F32;
MakeLibCallOptions CallOptions;
- CallOptions.setTypeListBeforeSoften(OpVT, MVT::i64, true);
+ CallOptions.setTypeListBeforeSoften(OpVT, MVT::i64);
SDValue Result = makeLibCall(DAG, LC, MVT::i64, Op0, CallOptions, DL).first;
Result = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Result);
Results.push_back(Result);
@@ -4742,13 +4826,29 @@ static SDValue performBITCASTCombine(SDNode *N, SelectionDAG &DAG,
UseLASX = true;
break;
};
- if (UseLASX && !(Subtarget.has32S() && Subtarget.hasExtLASX()))
- return SDValue();
Src = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
: DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
- Opc = UseLASX ? LoongArchISD::XVMSKLTZ : LoongArchISD::VMSKLTZ;
- SDValue V = DAG.getNode(Opc, DL, MVT::i64, Src);
+ SDValue V;
+ if (!Subtarget.has32S() || !Subtarget.hasExtLASX()) {
+ if (Src.getSimpleValueType() == MVT::v32i8) {
+ SDValue Lo, Hi;
+ std::tie(Lo, Hi) = DAG.SplitVector(Src, DL);
+ Lo = DAG.getNode(LoongArchISD::VMSKLTZ, DL, MVT::i64, Lo);
+ Hi = DAG.getNode(LoongArchISD::VMSKLTZ, DL, MVT::i64, Hi);
+ Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
+ DAG.getConstant(16, DL, MVT::i8));
+ V = DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
+ } else if (UseLASX) {
+ return SDValue();
+ }
+ }
+
+ if (!V) {
+ Opc = UseLASX ? LoongArchISD::XVMSKLTZ : LoongArchISD::VMSKLTZ;
+ V = DAG.getNode(Opc, DL, MVT::i64, Src);
+ }
+
EVT T = EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
V = DAG.getZExtOrTrunc(V, DL, T);
return DAG.getBitcast(VT, V);
@@ -5156,6 +5256,71 @@ static SDValue performBITREV_WCombine(SDNode *N, SelectionDAG &DAG,
Src.getOperand(0));
}
+// Perform combines for BR_CC conditions.
+static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL,
+ SelectionDAG &DAG, const LoongArchSubtarget &Subtarget) {
+ ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();
+
+  // Since an arithmetic right shift preserves the sign bit,
+  // the shift can be omitted when comparing against zero.
+ // Fold setlt (sra X, N), 0 -> setlt X, 0 and
+ // setge (sra X, N), 0 -> setge X, 0
+ if (isNullConstant(RHS) && (CCVal == ISD::SETGE || CCVal == ISD::SETLT) &&
+ LHS.getOpcode() == ISD::SRA) {
+ LHS = LHS.getOperand(0);
+ return true;
+ }
+
+ if (!ISD::isIntEqualitySetCC(CCVal))
+ return false;
+
+ // Fold ((setlt X, Y), 0, ne) -> (X, Y, lt)
+ // Sometimes the setcc is introduced after br_cc/select_cc has been formed.
+ if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
+ LHS.getOperand(0).getValueType() == Subtarget.getGRLenVT()) {
+ // If we're looking for eq 0 instead of ne 0, we need to invert the
+ // condition.
+ bool Invert = CCVal == ISD::SETEQ;
+ CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
+ if (Invert)
+ CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
+
+ RHS = LHS.getOperand(1);
+ LHS = LHS.getOperand(0);
+ translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
+
+ CC = DAG.getCondCode(CCVal);
+ return true;
+ }
+
+ // (X, 1, setne) -> (X, 0, seteq) if we can prove X is 0/1.
+ // This can occur when legalizing some floating point comparisons.
+ APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
+ if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
+ CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
+ CC = DAG.getCondCode(CCVal);
+ RHS = DAG.getConstant(0, DL, LHS.getValueType());
+ return true;
+ }
+
+ return false;
+}
+
+static SDValue performBR_CCCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const LoongArchSubtarget &Subtarget) {
+ SDValue LHS = N->getOperand(1);
+ SDValue RHS = N->getOperand(2);
+ SDValue CC = N->getOperand(3);
+ SDLoc DL(N);
+
+ if (combine_CC(LHS, RHS, CC, DL, DAG, Subtarget))
+ return DAG.getNode(LoongArchISD::BR_CC, DL, N->getValueType(0),
+ N->getOperand(0), LHS, RHS, CC, N->getOperand(4));
+
+ return SDValue();
+}
+
template <unsigned N>
static SDValue legalizeIntrinsicImmArg(SDNode *Node, unsigned ImmOp,
SelectionDAG &DAG,
@@ -5848,6 +6013,8 @@ SDValue LoongArchTargetLowering::PerformDAGCombine(SDNode *N,
return performBITCASTCombine(N, DAG, DCI, Subtarget);
case LoongArchISD::BITREV_W:
return performBITREV_WCombine(N, DAG, DCI, Subtarget);
+ case LoongArchISD::BR_CC:
+ return performBR_CCCombine(N, DAG, DCI, Subtarget);
case ISD::INTRINSIC_WO_CHAIN:
return performINTRINSIC_WO_CHAINCombine(N, DAG, DCI, Subtarget);
case LoongArchISD::MOVGR2FR_W_LA64:
@@ -6042,17 +6209,20 @@ static MachineBasicBlock *
emitPseudoXVINSGR2VR(MachineInstr &MI, MachineBasicBlock *BB,
const LoongArchSubtarget &Subtarget) {
unsigned InsOp;
+ unsigned BroadcastOp;
unsigned HalfSize;
switch (MI.getOpcode()) {
default:
llvm_unreachable("Unexpected opcode");
case LoongArch::PseudoXVINSGR2VR_B:
HalfSize = 16;
- InsOp = LoongArch::VINSGR2VR_B;
+ BroadcastOp = LoongArch::XVREPLGR2VR_B;
+ InsOp = LoongArch::XVEXTRINS_B;
break;
case LoongArch::PseudoXVINSGR2VR_H:
HalfSize = 8;
- InsOp = LoongArch::VINSGR2VR_H;
+ BroadcastOp = LoongArch::XVREPLGR2VR_H;
+ InsOp = LoongArch::XVEXTRINS_H;
break;
}
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
@@ -6066,37 +6236,41 @@ emitPseudoXVINSGR2VR(MachineInstr &MI, MachineBasicBlock *BB,
Register Elt = MI.getOperand(2).getReg();
unsigned Idx = MI.getOperand(3).getImm();
- Register ScratchReg1 = XSrc;
- if (Idx >= HalfSize) {
- ScratchReg1 = MRI.createVirtualRegister(RC);
- BuildMI(*BB, MI, DL, TII->get(LoongArch::XVPERMI_D), ScratchReg1)
- .addReg(XSrc)
- .addImm(14);
- }
+ if (XSrc.isVirtual() && MRI.getVRegDef(XSrc)->isImplicitDef() &&
+ Idx < HalfSize) {
+ Register ScratchSubReg1 = MRI.createVirtualRegister(SubRC);
+ Register ScratchSubReg2 = MRI.createVirtualRegister(SubRC);
- Register ScratchSubReg1 = MRI.createVirtualRegister(SubRC);
- Register ScratchSubReg2 = MRI.createVirtualRegister(SubRC);
- BuildMI(*BB, MI, DL, TII->get(LoongArch::COPY), ScratchSubReg1)
- .addReg(ScratchReg1, 0, LoongArch::sub_128);
- BuildMI(*BB, MI, DL, TII->get(InsOp), ScratchSubReg2)
- .addReg(ScratchSubReg1)
- .addReg(Elt)
- .addImm(Idx >= HalfSize ? Idx - HalfSize : Idx);
+ BuildMI(*BB, MI, DL, TII->get(LoongArch::COPY), ScratchSubReg1)
+ .addReg(XSrc, 0, LoongArch::sub_128);
+ BuildMI(*BB, MI, DL,
+ TII->get(HalfSize == 8 ? LoongArch::VINSGR2VR_H
+ : LoongArch::VINSGR2VR_B),
+ ScratchSubReg2)
+ .addReg(ScratchSubReg1)
+ .addReg(Elt)
+ .addImm(Idx);
+
+ BuildMI(*BB, MI, DL, TII->get(LoongArch::SUBREG_TO_REG), XDst)
+ .addImm(0)
+ .addReg(ScratchSubReg2)
+ .addImm(LoongArch::sub_128);
+ } else {
+ Register ScratchReg1 = MRI.createVirtualRegister(RC);
+ Register ScratchReg2 = MRI.createVirtualRegister(RC);
- Register ScratchReg2 = XDst;
- if (Idx >= HalfSize)
- ScratchReg2 = MRI.createVirtualRegister(RC);
+ BuildMI(*BB, MI, DL, TII->get(BroadcastOp), ScratchReg1).addReg(Elt);
- BuildMI(*BB, MI, DL, TII->get(LoongArch::SUBREG_TO_REG), ScratchReg2)
- .addImm(0)
- .addReg(ScratchSubReg2)
- .addImm(LoongArch::sub_128);
+ BuildMI(*BB, MI, DL, TII->get(LoongArch::XVPERMI_Q), ScratchReg2)
+ .addReg(ScratchReg1)
+ .addReg(XSrc)
+ .addImm(Idx >= HalfSize ? 48 : 18);
- if (Idx >= HalfSize)
- BuildMI(*BB, MI, DL, TII->get(LoongArch::XVPERMI_Q), XDst)
+ BuildMI(*BB, MI, DL, TII->get(InsOp), XDst)
.addReg(XSrc)
.addReg(ScratchReg2)
- .addImm(2);
+ .addImm((Idx >= HalfSize ? Idx - HalfSize : Idx) * 17);
+ }
MI.eraseFromParent();
return BB;
@@ -6570,6 +6744,8 @@ const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(TAIL_MEDIUM)
NODE_NAME_CASE(TAIL_LARGE)
NODE_NAME_CASE(SELECT_CC)
+ NODE_NAME_CASE(BR_CC)
+ NODE_NAME_CASE(BRCOND)
NODE_NAME_CASE(SLL_W)
NODE_NAME_CASE(SRA_W)
NODE_NAME_CASE(SRL_W)
@@ -6654,6 +6830,7 @@ const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(XVMSKGEZ)
NODE_NAME_CASE(XVMSKEQZ)
NODE_NAME_CASE(XVMSKNEZ)
+ NODE_NAME_CASE(VHADDW)
}
#undef NODE_NAME_CASE
return nullptr;
@@ -6729,8 +6906,7 @@ static bool CC_LoongArchAssign2GRLen(unsigned GRLen, CCState &State,
static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI,
unsigned ValNo, MVT ValVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &State, bool IsFixed, bool IsRet,
- Type *OrigTy) {
+ CCState &State, bool IsRet, Type *OrigTy) {
unsigned GRLen = DL.getLargestLegalIntTypeSizeInBits();
assert((GRLen == 32 || GRLen == 64) && "Unspport GRLen");
MVT GRLenVT = GRLen == 32 ? MVT::i32 : MVT::i64;
@@ -6752,7 +6928,7 @@ static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI,
case LoongArchABI::ABI_LP64F:
case LoongArchABI::ABI_ILP32D:
case LoongArchABI::ABI_LP64D:
- UseGPRForFloat = !IsFixed;
+ UseGPRForFloat = ArgFlags.isVarArg();
break;
case LoongArchABI::ABI_ILP32S:
case LoongArchABI::ABI_LP64S:
@@ -6766,7 +6942,8 @@ static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI,
// will not be passed by registers if the original type is larger than
// 2*GRLen, so the register alignment rule does not apply.
unsigned TwoGRLenInBytes = (2 * GRLen) / 8;
- if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoGRLenInBytes &&
+ if (ArgFlags.isVarArg() &&
+ ArgFlags.getNonZeroOrigAlign() == TwoGRLenInBytes &&
DL.getTypeAllocSize(OrigTy) == TwoGRLenInBytes) {
unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
// Skip 'odd' register if necessary.
@@ -6916,7 +7093,7 @@ void LoongArchTargetLowering::analyzeInputArgs(
LoongArchABI::ABI ABI =
MF.getSubtarget<LoongArchSubtarget>().getTargetABI();
if (Fn(MF.getDataLayout(), ABI, i, ArgVT, CCValAssign::Full, Ins[i].Flags,
- CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
+ CCInfo, IsRet, ArgTy)) {
LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " << ArgVT
<< '\n');
llvm_unreachable("");
@@ -6934,7 +7111,7 @@ void LoongArchTargetLowering::analyzeOutputArgs(
LoongArchABI::ABI ABI =
MF.getSubtarget<LoongArchSubtarget>().getTargetABI();
if (Fn(MF.getDataLayout(), ABI, i, ArgVT, CCValAssign::Full, Outs[i].Flags,
- CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
+ CCInfo, IsRet, OrigTy)) {
LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " << ArgVT
<< "\n");
llvm_unreachable("");
@@ -7073,7 +7250,8 @@ static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
static bool CC_LoongArch_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+ CCState &State) {
if (LocVT == MVT::i32 || LocVT == MVT::i64) {
// Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, SpLim
// s0 s1 s2 s3 s4 s5 s6 s7 s8
@@ -7126,6 +7304,7 @@ SDValue LoongArchTargetLowering::LowerFormalArguments(
llvm_unreachable("Unsupported calling convention");
case CallingConv::C:
case CallingConv::Fast:
+ case CallingConv::PreserveMost:
break;
case CallingConv::GHC:
if (!MF.getSubtarget().hasFeature(LoongArch::FeatureBasicF) ||
@@ -7647,8 +7826,7 @@ bool LoongArchTargetLowering::CanLowerReturn(
LoongArchABI::ABI ABI =
MF.getSubtarget<LoongArchSubtarget>().getTargetABI();
if (CC_LoongArch(MF.getDataLayout(), ABI, i, Outs[i].VT, CCValAssign::Full,
- Outs[i].Flags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true,
- nullptr))
+ Outs[i].Flags, CCInfo, /*IsRet=*/true, nullptr))
return false;
}
return true;
@@ -7888,7 +8066,7 @@ LoongArchTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
if (Size < 32 && (AI->getOperation() == AtomicRMWInst::And ||
AI->getOperation() == AtomicRMWInst::Or ||
AI->getOperation() == AtomicRMWInst::Xor))
- return AtomicExpansionKind::Expand;
+ return AtomicExpansionKind::CustomExpand;
if (AI->getOperation() == AtomicRMWInst::Nand || Size < 32)
return AtomicExpansionKind::CmpXChg;
}