Diffstat (limited to 'llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp')
-rw-r--r-- llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp | 480
1 file changed, 380 insertions(+), 100 deletions(-)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index a5bf0e5..fe650a0 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -22,6 +22,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/IRBuilder.h"
@@ -126,6 +127,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, GRLenVT, Expand);
+ setOperationAction(ISD::BRCOND, MVT::Other, Custom);
setOperationAction(ISD::SELECT_CC, GRLenVT, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, GRLenVT, Expand);
@@ -339,6 +341,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
{MVT::v16i8, MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v8i16, MVT::v4i16,
MVT::v2i16, MVT::v4i32, MVT::v2i32, MVT::v2i64}) {
setOperationAction(ISD::TRUNCATE, VT, Custom);
+ setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
}
}
@@ -376,6 +379,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
setOperationAction(ISD::ABDS, VT, Legal);
setOperationAction(ISD::ABDU, VT, Legal);
+ setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
}
for (MVT VT : {MVT::v32i8, MVT::v16i16, MVT::v8i32})
setOperationAction(ISD::BITREVERSE, VT, Custom);
@@ -513,6 +517,8 @@ SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
return lowerPREFETCH(Op, DAG);
case ISD::SELECT:
return lowerSELECT(Op, DAG);
+ case ISD::BRCOND:
+ return lowerBRCOND(Op, DAG);
case ISD::FP_TO_FP16:
return lowerFP_TO_FP16(Op, DAG);
case ISD::FP16_TO_FP:
@@ -521,10 +527,62 @@ SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
return lowerFP_TO_BF16(Op, DAG);
case ISD::BF16_TO_FP:
return lowerBF16_TO_FP(Op, DAG);
+ case ISD::VECREDUCE_ADD:
+ return lowerVECREDUCE_ADD(Op, DAG);
}
return SDValue();
}
+// Lower vecreduce_add using vhaddw instructions.
+// For example:
+// call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a)
+// can be lowered to:
+// VHADDW_D_W vr0, vr0, vr0
+// VHADDW_Q_D vr0, vr0, vr0
+// VPICKVE2GR_D a0, vr0, 0
+// ADDI_W a0, a0, 0
+SDValue LoongArchTargetLowering::lowerVECREDUCE_ADD(SDValue Op,
+ SelectionDAG &DAG) const {
+
+ SDLoc DL(Op);
+ MVT OpVT = Op.getSimpleValueType();
+ SDValue Val = Op.getOperand(0);
+
+ unsigned NumEles = Val.getSimpleValueType().getVectorNumElements();
+ unsigned EleBits = Val.getSimpleValueType().getScalarSizeInBits();
+
+ unsigned LegalVecSize = 128;
+ bool isLASX256Vector =
+ Subtarget.hasExtLASX() && Val.getValueSizeInBits() == 256;
+
+ // Ensure the operand type is legal, widening it if necessary.
+ while (!isTypeLegal(Val.getSimpleValueType())) {
+ Val = DAG.WidenVector(Val, DL);
+ }
+
+ // NumEles determines the iteration count; v4i32 for LSX and v8i32 for LASX
+ // should take the same number of iterations.
+ if (isLASX256Vector) {
+ NumEles /= 2;
+ LegalVecSize = 256;
+ }
+
+ for (unsigned i = 1; i < NumEles; i *= 2, EleBits *= 2) {
+ MVT IntTy = MVT::getIntegerVT(EleBits);
+ MVT VecTy = MVT::getVectorVT(IntTy, LegalVecSize / EleBits);
+ Val = DAG.getNode(LoongArchISD::VHADDW, DL, VecTy, Val, Val);
+ }
+
+ if (isLASX256Vector) {
+ SDValue Tmp = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, Val,
+ DAG.getConstant(2, DL, MVT::i64));
+ Val = DAG.getNode(ISD::ADD, DL, MVT::v4i64, Tmp, Val);
+ }
+
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT, Val,
+ DAG.getConstant(0, DL, Subtarget.getGRLenVT()));
+}
+
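// A rough scalar model of the reduction strategy above (illustrative sketch
// only; the helper name is hypothetical): each VHADDW step pair-sums adjacent
// elements into double-width results, halving the element count. For LASX the
// trailing XVPERMI/ADD folds the two 128-bit lane sums together before the
// final element extract.
#include <cstddef>
#include <cstdint>
#include <vector>

static int64_t modelVecReduceAdd(const int32_t *Elts, unsigned NumEles) {
  std::vector<int64_t> Sums(Elts, Elts + NumEles);
  while (Sums.size() > 1) {
    std::vector<int64_t> Next;
    for (std::size_t I = 0; I + 1 < Sums.size(); I += 2)
      Next.push_back(Sums[I] + Sums[I + 1]); // one VHADDW pair-sum
    Sums.swap(Next);
  }
  return Sums[0]; // extracted with VPICKVE2GR in the emitted sequence
}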
SDValue LoongArchTargetLowering::lowerPREFETCH(SDValue Op,
SelectionDAG &DAG) const {
unsigned IsData = Op.getConstantOperandVal(4);
@@ -858,6 +916,35 @@ SDValue LoongArchTargetLowering::lowerSELECT(SDValue Op,
return DAG.getNode(LoongArchISD::SELECT_CC, DL, VT, Ops);
}
+SDValue LoongArchTargetLowering::lowerBRCOND(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue CondV = Op.getOperand(1);
+ SDLoc DL(Op);
+ MVT GRLenVT = Subtarget.getGRLenVT();
+
+ if (CondV.getOpcode() == ISD::SETCC) {
+ if (CondV.getOperand(0).getValueType() == GRLenVT) {
+ SDValue LHS = CondV.getOperand(0);
+ SDValue RHS = CondV.getOperand(1);
+ ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
+
+ translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
+
+ SDValue TargetCC = DAG.getCondCode(CCVal);
+ return DAG.getNode(LoongArchISD::BR_CC, DL, Op.getValueType(),
+ Op.getOperand(0), LHS, RHS, TargetCC,
+ Op.getOperand(2));
+ } else if (CondV.getOperand(0).getValueType().isFloatingPoint()) {
+ return DAG.getNode(LoongArchISD::BRCOND, DL, Op.getValueType(),
+ Op.getOperand(0), CondV, Op.getOperand(2));
+ }
+ }
+
+ return DAG.getNode(LoongArchISD::BR_CC, DL, Op.getValueType(),
+ Op.getOperand(0), CondV, DAG.getConstant(0, DL, GRLenVT),
+ DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
+}
+
SDValue
LoongArchTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
@@ -1030,6 +1117,7 @@ static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
static SDValue lowerVECTOR_SHUFFLEAsShift(const SDLoc &DL, ArrayRef<int> Mask,
MVT VT, SDValue V1, SDValue V2,
SelectionDAG &DAG,
+ const LoongArchSubtarget &Subtarget,
const APInt &Zeroable) {
int Size = Mask.size();
assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
@@ -1056,7 +1144,7 @@ static SDValue lowerVECTOR_SHUFFLEAsShift(const SDLoc &DL, ArrayRef<int> Mask,
"Illegal integer vector type");
V = DAG.getBitcast(ShiftVT, V);
V = DAG.getNode(Opcode, DL, ShiftVT, V,
- DAG.getConstant(ShiftAmt, DL, MVT::i64));
+ DAG.getConstant(ShiftAmt, DL, Subtarget.getGRLenVT()));
return DAG.getBitcast(VT, V);
}
@@ -1225,10 +1313,10 @@ static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
/// (VBSRL_V $v1, $v1, 8)
/// (VBSLL_V $v0, $v0, 8)
/// (VOR_V $v0, $V0, $v1)
-static SDValue lowerVECTOR_SHUFFLEAsByteRotate(const SDLoc &DL,
- ArrayRef<int> Mask, MVT VT,
- SDValue V1, SDValue V2,
- SelectionDAG &DAG) {
+static SDValue
+lowerVECTOR_SHUFFLEAsByteRotate(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
+ SDValue V1, SDValue V2, SelectionDAG &DAG,
+ const LoongArchSubtarget &Subtarget) {
SDValue Lo = V1, Hi = V2;
int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
@@ -1241,11 +1329,12 @@ static SDValue lowerVECTOR_SHUFFLEAsByteRotate(const SDLoc &DL,
int LoByteShift = 16 - ByteRotation;
int HiByteShift = ByteRotation;
+ MVT GRLenVT = Subtarget.getGRLenVT();
SDValue LoShift = DAG.getNode(LoongArchISD::VBSLL, DL, ByteVT, Lo,
- DAG.getConstant(LoByteShift, DL, MVT::i64));
+ DAG.getConstant(LoByteShift, DL, GRLenVT));
SDValue HiShift = DAG.getNode(LoongArchISD::VBSRL, DL, ByteVT, Hi,
- DAG.getConstant(HiByteShift, DL, MVT::i64));
+ DAG.getConstant(HiByteShift, DL, GRLenVT));
return DAG.getBitcast(VT, DAG.getNode(ISD::OR, DL, ByteVT, LoShift, HiShift));
}
@@ -1350,9 +1439,10 @@ static SDValue lowerVECTOR_SHUFFLEAsZeroOrAnyExtend(const SDLoc &DL,
///
/// When undef's appear in the mask they are treated as if they were whatever
/// value is necessary in order to fit the above form.
-static SDValue lowerVECTOR_SHUFFLE_VREPLVEI(const SDLoc &DL, ArrayRef<int> Mask,
- MVT VT, SDValue V1, SDValue V2,
- SelectionDAG &DAG) {
+static SDValue
+lowerVECTOR_SHUFFLE_VREPLVEI(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
+ SDValue V1, SDValue V2, SelectionDAG &DAG,
+ const LoongArchSubtarget &Subtarget) {
int SplatIndex = -1;
for (const auto &M : Mask) {
if (M != -1) {
@@ -1368,7 +1458,7 @@ static SDValue lowerVECTOR_SHUFFLE_VREPLVEI(const SDLoc &DL, ArrayRef<int> Mask,
if (fitsRegularPattern<int>(Mask.begin(), 1, Mask.end(), SplatIndex, 0)) {
APInt Imm(64, SplatIndex);
return DAG.getNode(LoongArchISD::VREPLVEI, DL, VT, V1,
- DAG.getConstant(Imm, DL, MVT::i64));
+ DAG.getConstant(Imm, DL, Subtarget.getGRLenVT()));
}
return SDValue();
@@ -1392,9 +1482,10 @@ static SDValue lowerVECTOR_SHUFFLE_VREPLVEI(const SDLoc &DL, ArrayRef<int> Mask,
/// (VSHUF4I_H $v0, $v1, 27)
/// where the 27 comes from:
/// 3 + (2 << 2) + (1 << 4) + (0 << 6)
-static SDValue lowerVECTOR_SHUFFLE_VSHUF4I(const SDLoc &DL, ArrayRef<int> Mask,
- MVT VT, SDValue V1, SDValue V2,
- SelectionDAG &DAG) {
+static SDValue
+lowerVECTOR_SHUFFLE_VSHUF4I(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
+ SDValue V1, SDValue V2, SelectionDAG &DAG,
+ const LoongArchSubtarget &Subtarget) {
unsigned SubVecSize = 4;
if (VT == MVT::v2f64 || VT == MVT::v2i64)
@@ -1436,13 +1527,15 @@ static SDValue lowerVECTOR_SHUFFLE_VSHUF4I(const SDLoc &DL, ArrayRef<int> Mask,
Imm |= M & 0x3;
}
+ MVT GRLenVT = Subtarget.getGRLenVT();
+
// Return vshuf4i.d
if (VT == MVT::v2f64 || VT == MVT::v2i64)
return DAG.getNode(LoongArchISD::VSHUF4I, DL, VT, V1, V2,
- DAG.getConstant(Imm, DL, MVT::i64));
+ DAG.getConstant(Imm, DL, GRLenVT));
return DAG.getNode(LoongArchISD::VSHUF4I, DL, VT, V1,
- DAG.getConstant(Imm, DL, MVT::i64));
+ DAG.getConstant(Imm, DL, GRLenVT));
}
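// Recomputing the documented example immediate: for VSHUF4I_H with mask
// <3, 2, 1, 0>, each 2-bit field of the immediate holds one mask element,
// lowest field first (illustrative check only).
static_assert((3 << 0) + (2 << 2) + (1 << 4) + (0 << 6) == 27,
              "matches the example in the VSHUF4I comment");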
/// Lower VECTOR_SHUFFLE into VPACKEV (if possible).
@@ -1722,7 +1815,8 @@ static SDValue lowerVECTOR_SHUFFLE_VSHUF(const SDLoc &DL, ArrayRef<int> Mask,
/// This routine breaks down the specific type of 128-bit shuffle and
/// dispatches to the lowering routines accordingly.
static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
- SDValue V1, SDValue V2, SelectionDAG &DAG) {
+ SDValue V1, SDValue V2, SelectionDAG &DAG,
+ const LoongArchSubtarget &Subtarget) {
assert((VT.SimpleTy == MVT::v16i8 || VT.SimpleTy == MVT::v8i16 ||
VT.SimpleTy == MVT::v4i32 || VT.SimpleTy == MVT::v2i64 ||
VT.SimpleTy == MVT::v4f32 || VT.SimpleTy == MVT::v2f64) &&
@@ -1740,9 +1834,11 @@ static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
SDValue Result;
// TODO: Add more comparison patterns.
if (V2.isUndef()) {
- if ((Result = lowerVECTOR_SHUFFLE_VREPLVEI(DL, Mask, VT, V1, V2, DAG)))
+ if ((Result = lowerVECTOR_SHUFFLE_VREPLVEI(DL, Mask, VT, V1, V2, DAG,
+ Subtarget)))
return Result;
- if ((Result = lowerVECTOR_SHUFFLE_VSHUF4I(DL, Mask, VT, V1, V2, DAG)))
+ if ((Result =
+ lowerVECTOR_SHUFFLE_VSHUF4I(DL, Mask, VT, V1, V2, DAG, Subtarget)))
return Result;
// TODO: This comment may be enabled in the future to better match the
@@ -1765,15 +1861,17 @@ static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
if ((Result = lowerVECTOR_SHUFFLE_VPICKOD(DL, Mask, VT, V1, V2, DAG)))
return Result;
if ((VT.SimpleTy == MVT::v2i64 || VT.SimpleTy == MVT::v2f64) &&
- (Result = lowerVECTOR_SHUFFLE_VSHUF4I(DL, Mask, VT, V1, V2, DAG)))
+ (Result =
+ lowerVECTOR_SHUFFLE_VSHUF4I(DL, Mask, VT, V1, V2, DAG, Subtarget)))
return Result;
if ((Result = lowerVECTOR_SHUFFLEAsZeroOrAnyExtend(DL, Mask, VT, V1, V2, DAG,
Zeroable)))
return Result;
- if ((Result =
- lowerVECTOR_SHUFFLEAsShift(DL, Mask, VT, V1, V2, DAG, Zeroable)))
+ if ((Result = lowerVECTOR_SHUFFLEAsShift(DL, Mask, VT, V1, V2, DAG, Subtarget,
+ Zeroable)))
return Result;
- if ((Result = lowerVECTOR_SHUFFLEAsByteRotate(DL, Mask, VT, V1, V2, DAG)))
+ if ((Result = lowerVECTOR_SHUFFLEAsByteRotate(DL, Mask, VT, V1, V2, DAG,
+ Subtarget)))
return Result;
if (SDValue NewShuffle = widenShuffleMask(DL, Mask, VT, V1, V2, DAG))
return NewShuffle;
@@ -1790,10 +1888,10 @@ static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
///
/// When undef's appear in the mask they are treated as if they were whatever
/// value is necessary in order to fit the above form.
-static SDValue lowerVECTOR_SHUFFLE_XVREPLVEI(const SDLoc &DL,
- ArrayRef<int> Mask, MVT VT,
- SDValue V1, SDValue V2,
- SelectionDAG &DAG) {
+static SDValue
+lowerVECTOR_SHUFFLE_XVREPLVEI(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
+ SDValue V1, SDValue V2, SelectionDAG &DAG,
+ const LoongArchSubtarget &Subtarget) {
int SplatIndex = -1;
for (const auto &M : Mask) {
if (M != -1) {
@@ -1815,21 +1913,22 @@ static SDValue lowerVECTOR_SHUFFLE_XVREPLVEI(const SDLoc &DL,
0)) {
APInt Imm(64, SplatIndex);
return DAG.getNode(LoongArchISD::VREPLVEI, DL, VT, V1,
- DAG.getConstant(Imm, DL, MVT::i64));
+ DAG.getConstant(Imm, DL, Subtarget.getGRLenVT()));
}
return SDValue();
}
/// Lower VECTOR_SHUFFLE into XVSHUF4I (if possible).
-static SDValue lowerVECTOR_SHUFFLE_XVSHUF4I(const SDLoc &DL, ArrayRef<int> Mask,
- MVT VT, SDValue V1, SDValue V2,
- SelectionDAG &DAG) {
+static SDValue
+lowerVECTOR_SHUFFLE_XVSHUF4I(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
+ SDValue V1, SDValue V2, SelectionDAG &DAG,
+ const LoongArchSubtarget &Subtarget) {
// When the size is less than or equal to 4, lower cost instructions may be
// used.
if (Mask.size() <= 4)
return SDValue();
- return lowerVECTOR_SHUFFLE_VSHUF4I(DL, Mask, VT, V1, V2, DAG);
+ return lowerVECTOR_SHUFFLE_VSHUF4I(DL, Mask, VT, V1, V2, DAG, Subtarget);
}
/// Lower VECTOR_SHUFFLE into XVPACKEV (if possible).
@@ -2059,15 +2158,15 @@ static SDValue lowerVECTOR_SHUFFLE_XVSHUF(const SDLoc &DL, ArrayRef<int> Mask,
/// cases need to be converted to it for processing.
///
/// This function may modify V1, V2 and Mask
-static void canonicalizeShuffleVectorByLane(const SDLoc &DL,
- MutableArrayRef<int> Mask, MVT VT,
- SDValue &V1, SDValue &V2,
- SelectionDAG &DAG) {
+static void canonicalizeShuffleVectorByLane(
+ const SDLoc &DL, MutableArrayRef<int> Mask, MVT VT, SDValue &V1,
+ SDValue &V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget) {
enum HalfMaskType { HighLaneTy, LowLaneTy, None };
int MaskSize = Mask.size();
int HalfSize = Mask.size() / 2;
+ MVT GRLenVT = Subtarget.getGRLenVT();
HalfMaskType preMask = None, postMask = None;
@@ -2105,13 +2204,13 @@ static void canonicalizeShuffleVectorByLane(const SDLoc &DL,
if (preMask == LowLaneTy && postMask == HighLaneTy) {
V1 = DAG.getBitcast(MVT::v4i64, V1);
V1 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V1,
- DAG.getConstant(0b01001110, DL, MVT::i64));
+ DAG.getConstant(0b01001110, DL, GRLenVT));
V1 = DAG.getBitcast(VT, V1);
if (!V2.isUndef()) {
V2 = DAG.getBitcast(MVT::v4i64, V2);
V2 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V2,
- DAG.getConstant(0b01001110, DL, MVT::i64));
+ DAG.getConstant(0b01001110, DL, GRLenVT));
V2 = DAG.getBitcast(VT, V2);
}
@@ -2124,13 +2223,13 @@ static void canonicalizeShuffleVectorByLane(const SDLoc &DL,
} else if (preMask == LowLaneTy && postMask == LowLaneTy) {
V1 = DAG.getBitcast(MVT::v4i64, V1);
V1 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V1,
- DAG.getConstant(0b11101110, DL, MVT::i64));
+ DAG.getConstant(0b11101110, DL, GRLenVT));
V1 = DAG.getBitcast(VT, V1);
if (!V2.isUndef()) {
V2 = DAG.getBitcast(MVT::v4i64, V2);
V2 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V2,
- DAG.getConstant(0b11101110, DL, MVT::i64));
+ DAG.getConstant(0b11101110, DL, GRLenVT));
V2 = DAG.getBitcast(VT, V2);
}
@@ -2140,13 +2239,13 @@ static void canonicalizeShuffleVectorByLane(const SDLoc &DL,
} else if (preMask == HighLaneTy && postMask == HighLaneTy) {
V1 = DAG.getBitcast(MVT::v4i64, V1);
V1 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V1,
- DAG.getConstant(0b01000100, DL, MVT::i64));
+ DAG.getConstant(0b01000100, DL, GRLenVT));
V1 = DAG.getBitcast(VT, V1);
if (!V2.isUndef()) {
V2 = DAG.getBitcast(MVT::v4i64, V2);
V2 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V2,
- DAG.getConstant(0b01000100, DL, MVT::i64));
+ DAG.getConstant(0b01000100, DL, GRLenVT));
V2 = DAG.getBitcast(VT, V2);
}
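// Decoding the XVPERMI_D immediates used in this function (illustrative
// helper; it assumes each 2-bit field of the immediate selects the source
// 64-bit element written to the corresponding destination position):
#include <array>

static std::array<unsigned, 4> decodeXVPermID(unsigned Imm) {
  return {{Imm & 3, (Imm >> 2) & 3, (Imm >> 4) & 3, (Imm >> 6) & 3}};
}
// decodeXVPermID(0b01001110) == {2, 3, 0, 1}: swap the two 128-bit lanes.
// decodeXVPermID(0b11101110) == {2, 3, 2, 3}: duplicate the high lane.
// decodeXVPermID(0b01000100) == {0, 1, 0, 1}: duplicate the low lane.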
@@ -2208,7 +2307,8 @@ static SDValue lowerVECTOR_SHUFFLEAsLanePermuteAndShuffle(const SDLoc &DL,
/// This routine breaks down the specific type of 256-bit shuffle and
/// dispatches to the lowering routines accordingly.
static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
- SDValue V1, SDValue V2, SelectionDAG &DAG) {
+ SDValue V1, SDValue V2, SelectionDAG &DAG,
+ const LoongArchSubtarget &Subtarget) {
assert((VT.SimpleTy == MVT::v32i8 || VT.SimpleTy == MVT::v16i16 ||
VT.SimpleTy == MVT::v8i32 || VT.SimpleTy == MVT::v4i64 ||
VT.SimpleTy == MVT::v8f32 || VT.SimpleTy == MVT::v4f64) &&
@@ -2222,7 +2322,7 @@ static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
// canonicalize non cross-lane shuffle vector
SmallVector<int> NewMask(Mask);
- canonicalizeShuffleVectorByLane(DL, NewMask, VT, V1, V2, DAG);
+ canonicalizeShuffleVectorByLane(DL, NewMask, VT, V1, V2, DAG, Subtarget);
APInt KnownUndef, KnownZero;
computeZeroableShuffleElements(NewMask, V1, V2, KnownUndef, KnownZero);
@@ -2231,9 +2331,11 @@ static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
SDValue Result;
// TODO: Add more comparison patterns.
if (V2.isUndef()) {
- if ((Result = lowerVECTOR_SHUFFLE_XVREPLVEI(DL, NewMask, VT, V1, V2, DAG)))
+ if ((Result = lowerVECTOR_SHUFFLE_XVREPLVEI(DL, NewMask, VT, V1, V2, DAG,
+ Subtarget)))
return Result;
- if ((Result = lowerVECTOR_SHUFFLE_XVSHUF4I(DL, NewMask, VT, V1, V2, DAG)))
+ if ((Result = lowerVECTOR_SHUFFLE_XVSHUF4I(DL, NewMask, VT, V1, V2, DAG,
+ Subtarget)))
return Result;
if ((Result = lowerVECTOR_SHUFFLEAsLanePermuteAndShuffle(DL, NewMask, VT,
V1, V2, DAG)))
@@ -2258,10 +2360,11 @@ static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
return Result;
if ((Result = lowerVECTOR_SHUFFLE_XVPICKOD(DL, NewMask, VT, V1, V2, DAG)))
return Result;
- if ((Result =
- lowerVECTOR_SHUFFLEAsShift(DL, NewMask, VT, V1, V2, DAG, Zeroable)))
+ if ((Result = lowerVECTOR_SHUFFLEAsShift(DL, NewMask, VT, V1, V2, DAG,
+ Subtarget, Zeroable)))
return Result;
- if ((Result = lowerVECTOR_SHUFFLEAsByteRotate(DL, NewMask, VT, V1, V2, DAG)))
+ if ((Result = lowerVECTOR_SHUFFLEAsByteRotate(DL, NewMask, VT, V1, V2, DAG,
+ Subtarget)))
return Result;
if (SDValue NewShuffle = widenShuffleMask(DL, NewMask, VT, V1, V2, DAG))
return NewShuffle;
@@ -2313,10 +2416,10 @@ SDValue LoongArchTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
// For each vector width, delegate to a specialized lowering routine.
if (VT.is128BitVector())
- return lower128BitShuffle(DL, OrigMask, VT, V1, V2, DAG);
+ return lower128BitShuffle(DL, OrigMask, VT, V1, V2, DAG, Subtarget);
if (VT.is256BitVector())
- return lower256BitShuffle(DL, OrigMask, VT, V1, V2, DAG);
+ return lower256BitShuffle(DL, OrigMask, VT, V1, V2, DAG, Subtarget);
return SDValue();
}
@@ -2460,6 +2563,16 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
SplatBitSize != 64)
return SDValue();
+ if (SplatBitSize == 64 && !Subtarget.is64Bit()) {
+ // We can only handle 64-bit elements that are within
+ // the signed 32-bit range on 32-bit targets.
+ if (!SplatValue.isSignedIntN(32))
+ return SDValue();
+ if ((Is128Vec && ResTy == MVT::v4i32) ||
+ (Is256Vec && ResTy == MVT::v8i32))
+ return Op;
+ }
+
EVT ViaVecTy;
switch (SplatBitSize) {
@@ -2786,7 +2899,7 @@ SDValue LoongArchTargetLowering::lowerUINT_TO_FP(SDValue Op,
EVT RetVT = Op.getValueType();
RTLIB::Libcall LC = RTLIB::getUINTTOFP(OpVT, RetVT);
MakeLibCallOptions CallOptions;
- CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
+ CallOptions.setTypeListBeforeSoften(OpVT, RetVT);
SDValue Chain = SDValue();
SDValue Result;
std::tie(Result, Chain) =
@@ -2811,7 +2924,7 @@ SDValue LoongArchTargetLowering::lowerSINT_TO_FP(SDValue Op,
EVT RetVT = Op.getValueType();
RTLIB::Libcall LC = RTLIB::getSINTTOFP(OpVT, RetVT);
MakeLibCallOptions CallOptions;
- CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
+ CallOptions.setTypeListBeforeSoften(OpVT, RetVT);
SDValue Chain = SDValue();
SDValue Result;
std::tie(Result, Chain) =
@@ -3037,10 +3150,7 @@ SDValue LoongArchTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
// Prepare argument list to generate call.
ArgListTy Args;
- ArgListEntry Entry;
- Entry.Node = Load;
- Entry.Ty = CallTy;
- Args.push_back(Entry);
+ Args.emplace_back(Load, CallTy);
// Setup call to __tls_get_addr.
TargetLowering::CallLoweringInfo CLI(DAG);
@@ -4107,7 +4217,7 @@ void LoongArchTargetLowering::ReplaceNodeResults(
LC = RTLIB::getFPTOSINT(Src.getValueType(), VT);
MakeLibCallOptions CallOptions;
EVT OpVT = Src.getValueType();
- CallOptions.setTypeListBeforeSoften(OpVT, VT, true);
+ CallOptions.setTypeListBeforeSoften(OpVT, VT);
SDValue Chain = SDValue();
SDValue Result;
std::tie(Result, Chain) =
@@ -4360,7 +4470,7 @@ void LoongArchTargetLowering::ReplaceNodeResults(
RTLIB::Libcall LC =
OpVT == MVT::f64 ? RTLIB::LROUND_F64 : RTLIB::LROUND_F32;
MakeLibCallOptions CallOptions;
- CallOptions.setTypeListBeforeSoften(OpVT, MVT::i64, true);
+ CallOptions.setTypeListBeforeSoften(OpVT, MVT::i64);
SDValue Result = makeLibCall(DAG, LC, MVT::i64, Op0, CallOptions, DL).first;
Result = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Result);
Results.push_back(Result);
@@ -4742,13 +4852,29 @@ static SDValue performBITCASTCombine(SDNode *N, SelectionDAG &DAG,
UseLASX = true;
break;
};
- if (UseLASX && !(Subtarget.has32S() && Subtarget.hasExtLASX()))
- return SDValue();
Src = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
: DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
- Opc = UseLASX ? LoongArchISD::XVMSKLTZ : LoongArchISD::VMSKLTZ;
- SDValue V = DAG.getNode(Opc, DL, MVT::i64, Src);
+ SDValue V;
+ if (!Subtarget.has32S() || !Subtarget.hasExtLASX()) {
+ if (Src.getSimpleValueType() == MVT::v32i8) {
+ SDValue Lo, Hi;
+ std::tie(Lo, Hi) = DAG.SplitVector(Src, DL);
+ Lo = DAG.getNode(LoongArchISD::VMSKLTZ, DL, MVT::i64, Lo);
+ Hi = DAG.getNode(LoongArchISD::VMSKLTZ, DL, MVT::i64, Hi);
+ Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
+ DAG.getConstant(16, DL, MVT::i8));
+ V = DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
+ } else if (UseLASX) {
+ return SDValue();
+ }
+ }
+
+ if (!V) {
+ Opc = UseLASX ? LoongArchISD::XVMSKLTZ : LoongArchISD::VMSKLTZ;
+ V = DAG.getNode(Opc, DL, MVT::i64, Src);
+ }
+
EVT T = EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
V = DAG.getZExtOrTrunc(V, DL, T);
return DAG.getBitcast(VT, V);
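// Illustrative model of the v32i8 split above, assuming VMSKLTZ.B produces a
// 16-bit mask of per-element sign bits in the low bits of its i64 result
// (helper names are hypothetical):
#include <cstdint>

static uint64_t modelVMskLtz16(const int8_t *V) { // one 128-bit VMSKLTZ
  uint64_t Mask = 0;
  for (int I = 0; I < 16; ++I)
    Mask |= uint64_t(V[I] < 0) << I;
  return Mask;
}
static uint64_t modelVMskLtz32(const int8_t *V) { // v32i8 without LASX
  return modelVMskLtz16(V) | (modelVMskLtz16(V + 16) << 16);
}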
@@ -5156,6 +5282,145 @@ static SDValue performBITREV_WCombine(SDNode *N, SelectionDAG &DAG,
Src.getOperand(0));
}
+// Perform common combines for BR_CC and SELECT_CC conditions.
+static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL,
+ SelectionDAG &DAG, const LoongArchSubtarget &Subtarget) {
+ ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();
+
+ // Since an arithmetic right shift always preserves the sign bit, the
+ // shift can be omitted.
+ // Fold setlt (sra X, N), 0 -> setlt X, 0 and
+ // setge (sra X, N), 0 -> setge X, 0
+ if (isNullConstant(RHS) && (CCVal == ISD::SETGE || CCVal == ISD::SETLT) &&
+ LHS.getOpcode() == ISD::SRA) {
+ LHS = LHS.getOperand(0);
+ return true;
+ }
+
+ if (!ISD::isIntEqualitySetCC(CCVal))
+ return false;
+
+ // Fold ((setlt X, Y), 0, ne) -> (X, Y, lt)
+ // Sometimes the setcc is introduced after br_cc/select_cc has been formed.
+ if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
+ LHS.getOperand(0).getValueType() == Subtarget.getGRLenVT()) {
+ // If we're looking for eq 0 instead of ne 0, we need to invert the
+ // condition.
+ bool Invert = CCVal == ISD::SETEQ;
+ CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
+ if (Invert)
+ CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
+
+ RHS = LHS.getOperand(1);
+ LHS = LHS.getOperand(0);
+ translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
+
+ CC = DAG.getCondCode(CCVal);
+ return true;
+ }
+
+ // Fold ((srl (and X, 1<<C), C), 0, eq/ne) -> ((shl X, GRLen-1-C), 0, ge/lt)
+ if (isNullConstant(RHS) && LHS.getOpcode() == ISD::SRL && LHS.hasOneUse() &&
+ LHS.getOperand(1).getOpcode() == ISD::Constant) {
+ SDValue LHS0 = LHS.getOperand(0);
+ if (LHS0.getOpcode() == ISD::AND &&
+ LHS0.getOperand(1).getOpcode() == ISD::Constant) {
+ uint64_t Mask = LHS0.getConstantOperandVal(1);
+ uint64_t ShAmt = LHS.getConstantOperandVal(1);
+ if (isPowerOf2_64(Mask) && Log2_64(Mask) == ShAmt) {
+ CCVal = CCVal == ISD::SETEQ ? ISD::SETGE : ISD::SETLT;
+ CC = DAG.getCondCode(CCVal);
+
+ ShAmt = LHS.getValueSizeInBits() - 1 - ShAmt;
+ LHS = LHS0.getOperand(0);
+ if (ShAmt != 0)
+ LHS =
+ DAG.getNode(ISD::SHL, DL, LHS.getValueType(), LHS0.getOperand(0),
+ DAG.getConstant(ShAmt, DL, LHS.getValueType()));
+ return true;
+ }
+ }
+ }
+
+ // (X, 1, setne) -> (X, 0, seteq) if we can prove X is 0/1.
+ // This can occur when legalizing some floating point comparisons.
+ APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
+ if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
+ CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
+ CC = DAG.getCondCode(CCVal);
+ RHS = DAG.getConstant(0, DL, LHS.getValueType());
+ return true;
+ }
+
+ return false;
+}
+
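// A quick scalar check of the srl/and fold above (illustrative, with
// GRLen = 64 and C = 5): a single-bit test becomes a sign test once the
// tested bit is shifted into the MSB, so eq/ne 0 maps to setge/setlt 0.
#include <cstdint>

static bool bitTestNe(uint64_t X) { return ((X & (1u << 5)) >> 5) != 0; }
static bool signTestLt(uint64_t X) {
  return (int64_t)(X << (64 - 1 - 5)) < 0; // shl X, GRLen-1-C, then setlt 0
}
// bitTestNe(X) == signTestLt(X) for every X.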
+static SDValue performBR_CCCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const LoongArchSubtarget &Subtarget) {
+ SDValue LHS = N->getOperand(1);
+ SDValue RHS = N->getOperand(2);
+ SDValue CC = N->getOperand(3);
+ SDLoc DL(N);
+
+ if (combine_CC(LHS, RHS, CC, DL, DAG, Subtarget))
+ return DAG.getNode(LoongArchISD::BR_CC, DL, N->getValueType(0),
+ N->getOperand(0), LHS, RHS, CC, N->getOperand(4));
+
+ return SDValue();
+}
+
+static SDValue performSELECT_CCCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const LoongArchSubtarget &Subtarget) {
+ // Transform select_cc into simpler forms where possible.
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ SDValue CC = N->getOperand(2);
+ ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();
+ SDValue TrueV = N->getOperand(3);
+ SDValue FalseV = N->getOperand(4);
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+
+ // If the True and False values are the same, we don't need a select_cc.
+ if (TrueV == FalseV)
+ return TrueV;
+
+ // (select (x < 0), y, z) -> x >> (GRLEN - 1) & (y - z) + z
+ // (select (x >= 0), y, z) -> x >> (GRLEN - 1) & (z - y) + y
+ if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
+ isNullConstant(RHS) &&
+ (CCVal == ISD::CondCode::SETLT || CCVal == ISD::CondCode::SETGE)) {
+ if (CCVal == ISD::CondCode::SETGE)
+ std::swap(TrueV, FalseV);
+
+ int64_t TrueSImm = cast<ConstantSDNode>(TrueV)->getSExtValue();
+ int64_t FalseSImm = cast<ConstantSDNode>(FalseV)->getSExtValue();
+ // Only handle simm12; values outside this range are better materialized
+ // in a register.
+ if (isInt<12>(TrueSImm) && isInt<12>(FalseSImm) &&
+ isInt<12>(TrueSImm - FalseSImm)) {
+ SDValue SRA =
+ DAG.getNode(ISD::SRA, DL, VT, LHS,
+ DAG.getConstant(Subtarget.getGRLen() - 1, DL, VT));
+ SDValue AND =
+ DAG.getNode(ISD::AND, DL, VT, SRA,
+ DAG.getSignedConstant(TrueSImm - FalseSImm, DL, VT));
+ return DAG.getNode(ISD::ADD, DL, VT, AND, FalseV);
+ }
+
+ if (CCVal == ISD::CondCode::SETGE)
+ std::swap(TrueV, FalseV);
+ }
+
+ if (combine_CC(LHS, RHS, CC, DL, DAG, Subtarget))
+ return DAG.getNode(LoongArchISD::SELECT_CC, DL, N->getValueType(0),
+ {LHS, RHS, CC, TrueV, FalseV});
+
+ return SDValue();
+}
+
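// A scalar check of the branchless select identity used above (illustrative;
// Y and Z stand for the simm12 TrueV/FalseV constants, GRLen = 64):
#include <cstdint>

static int64_t selectLtZero(int64_t X, int64_t Y, int64_t Z) {
  return X < 0 ? Y : Z;
}
static int64_t branchlessLtZero(int64_t X, int64_t Y, int64_t Z) {
  return ((X >> 63) & (Y - Z)) + Z; // X >> 63 is all-ones iff X < 0
}
// branchlessLtZero(X, Y, Z) == selectLtZero(X, Y, Z) whenever Y - Z does not
// overflow, which the simm12 restriction above guarantees.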
template <unsigned N>
static SDValue legalizeIntrinsicImmArg(SDNode *Node, unsigned ImmOp,
SelectionDAG &DAG,
@@ -5848,6 +6113,10 @@ SDValue LoongArchTargetLowering::PerformDAGCombine(SDNode *N,
return performBITCASTCombine(N, DAG, DCI, Subtarget);
case LoongArchISD::BITREV_W:
return performBITREV_WCombine(N, DAG, DCI, Subtarget);
+ case LoongArchISD::BR_CC:
+ return performBR_CCCombine(N, DAG, DCI, Subtarget);
+ case LoongArchISD::SELECT_CC:
+ return performSELECT_CCCombine(N, DAG, DCI, Subtarget);
case ISD::INTRINSIC_WO_CHAIN:
return performINTRINSIC_WO_CHAINCombine(N, DAG, DCI, Subtarget);
case LoongArchISD::MOVGR2FR_W_LA64:
@@ -6042,17 +6311,20 @@ static MachineBasicBlock *
emitPseudoXVINSGR2VR(MachineInstr &MI, MachineBasicBlock *BB,
const LoongArchSubtarget &Subtarget) {
unsigned InsOp;
+ unsigned BroadcastOp;
unsigned HalfSize;
switch (MI.getOpcode()) {
default:
llvm_unreachable("Unexpected opcode");
case LoongArch::PseudoXVINSGR2VR_B:
HalfSize = 16;
- InsOp = LoongArch::VINSGR2VR_B;
+ BroadcastOp = LoongArch::XVREPLGR2VR_B;
+ InsOp = LoongArch::XVEXTRINS_B;
break;
case LoongArch::PseudoXVINSGR2VR_H:
HalfSize = 8;
- InsOp = LoongArch::VINSGR2VR_H;
+ BroadcastOp = LoongArch::XVREPLGR2VR_H;
+ InsOp = LoongArch::XVEXTRINS_H;
break;
}
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
@@ -6066,37 +6338,41 @@ emitPseudoXVINSGR2VR(MachineInstr &MI, MachineBasicBlock *BB,
Register Elt = MI.getOperand(2).getReg();
unsigned Idx = MI.getOperand(3).getImm();
- Register ScratchReg1 = XSrc;
- if (Idx >= HalfSize) {
- ScratchReg1 = MRI.createVirtualRegister(RC);
- BuildMI(*BB, MI, DL, TII->get(LoongArch::XVPERMI_D), ScratchReg1)
- .addReg(XSrc)
- .addImm(14);
- }
+ if (XSrc.isVirtual() && MRI.getVRegDef(XSrc)->isImplicitDef() &&
+ Idx < HalfSize) {
+ Register ScratchSubReg1 = MRI.createVirtualRegister(SubRC);
+ Register ScratchSubReg2 = MRI.createVirtualRegister(SubRC);
- Register ScratchSubReg1 = MRI.createVirtualRegister(SubRC);
- Register ScratchSubReg2 = MRI.createVirtualRegister(SubRC);
- BuildMI(*BB, MI, DL, TII->get(LoongArch::COPY), ScratchSubReg1)
- .addReg(ScratchReg1, 0, LoongArch::sub_128);
- BuildMI(*BB, MI, DL, TII->get(InsOp), ScratchSubReg2)
- .addReg(ScratchSubReg1)
- .addReg(Elt)
- .addImm(Idx >= HalfSize ? Idx - HalfSize : Idx);
+ BuildMI(*BB, MI, DL, TII->get(LoongArch::COPY), ScratchSubReg1)
+ .addReg(XSrc, 0, LoongArch::sub_128);
+ BuildMI(*BB, MI, DL,
+ TII->get(HalfSize == 8 ? LoongArch::VINSGR2VR_H
+ : LoongArch::VINSGR2VR_B),
+ ScratchSubReg2)
+ .addReg(ScratchSubReg1)
+ .addReg(Elt)
+ .addImm(Idx);
+
+ BuildMI(*BB, MI, DL, TII->get(LoongArch::SUBREG_TO_REG), XDst)
+ .addImm(0)
+ .addReg(ScratchSubReg2)
+ .addImm(LoongArch::sub_128);
+ } else {
+ Register ScratchReg1 = MRI.createVirtualRegister(RC);
+ Register ScratchReg2 = MRI.createVirtualRegister(RC);
- Register ScratchReg2 = XDst;
- if (Idx >= HalfSize)
- ScratchReg2 = MRI.createVirtualRegister(RC);
+ BuildMI(*BB, MI, DL, TII->get(BroadcastOp), ScratchReg1).addReg(Elt);
- BuildMI(*BB, MI, DL, TII->get(LoongArch::SUBREG_TO_REG), ScratchReg2)
- .addImm(0)
- .addReg(ScratchSubReg2)
- .addImm(LoongArch::sub_128);
+ BuildMI(*BB, MI, DL, TII->get(LoongArch::XVPERMI_Q), ScratchReg2)
+ .addReg(ScratchReg1)
+ .addReg(XSrc)
+ .addImm(Idx >= HalfSize ? 48 : 18);
- if (Idx >= HalfSize)
- BuildMI(*BB, MI, DL, TII->get(LoongArch::XVPERMI_Q), XDst)
+ BuildMI(*BB, MI, DL, TII->get(InsOp), XDst)
.addReg(XSrc)
.addReg(ScratchReg2)
- .addImm(2);
+ .addImm((Idx >= HalfSize ? Idx - HalfSize : Idx) * 17);
+ }
MI.eraseFromParent();
return BB;
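// How the XVEXTRINS immediate above is formed (illustrative helper, assuming
// the ui8 operand encodes the destination element index in its high nibble
// and the source element index in its low nibble, within one 128-bit lane):
static unsigned xvextrinsImm(unsigned Idx, unsigned HalfSize) {
  unsigned LaneIdx = Idx >= HalfSize ? Idx - HalfSize : Idx;
  return (LaneIdx << 4) | LaneIdx; // == LaneIdx * 17, as used above
}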
@@ -6570,6 +6846,8 @@ const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(TAIL_MEDIUM)
NODE_NAME_CASE(TAIL_LARGE)
NODE_NAME_CASE(SELECT_CC)
+ NODE_NAME_CASE(BR_CC)
+ NODE_NAME_CASE(BRCOND)
NODE_NAME_CASE(SLL_W)
NODE_NAME_CASE(SRA_W)
NODE_NAME_CASE(SRL_W)
@@ -6654,6 +6932,7 @@ const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(XVMSKGEZ)
NODE_NAME_CASE(XVMSKEQZ)
NODE_NAME_CASE(XVMSKNEZ)
+ NODE_NAME_CASE(VHADDW)
}
#undef NODE_NAME_CASE
return nullptr;
@@ -6729,8 +7008,7 @@ static bool CC_LoongArchAssign2GRLen(unsigned GRLen, CCState &State,
static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI,
unsigned ValNo, MVT ValVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &State, bool IsFixed, bool IsRet,
- Type *OrigTy) {
+ CCState &State, bool IsRet, Type *OrigTy) {
unsigned GRLen = DL.getLargestLegalIntTypeSizeInBits();
assert((GRLen == 32 || GRLen == 64) && "Unsupported GRLen");
MVT GRLenVT = GRLen == 32 ? MVT::i32 : MVT::i64;
@@ -6752,7 +7030,7 @@ static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI,
case LoongArchABI::ABI_LP64F:
case LoongArchABI::ABI_ILP32D:
case LoongArchABI::ABI_LP64D:
- UseGPRForFloat = !IsFixed;
+ UseGPRForFloat = ArgFlags.isVarArg();
break;
case LoongArchABI::ABI_ILP32S:
case LoongArchABI::ABI_LP64S:
@@ -6766,7 +7044,8 @@ static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI,
// will not be passed by registers if the original type is larger than
// 2*GRLen, so the register alignment rule does not apply.
unsigned TwoGRLenInBytes = (2 * GRLen) / 8;
- if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoGRLenInBytes &&
+ if (ArgFlags.isVarArg() &&
+ ArgFlags.getNonZeroOrigAlign() == TwoGRLenInBytes &&
DL.getTypeAllocSize(OrigTy) == TwoGRLenInBytes) {
unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
// Skip 'odd' register if necessary.
@@ -6916,7 +7195,7 @@ void LoongArchTargetLowering::analyzeInputArgs(
LoongArchABI::ABI ABI =
MF.getSubtarget<LoongArchSubtarget>().getTargetABI();
if (Fn(MF.getDataLayout(), ABI, i, ArgVT, CCValAssign::Full, Ins[i].Flags,
- CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
+ CCInfo, IsRet, ArgTy)) {
LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " << ArgVT
<< '\n');
llvm_unreachable("");
@@ -6934,7 +7213,7 @@ void LoongArchTargetLowering::analyzeOutputArgs(
LoongArchABI::ABI ABI =
MF.getSubtarget<LoongArchSubtarget>().getTargetABI();
if (Fn(MF.getDataLayout(), ABI, i, ArgVT, CCValAssign::Full, Outs[i].Flags,
- CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
+ CCInfo, IsRet, OrigTy)) {
LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " << ArgVT
<< "\n");
llvm_unreachable("");
@@ -7073,7 +7352,8 @@ static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
static bool CC_LoongArch_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+ CCState &State) {
if (LocVT == MVT::i32 || LocVT == MVT::i64) {
// Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, SpLim
// s0 s1 s2 s3 s4 s5 s6 s7 s8
@@ -7126,6 +7406,7 @@ SDValue LoongArchTargetLowering::LowerFormalArguments(
llvm_unreachable("Unsupported calling convention");
case CallingConv::C:
case CallingConv::Fast:
+ case CallingConv::PreserveMost:
break;
case CallingConv::GHC:
if (!MF.getSubtarget().hasFeature(LoongArch::FeatureBasicF) ||
@@ -7647,8 +7928,7 @@ bool LoongArchTargetLowering::CanLowerReturn(
LoongArchABI::ABI ABI =
MF.getSubtarget<LoongArchSubtarget>().getTargetABI();
if (CC_LoongArch(MF.getDataLayout(), ABI, i, Outs[i].VT, CCValAssign::Full,
- Outs[i].Flags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true,
- nullptr))
+ Outs[i].Flags, CCInfo, /*IsRet=*/true, nullptr))
return false;
}
return true;
@@ -7888,7 +8168,7 @@ LoongArchTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
if (Size < 32 && (AI->getOperation() == AtomicRMWInst::And ||
AI->getOperation() == AtomicRMWInst::Or ||
AI->getOperation() == AtomicRMWInst::Xor))
- return AtomicExpansionKind::Expand;
+ return AtomicExpansionKind::CustomExpand;
if (AI->getOperation() == AtomicRMWInst::Nand || Size < 32)
return AtomicExpansionKind::CmpXChg;
}