-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp  314
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/alu32.ll  276
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/condops.ll  2284
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll  696
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/imm.ll  2741
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/mem.ll  92
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll  341
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll  390
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll  877
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll  1937
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb-intrinsic.ll  77
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb-zbkb.ll  575
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll  1051
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbc-intrinsic.ll  42
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbc-zbkc-intrinsic.ll  67
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbkb-intrinsic.ll  73
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbkb.ll  370
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll  1159
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/sadd_sat.ll  151
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/sadd_sat_plus.ll  185
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/ssub_sat.ll  151
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/ssub_sat_plus.ll  185
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat.ll  120
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat_plus.ll  141
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat.ll  113
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat_plus.ll  131
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/vararg.ll  1391
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/xaluo.ll  2609
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmac.ll  123
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmemidx.ll  717
-rw-r--r--  llvm/test/CodeGen/RISCV/shl-cttz.ll  334
31 files changed, 136 insertions, 19577 deletions
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d8451a6..d6d96dd 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -76,10 +76,6 @@ static cl::opt<int>
"use for creating a floating-point immediate value"),
cl::init(2));
-static cl::opt<bool>
- RV64LegalI32("riscv-experimental-rv64-legal-i32", cl::ReallyHidden,
- cl::desc("Make i32 a legal type for SelectionDAG on RV64."));
-
RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
const RISCVSubtarget &STI)
: TargetLowering(TM), Subtarget(STI) {
@@ -119,8 +115,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
// Set up the register classes.
addRegisterClass(XLenVT, &RISCV::GPRRegClass);
- if (Subtarget.is64Bit() && RV64LegalI32)
- addRegisterClass(MVT::i32, &RISCV::GPRRegClass);
if (Subtarget.hasStdExtZfhmin())
addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
@@ -243,12 +237,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::BR_CC, MVT::i32, Expand);
setOperationAction(ISD::BRCOND, MVT::Other, Custom);
setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
setCondCodeAction(ISD::SETGT, XLenVT, Custom);
setCondCodeAction(ISD::SETGE, XLenVT, Expand);
@@ -259,15 +249,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setCondCodeAction(ISD::SETLE, XLenVT, Expand);
}
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::SETCC, MVT::i32, Promote);
-
setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);
setOperationAction(ISD::VASTART, MVT::Other, Custom);
setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::VAARG, MVT::i32, Promote);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
@@ -280,54 +265,30 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
if (Subtarget.is64Bit()) {
setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
- if (!RV64LegalI32) {
- setOperationAction(ISD::LOAD, MVT::i32, Custom);
- setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
- MVT::i32, Custom);
- setOperationAction({ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT},
- MVT::i32, Custom);
- if (!Subtarget.hasStdExtZbb())
- setOperationAction({ISD::SADDSAT, ISD::SSUBSAT}, MVT::i32, Custom);
- } else {
- setOperationAction(ISD::SSUBO, MVT::i32, Custom);
- if (Subtarget.hasStdExtZbb()) {
- setOperationAction({ISD::SADDSAT, ISD::SSUBSAT}, MVT::i32, Custom);
- setOperationAction({ISD::UADDSAT, ISD::USUBSAT}, MVT::i32, Custom);
- }
- }
+ setOperationAction(ISD::LOAD, MVT::i32, Custom);
+ setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
+ MVT::i32, Custom);
+ setOperationAction({ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT},
+ MVT::i32, Custom);
+ if (!Subtarget.hasStdExtZbb())
+ setOperationAction({ISD::SADDSAT, ISD::SSUBSAT}, MVT::i32, Custom);
setOperationAction(ISD::SADDO, MVT::i32, Custom);
}
if (!Subtarget.hasStdExtZmmul()) {
setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU}, XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::MUL, MVT::i32, Promote);
} else if (Subtarget.is64Bit()) {
setOperationAction(ISD::MUL, MVT::i128, Custom);
- if (!RV64LegalI32)
- setOperationAction(ISD::MUL, MVT::i32, Custom);
- else
- setOperationAction(ISD::SMULO, MVT::i32, Custom);
+ setOperationAction(ISD::MUL, MVT::i32, Custom);
} else {
setOperationAction(ISD::MUL, MVT::i64, Custom);
}
if (!Subtarget.hasStdExtM()) {
- setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM},
- XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, MVT::i32,
- Promote);
+ setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, XLenVT,
+ Expand);
} else if (Subtarget.is64Bit()) {
- if (!RV64LegalI32)
- setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM},
- {MVT::i8, MVT::i16, MVT::i32}, Custom);
- }
-
- if (RV64LegalI32 && Subtarget.is64Bit()) {
- setOperationAction({ISD::MULHS, ISD::MULHU}, MVT::i32, Expand);
- setOperationAction(
- {ISD::SDIVREM, ISD::UDIVREM, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i32,
- Expand);
+ setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM},
+ {MVT::i8, MVT::i16, MVT::i32}, Custom);
}
setOperationAction(
@@ -338,7 +299,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
Custom);
if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) {
- if (!RV64LegalI32 && Subtarget.is64Bit())
+ if (Subtarget.is64Bit())
setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
} else if (Subtarget.hasVendorXTHeadBb()) {
if (Subtarget.is64Bit())
@@ -348,8 +309,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::ROTL, XLenVT, Expand);
} else {
setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Expand);
}
// With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
@@ -359,13 +318,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
Subtarget.hasVendorXTHeadBb())
? Legal
: Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::BSWAP, MVT::i32,
- (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb() ||
- Subtarget.hasVendorXTHeadBb())
- ? Promote
- : Expand);
-
if (Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit()) {
setOperationAction(ISD::BITREVERSE, XLenVT, Legal);
@@ -379,42 +331,24 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
(Subtarget.hasVendorXCValu() && !Subtarget.is64Bit())) {
setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT,
Legal);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, MVT::i32,
- Promote);
}
if (Subtarget.hasStdExtZbb() ||
(Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit())) {
- if (Subtarget.is64Bit()) {
- if (RV64LegalI32)
- setOperationAction(ISD::CTTZ, MVT::i32, Legal);
- else
- setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Custom);
- }
+ if (Subtarget.is64Bit())
+ setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Custom);
} else {
setOperationAction({ISD::CTTZ, ISD::CTPOP}, XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction({ISD::CTTZ, ISD::CTPOP}, MVT::i32, Expand);
}
if (Subtarget.hasStdExtZbb() || Subtarget.hasVendorXTHeadBb() ||
(Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit())) {
// We need the custom lowering to make sure that the resulting sequence
// for the 32bit case is efficient on 64bit targets.
- if (Subtarget.is64Bit()) {
- if (RV64LegalI32) {
- setOperationAction(ISD::CTLZ, MVT::i32,
- Subtarget.hasStdExtZbb() ? Legal : Promote);
- if (!Subtarget.hasStdExtZbb())
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Promote);
- } else
- setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, MVT::i32, Custom);
- }
+ if (Subtarget.is64Bit())
+ setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, MVT::i32, Custom);
} else {
setOperationAction(ISD::CTLZ, XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::CTLZ, MVT::i32, Expand);
}
if (Subtarget.hasVendorXCValu() && !Subtarget.is64Bit()) {
@@ -422,15 +356,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
} else if (Subtarget.hasShortForwardBranchOpt()) {
// We can use PseudoCCSUB to implement ABS.
setOperationAction(ISD::ABS, XLenVT, Legal);
- } else if (!RV64LegalI32 && Subtarget.is64Bit()) {
+ } else if (Subtarget.is64Bit()) {
setOperationAction(ISD::ABS, MVT::i32, Custom);
}
- if (!Subtarget.hasVendorXTHeadCondMov()) {
+ if (!Subtarget.hasVendorXTHeadCondMov())
setOperationAction(ISD::SELECT, XLenVT, Custom);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::SELECT, MVT::i32, Promote);
- }
static const unsigned FPLegalNodeTypes[] = {
ISD::FMINNUM, ISD::FMAXNUM, ISD::LRINT,
@@ -614,11 +545,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
XLenVT, Legal);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT,
- ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
- MVT::i32, Legal);
-
setOperationAction(ISD::GET_ROUNDING, XLenVT, Custom);
setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
}
@@ -673,8 +599,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setBooleanVectorContents(ZeroOrOneBooleanContent);
setOperationAction(ISD::VSCALE, XLenVT, Custom);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::VSCALE, MVT::i32, Custom);
// RVV intrinsics may have illegal operands.
// We also need to custom legalize vmv.x.s.
@@ -1413,11 +1337,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
}
}
- if (Subtarget.hasStdExtA()) {
+ if (Subtarget.hasStdExtA())
setOperationAction(ISD::ATOMIC_LOAD_SUB, XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
- }
if (Subtarget.hasForcedAtomics()) {
// Force __sync libcalls to be emitted for atomic rmw/cas operations.
@@ -2340,9 +2261,6 @@ MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
MVT PartVT = TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
- if (RV64LegalI32 && Subtarget.is64Bit() && PartVT == MVT::i32)
- return MVT::i64;
-
return PartVT;
}
@@ -2364,12 +2282,6 @@ unsigned RISCVTargetLowering::getVectorTypeBreakdownForCallingConv(
unsigned NumRegs = TargetLowering::getVectorTypeBreakdownForCallingConv(
Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
- if (RV64LegalI32 && Subtarget.is64Bit() && IntermediateVT == MVT::i32)
- IntermediateVT = MVT::i64;
-
- if (RV64LegalI32 && Subtarget.is64Bit() && RegisterVT == MVT::i32)
- RegisterVT = MVT::i64;
-
return NumRegs;
}
@@ -5681,78 +5593,6 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
return Op;
}
-static SDValue lowerSADDSAT_SSUBSAT(SDValue Op, SelectionDAG &DAG) {
- assert(Op.getValueType() == MVT::i32 && RV64LegalI32 &&
- "Unexpected custom legalisation");
-
- // With Zbb, we can widen to i64 and smin/smax with INT32_MAX/MIN.
- bool IsAdd = Op.getOpcode() == ISD::SADDSAT;
- SDLoc DL(Op);
- SDValue LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(0));
- SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue Result =
- DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
-
- APInt MinVal = APInt::getSignedMinValue(32).sext(64);
- APInt MaxVal = APInt::getSignedMaxValue(32).sext(64);
- SDValue SatMin = DAG.getConstant(MinVal, DL, MVT::i64);
- SDValue SatMax = DAG.getConstant(MaxVal, DL, MVT::i64);
- Result = DAG.getNode(ISD::SMIN, DL, MVT::i64, Result, SatMax);
- Result = DAG.getNode(ISD::SMAX, DL, MVT::i64, Result, SatMin);
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Result);
-}
-
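
The removed lowerSADDSAT_SSUBSAT captures a reusable trick spelled out in its comment: sign-extend the i32 operands to i64, do the add/sub exactly, clamp with smin/smax to the i32 range, truncate. A minimal standalone C++ sketch of that identity (the function name and test values are mine, not LLVM's):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>

// Widen-then-clamp lowering of i32 saddsat: the 64-bit sum is exact, so
// clamping it to [INT32_MIN, INT32_MAX] and truncating gives the saturated
// 32-bit result.
int32_t saddsat32(int32_t a, int32_t b) {
  int64_t Wide = int64_t(a) + int64_t(b); // exact, cannot overflow i64
  Wide = std::min<int64_t>(Wide, std::numeric_limits<int32_t>::max());
  Wide = std::max<int64_t>(Wide, std::numeric_limits<int32_t>::min());
  return int32_t(Wide); // truncate back to i32
}

int main() {
  assert(saddsat32(INT32_MAX, 1) == INT32_MAX); // saturates high
  assert(saddsat32(INT32_MIN, -1) == INT32_MIN); // saturates low
  assert(saddsat32(2, 3) == 5);
}
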
-static SDValue lowerUADDSAT_USUBSAT(SDValue Op, SelectionDAG &DAG) {
- assert(Op.getValueType() == MVT::i32 && RV64LegalI32 &&
- "Unexpected custom legalisation");
-
- // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
- // sign extend allows overflow of the lower 32 bits to be detected on
- // the promoted size.
- SDLoc DL(Op);
- SDValue LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(0));
- SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue WideOp = DAG.getNode(Op.getOpcode(), DL, MVT::i64, LHS, RHS);
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, WideOp);
-}
-
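
The comment above is the subtle part of the removed lowerUADDSAT_USUBSAT: the operands are sign-extended rather than zero-extended, so any pair whose 32-bit sum carries out of bit 31 also makes the 64-bit unsigned add carry out, and the promoted saturation fires exactly when the 32-bit one would. A self-checking C++ sketch of the UADDSAT case (uaddsat64 stands in for what LegalizeDAG emits; all names here are illustrative):

#include <cassert>
#include <cstdint>

// 64-bit unsigned saturating add, standing in for the i64 UADDSAT node that
// LegalizeDAG turns into minu/maxu with Zbb.
static uint64_t uaddsat64(uint64_t a, uint64_t b) {
  uint64_t s = a + b;
  return s < a ? UINT64_MAX : s; // carry out => saturate to all-ones
}

// The deleted lowering: SIGN-extend the i32 operands, saturate at i64,
// truncate. Sign extension is what makes the i64 add carry out precisely
// when the i32 add would overflow.
static uint32_t uaddsat32_via_sext(uint32_t a, uint32_t b) {
  uint64_t sa = uint64_t(int64_t(int32_t(a)));
  uint64_t sb = uint64_t(int64_t(int32_t(b)));
  return uint32_t(uaddsat64(sa, sb));
}

int main() {
  for (uint64_t a : {0u, 1u, 0x7fffffffu, 0x80000000u, 0xffffffffu})
    for (uint64_t b : {0u, 1u, 0x7fffffffu, 0x80000000u, 0xffffffffu}) {
      uint64_t exact = a + b; // exact in 64 bits
      uint32_t want = exact > 0xffffffffu ? 0xffffffffu : uint32_t(exact);
      assert(uaddsat32_via_sext(uint32_t(a), uint32_t(b)) == want);
    }
}
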
-// Custom lower i32 SADDO/SSUBO with RV64LegalI32 so we take advantage of addw.
-static SDValue lowerSADDO_SSUBO(SDValue Op, SelectionDAG &DAG) {
- assert(Op.getValueType() == MVT::i32 && RV64LegalI32 &&
- "Unexpected custom legalisation");
- if (isa<ConstantSDNode>(Op.getOperand(1)))
- return SDValue();
-
- bool IsAdd = Op.getOpcode() == ISD::SADDO;
- SDLoc DL(Op);
- SDValue LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(0));
- SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue WideOp =
- DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
- SDValue Res = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, WideOp);
- SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, WideOp,
- DAG.getValueType(MVT::i32));
- SDValue Ovf = DAG.getSetCC(DL, Op.getValue(1).getValueType(), WideOp, SExt,
- ISD::SETNE);
- return DAG.getMergeValues({Res, Ovf}, DL);
-}
-
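
lowerSADDO_SSUBO encodes the standard RV64 overflow idiom behind addw/subw: compute the result exactly in 64 bits, then the operation overflowed i32 iff the wide value disagrees with the sign-extension of its own low 32 bits (the SIGN_EXTEND_INREG + SETNE pair above). A hedged C++ rendering of that predicate (names are mine):

#include <cassert>
#include <cstdint>

// saddo: the exact 64-bit sum overflows i32 iff it differs from the
// sign-extension of its low 32 bits.
static bool saddo32(int32_t a, int32_t b, int32_t &res) {
  int64_t Wide = int64_t(a) + int64_t(b);
  res = int32_t(Wide);         // TRUNCATE (two's-complement wrap)
  return Wide != int64_t(res); // Wide != sext_inreg(Wide, i32)
}

int main() {
  int32_t r;
  assert(saddo32(INT32_MAX, 1, r) && r == INT32_MIN); // wraps, overflows
  assert(!saddo32(INT32_MAX, -1, r) && r == INT32_MAX - 1);
}
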
-// Custom lower i32 SMULO with RV64LegalI32 so we take advantage of mulw.
-static SDValue lowerSMULO(SDValue Op, SelectionDAG &DAG) {
- assert(Op.getValueType() == MVT::i32 && RV64LegalI32 &&
- "Unexpected custom legalisation");
- SDLoc DL(Op);
- SDValue LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(0));
- SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
- SDValue Res = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
- SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Mul,
- DAG.getValueType(MVT::i32));
- SDValue Ovf = DAG.getSetCC(DL, Op.getValue(1).getValueType(), Mul, SExt,
- ISD::SETNE);
- return DAG.getMergeValues({Res, Ovf}, DL);
-}
-
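
lowerSMULO applied the same disagrees-with-its-own-sign-extension predicate to a 64-bit multiply, matching mulw. The same sketch with the add swapped for a multiply:

#include <cassert>
#include <cstdint>

// smulo: a 32x32 multiply done exactly in 64 bits overflows i32 iff the
// product differs from the sign-extension of its low half.
static bool smulo32(int32_t a, int32_t b, int32_t &res) {
  int64_t Mul = int64_t(a) * int64_t(b);
  res = int32_t(Mul);
  return Mul != int64_t(res);
}

int main() {
  int32_t r;
  assert(smulo32(0x10000, 0x10000, r));             // 2^32 overflows i32
  assert(!smulo32(0x10000, 0x7fff, r) && r == 0x7fff0000);
}
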
SDValue RISCVTargetLowering::LowerIS_FPCLASS(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
@@ -6267,11 +6107,6 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
return lowerFRAMEADDR(Op, DAG);
case ISD::RETURNADDR:
return lowerRETURNADDR(Op, DAG);
- case ISD::SADDO:
- case ISD::SSUBO:
- return lowerSADDO_SSUBO(Op, DAG);
- case ISD::SMULO:
- return lowerSMULO(Op, DAG);
case ISD::SHL_PARTS:
return lowerShiftLeftParts(Op, DAG);
case ISD::SRA_PARTS:
@@ -6710,7 +6545,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
RTLIB::getFPROUND(Op.getOperand(0).getValueType(), MVT::bf16);
SDValue Res =
makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
- if (Subtarget.is64Bit() && !RV64LegalI32)
+ if (Subtarget.is64Bit())
return DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Res);
return DAG.getBitcast(MVT::i32, Res);
}
@@ -6739,7 +6574,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
RTLIB::getFPROUND(Op.getOperand(0).getValueType(), MVT::f16);
SDValue Res =
makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
- if (Subtarget.is64Bit() && !RV64LegalI32)
+ if (Subtarget.is64Bit())
return DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Res);
return DAG.getBitcast(MVT::i32, Res);
}
@@ -7033,13 +6868,9 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
return lowerToScalableOp(Op, DAG);
case ISD::UADDSAT:
case ISD::USUBSAT:
- if (!Op.getValueType().isVector())
- return lowerUADDSAT_USUBSAT(Op, DAG);
return lowerToScalableOp(Op, DAG);
case ISD::SADDSAT:
case ISD::SSUBSAT:
- if (!Op.getValueType().isVector())
- return lowerSADDSAT_SSUBSAT(Op, DAG);
return lowerToScalableOp(Op, DAG);
case ISD::ABDS:
case ISD::ABDU: {
@@ -9098,13 +8929,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::riscv_sm3p1: Opc = RISCVISD::SM3P1; break;
}
- if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
- SDValue NewOp =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp);
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
- }
-
return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
}
case Intrinsic::riscv_sm4ks:
@@ -9112,16 +8936,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
unsigned Opc =
IntNo == Intrinsic::riscv_sm4ks ? RISCVISD::SM4KS : RISCVISD::SM4ED;
- if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
- SDValue NewOp0 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue NewOp1 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2));
- SDValue Res =
- DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, Op.getOperand(3));
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
- }
-
return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2),
Op.getOperand(3));
}
@@ -9131,63 +8945,21 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
IntNo == Intrinsic::riscv_zip ? RISCVISD::ZIP : RISCVISD::UNZIP;
return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
}
- case Intrinsic::riscv_mopr: {
- if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
- SDValue NewOp =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue Res = DAG.getNode(
- RISCVISD::MOPR, DL, MVT::i64, NewOp,
- DAG.getTargetConstant(Op.getConstantOperandVal(2), DL, MVT::i64));
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
- }
+ case Intrinsic::riscv_mopr:
return DAG.getNode(RISCVISD::MOPR, DL, XLenVT, Op.getOperand(1),
Op.getOperand(2));
- }
case Intrinsic::riscv_moprr: {
- if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
- SDValue NewOp0 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue NewOp1 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2));
- SDValue Res = DAG.getNode(
- RISCVISD::MOPRR, DL, MVT::i64, NewOp0, NewOp1,
- DAG.getTargetConstant(Op.getConstantOperandVal(3), DL, MVT::i64));
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
- }
return DAG.getNode(RISCVISD::MOPRR, DL, XLenVT, Op.getOperand(1),
Op.getOperand(2), Op.getOperand(3));
}
case Intrinsic::riscv_clmul:
- if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
- SDValue NewOp0 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue NewOp1 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2));
- SDValue Res = DAG.getNode(RISCVISD::CLMUL, DL, MVT::i64, NewOp0, NewOp1);
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
- }
return DAG.getNode(RISCVISD::CLMUL, DL, XLenVT, Op.getOperand(1),
Op.getOperand(2));
case Intrinsic::riscv_clmulh:
case Intrinsic::riscv_clmulr: {
unsigned Opc =
IntNo == Intrinsic::riscv_clmulh ? RISCVISD::CLMULH : RISCVISD::CLMULR;
- if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
- SDValue NewOp0 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue NewOp1 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2));
- NewOp0 = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0,
- DAG.getConstant(32, DL, MVT::i64));
- NewOp1 = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp1,
- DAG.getConstant(32, DL, MVT::i64));
- SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1);
- Res = DAG.getNode(ISD::SRL, DL, MVT::i64, Res,
- DAG.getConstant(32, DL, MVT::i64));
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
- }
-
return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
}
case Intrinsic::experimental_get_vector_length:
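
The deleted riscv_clmulh/clmulr path documents the 32-bit carry-less multiply-high trick on RV64: pre-shift both operands left by 32 so the product bits of interest land in the high half returned by the 64-bit instruction, then shift back down. A self-contained C++ check of that identity, modelling clmul in software over __uint128_t (a GCC/Clang extension; every helper name here is invented):

#include <cassert>
#include <cstdint>

using u128 = unsigned __int128;

// Software 64x64 carry-less multiply, full 128-bit product.
static u128 clmul128(uint64_t a, uint64_t b) {
  u128 acc = 0;
  for (int i = 0; i < 64; ++i)
    if ((b >> i) & 1)
      acc ^= u128(a) << i;
  return acc;
}

// What the 64-bit clmulh instruction returns: the high 64 product bits.
static uint64_t clmulh64(uint64_t a, uint64_t b) {
  return uint64_t(clmul128(a, b) >> 64);
}

// 32-bit clmulh built the way the deleted lowering did it: shift both
// inputs up by 32, take the 64-bit high half, shift back down by 32.
static uint32_t clmulh32(uint32_t a, uint32_t b) {
  return uint32_t(clmulh64(uint64_t(a) << 32, uint64_t(b) << 32) >> 32);
}

int main() {
  uint32_t a = 0xdeadbeef, b = 0x12345678;
  uint64_t full = uint64_t(clmul128(a, b)); // 32x32 product fits in 64 bits
  assert(clmulh32(a, b) == uint32_t(full >> 32));
}
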
@@ -11425,9 +11197,6 @@ RISCVTargetLowering::lowerVPSpliceExperimental(SDValue Op,
Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
}
- // EVL1 may need to be extended to XLenVT with RV64LegalI32.
- EVL1 = DAG.getNode(ISD::ZERO_EXTEND, DL, XLenVT, EVL1);
-
bool IsMaskVector = VT.getVectorElementType() == MVT::i1;
if (IsMaskVector) {
ContainerVT = ContainerVT.changeVectorElementType(MVT::i8);
@@ -13538,7 +13307,7 @@ static SDValue performTRUNCATECombine(SDNode *N, SelectionDAG &DAG,
// shift amounts larger than 31 would produce poison. If we wait until
// type legalization, we'll create RISCVISD::SRLW and we can't recover it
// to use a BEXT instruction.
- if (!RV64LegalI32 && Subtarget.is64Bit() && Subtarget.hasStdExtZbs() && VT == MVT::i1 &&
+ if (Subtarget.is64Bit() && Subtarget.hasStdExtZbs() && VT == MVT::i1 &&
N0.getValueType() == MVT::i32 && N0.getOpcode() == ISD::SRL &&
!isa<ConstantSDNode>(N0.getOperand(1)) && N0.hasOneUse()) {
SDLoc DL(N0);
@@ -13565,7 +13334,7 @@ static SDValue performANDCombine(SDNode *N,
// shift amounts larger than 31 would produce poison. If we wait until
// type legalization, we'll create RISCVISD::SRLW and we can't recover it
// to use a BEXT instruction.
- if (!RV64LegalI32 && Subtarget.is64Bit() && Subtarget.hasStdExtZbs() &&
+ if (Subtarget.is64Bit() && Subtarget.hasStdExtZbs() &&
N->getValueType(0) == MVT::i32 && isOneConstant(N->getOperand(1)) &&
N0.getOpcode() == ISD::SRL && !isa<ConstantSDNode>(N0.getOperand(1)) &&
N0.hasOneUse()) {
@@ -13663,7 +13432,7 @@ static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
// Pre-promote (i32 (xor (shl -1, X), ~0)) on RV64 with Zbs so we can use
// (ADDI (BSET X0, X), -1). If we wait until type legalization, we'll create
// RISCVISD::SLLW and we can't recover it to use a BSET instruction.
- if (!RV64LegalI32 && Subtarget.is64Bit() && Subtarget.hasStdExtZbs() &&
+ if (Subtarget.is64Bit() && Subtarget.hasStdExtZbs() &&
N->getValueType(0) == MVT::i32 && isAllOnesConstant(N1) &&
N0.getOpcode() == ISD::SHL && isAllOnesConstant(N0.getOperand(0)) &&
!isa<ConstantSDNode>(N0.getOperand(1)) && N0.hasOneUse()) {
@@ -13700,23 +13469,6 @@ static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
}
}
- // Combine (xor (trunc (X cc Y)) 1) -> (trunc (X !cc Y)). This is needed with
- // RV64LegalI32 when the setcc is created after type legalization. An i1 xor
- // would have been promoted to i32, but the setcc would have i64 result.
- if (N->getValueType(0) == MVT::i32 && N0.getOpcode() == ISD::TRUNCATE &&
- isOneConstant(N1) && N0.getOperand(0).getOpcode() == ISD::SETCC) {
- SDValue N00 = N0.getOperand(0);
- SDLoc DL(N);
- SDValue LHS = N00.getOperand(0);
- SDValue RHS = N00.getOperand(1);
- SDValue CC = N00.getOperand(2);
- ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
- LHS.getValueType());
- SDValue Setcc = DAG.getSetCC(SDLoc(N00), N0.getOperand(0).getValueType(),
- LHS, RHS, NotCC);
- return DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N->getValueType(0), Setcc);
- }
-
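
The removed combine is the DAG form of flipping a predicate instead of xor-ing its result: (xor (trunc (setcc cc)) 1) becomes (trunc (setcc !cc)) via getSetCCInverse. The underlying boolean identity, spelled out in C++:

#include <cassert>

int main() {
  // What getSetCCInverse relies on: xor-ing a comparison result with 1 is
  // the same comparison with the inverted condition code.
  for (int a = -2; a <= 2; ++a)
    for (int b = -2; b <= 2; ++b)
      assert(((a < b) ^ 1) == (a >= b));
}
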
if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
return V;
if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))
@@ -19166,12 +18918,7 @@ static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
(VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) {
Val = DAG.getNode(RISCVISD::FMV_H_X, DL, VA.getValVT(), Val);
} else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
- if (RV64LegalI32) {
- Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Val);
- Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
- } else {
- Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
- }
+ Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
} else {
Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
}
@@ -19232,12 +18979,7 @@ static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
(VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) {
Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, LocVT, Val);
} else if (LocVT == MVT::i64 && VA.getValVT() == MVT::f32) {
- if (RV64LegalI32) {
- Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
- Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Val);
- } else {
- Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
- }
+ Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
} else {
Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/alu32.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/alu32.ll
deleted file mode 100644
index 659fa41..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/alu32.ll
+++ /dev/null
@@ -1,276 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64I
-
-; These tests are each targeted at a particular RISC-V ALU instruction. Most
-; other files in this folder exercise LLVM IR instructions that don't directly
-; match a RISC-V instruction.
-
-; Register-immediate instructions.
-
-define i32 @addi(i32 %a) nounwind {
-; RV64I-LABEL: addi:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addiw a0, a0, 1
-; RV64I-NEXT: ret
- %1 = add i32 %a, 1
- ret i32 %1
-}
-
-define i32 @slti(i32 %a) nounwind {
-; RV64I-LABEL: slti:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: slti a0, a0, 2
-; RV64I-NEXT: ret
- %1 = icmp slt i32 %a, 2
- %2 = zext i1 %1 to i32
- ret i32 %2
-}
-
-define i32 @sltiu(i32 %a) nounwind {
-; RV64I-LABEL: sltiu:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: sltiu a0, a0, 3
-; RV64I-NEXT: ret
- %1 = icmp ult i32 %a, 3
- %2 = zext i1 %1 to i32
- ret i32 %2
-}
-
-define i32 @xori(i32 %a) nounwind {
-; RV64I-LABEL: xori:
-; RV64I: # %bb.0:
-; RV64I-NEXT: xori a0, a0, 4
-; RV64I-NEXT: ret
- %1 = xor i32 %a, 4
- ret i32 %1
-}
-
-define i32 @ori(i32 %a) nounwind {
-; RV64I-LABEL: ori:
-; RV64I: # %bb.0:
-; RV64I-NEXT: ori a0, a0, 5
-; RV64I-NEXT: ret
- %1 = or i32 %a, 5
- ret i32 %1
-}
-
-define i32 @andi(i32 %a) nounwind {
-; RV64I-LABEL: andi:
-; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 6
-; RV64I-NEXT: ret
- %1 = and i32 %a, 6
- ret i32 %1
-}
-
-define i32 @slli(i32 %a) nounwind {
-; RV64I-LABEL: slli:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slliw a0, a0, 7
-; RV64I-NEXT: ret
- %1 = shl i32 %a, 7
- ret i32 %1
-}
-
-define i32 @srli(i32 %a) nounwind {
-; RV64I-LABEL: srli:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a0, a0, 8
-; RV64I-NEXT: ret
- %1 = lshr i32 %a, 8
- ret i32 %1
-}
-
-define i32 @srai(i32 %a) nounwind {
-; RV64I-LABEL: srai:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sraiw a0, a0, 9
-; RV64I-NEXT: ret
- %1 = ashr i32 %a, 9
- ret i32 %1
-}
-
-; Register-register instructions
-
-define i32 @add(i32 %a, i32 %b) nounwind {
-; RV64I-LABEL: add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: ret
- %1 = add i32 %a, %b
- ret i32 %1
-}
-
-define i32 @sub(i32 %a, i32 %b) nounwind {
-; RV64I-LABEL: sub:
-; RV64I: # %bb.0:
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: ret
- %1 = sub i32 %a, %b
- ret i32 %1
-}
-
-define i32 @sub_negative_constant_lhs(i32 %a) nounwind {
-; RV64I-LABEL: sub_negative_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, -2
-; RV64I-NEXT: subw a0, a1, a0
-; RV64I-NEXT: ret
- %1 = sub i32 -2, %a
- ret i32 %1
-}
-
-define i32 @sll(i32 %a, i32 %b) nounwind {
-; RV64I-LABEL: sll:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sllw a0, a0, a1
-; RV64I-NEXT: ret
- %1 = shl i32 %a, %b
- ret i32 %1
-}
-
-; Make sure we don't emit instructions to zero extend the shift amount to i64.
-define i32 @sll_shamt_zext(i32 %a, i32 %b) nounwind {
-; RV64I-LABEL: sll_shamt_zext:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a1, a1, 1
-; RV64I-NEXT: sllw a0, a0, a1
-; RV64I-NEXT: ret
- %shamt = add i32 %b, 1
- %1 = shl i32 %a, %shamt
- ret i32 %1
-}
-
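
The *_shamt_zext tests here (and the srl/sra variants below) check that no zero-extension of the shift amount is emitted. That is sound because RV64's SLLW/SRLW/SRAW read only bits [4:0] of rs2, so whatever the i32 add leaves in the upper bits is ignored. A C++ model of that semantics (function name is mine):

#include <cassert>
#include <cstdint>

// Model of RV64 SLLW: shift the low 32 bits of rs1 by rs2[4:0] and
// sign-extend the 32-bit result; every other bit of rs2 is ignored.
static int64_t sllw(int64_t rs1, int64_t rs2) {
  return int32_t(uint32_t(rs1) << (rs2 & 31));
}

int main() {
  // Garbage in the upper bits of the shift amount is harmless, so
  // zero-extending the i32 amount before the shift would be dead code.
  assert(sllw(1, int64_t(0xffffffff00000003ull)) == 8);
}
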
-define i32 @sll_negative_constant_lhs(i32 %a) nounwind {
-; RV64I-LABEL: sll_negative_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: sllw a0, a1, a0
-; RV64I-NEXT: ret
- %1 = shl i32 -1, %a
- ret i32 %1
-}
-
-define i32 @slt(i32 %a, i32 %b) nounwind {
-; RV64I-LABEL: slt:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: slt a0, a0, a1
-; RV64I-NEXT: ret
- %1 = icmp slt i32 %a, %b
- %2 = zext i1 %1 to i32
- ret i32 %2
-}
-
-define i32 @sltu(i32 %a, i32 %b) nounwind {
-;
-; RV64I-LABEL: sltu:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: ret
- %1 = icmp ult i32 %a, %b
- %2 = zext i1 %1 to i32
- ret i32 %2
-}
-
-define i32 @xor(i32 %a, i32 %b) nounwind {
-;
-; RV64I-LABEL: xor:
-; RV64I: # %bb.0:
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: ret
- %1 = xor i32 %a, %b
- ret i32 %1
-}
-
-define i32 @srl(i32 %a, i32 %b) nounwind {
-;
-; RV64I-LABEL: srl:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srlw a0, a0, a1
-; RV64I-NEXT: ret
- %1 = lshr i32 %a, %b
- ret i32 %1
-}
-
-; Make sure we don't emit instructions to zero extend the shift amount to i64.
-define i32 @srl_shamt_zext(i32 %a, i32 %b) nounwind {
-; RV64I-LABEL: srl_shamt_zext:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a1, a1, 1
-; RV64I-NEXT: srlw a0, a0, a1
-; RV64I-NEXT: ret
- %shamt = add i32 %b, 1
- %1 = lshr i32 %a, %shamt
- ret i32 %1
-}
-
-define i32 @srl_negative_constant_lhs(i32 %a) nounwind {
-;
-; RV64I-LABEL: srl_negative_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: srlw a0, a1, a0
-; RV64I-NEXT: ret
- %1 = lshr i32 -1, %a
- ret i32 %1
-}
-
-define i32 @sra(i32 %a, i32 %b) nounwind {
-;
-; RV64I-LABEL: sra:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sraw a0, a0, a1
-; RV64I-NEXT: ret
- %1 = ashr i32 %a, %b
- ret i32 %1
-}
-
-; Make sure we don't emit instructions to zero extend the shift amount to i64.
-define i32 @sra_shamt_zext(i32 %a, i32 %b) nounwind {
-; RV64I-LABEL: sra_shamt_zext:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a1, a1, 1
-; RV64I-NEXT: sraw a0, a0, a1
-; RV64I-NEXT: ret
- %shamt = add i32 %b, 1
- %1 = ashr i32 %a, %shamt
- ret i32 %1
-}
-
-define i32 @sra_negative_constant_lhs(i32 %a) nounwind {
-;
-; RV64I-LABEL: sra_negative_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: sraw a0, a1, a0
-; RV64I-NEXT: ret
- %1 = ashr i32 2147483648, %a
- ret i32 %1
-}
-
-define i32 @or(i32 %a, i32 %b) nounwind {
-;
-; RV64I-LABEL: or:
-; RV64I: # %bb.0:
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
- %1 = or i32 %a, %b
- ret i32 %1
-}
-
-define i32 @and(i32 %a, i32 %b) nounwind {
-;
-; RV64I-LABEL: and:
-; RV64I: # %bb.0:
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
- %1 = and i32 %a, %b
- ret i32 %1
-}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/condops.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/condops.ll
deleted file mode 100644
index 42e1205..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/condops.ll
+++ /dev/null
@@ -1,2284 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=riscv64 -target-abi=lp64f -mattr=+f,+zbs \
-; RUN: -riscv-experimental-rv64-legal-i32 < %s | FileCheck %s -check-prefix=RV64I
-; RUN: llc -mtriple=riscv64 -target-abi=lp64f -mattr=+f,+zbs,+xventanacondops \
-; RUN: -riscv-experimental-rv64-legal-i32 < %s | FileCheck %s -check-prefix=RV64XVENTANACONDOPS
-; RUN: llc -mtriple=riscv64 -target-abi=lp64f -mattr=+f,+zbs,+xtheadcondmov \
-; RUN: -riscv-experimental-rv64-legal-i32 < %s | FileCheck %s -check-prefix=RV64XTHEADCONDMOV
-; RUN: llc -mtriple=riscv64 -target-abi=lp64f -mattr=+f,+zbs,+zicond \
-; RUN: -riscv-experimental-rv64-legal-i32 < %s | FileCheck %s -check-prefix=RV64ZICOND
-
-define i64 @zero1(i64 %rs1, i1 zeroext %rc) {
-; RV64I-LABEL: zero1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: neg a1, a1
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero1:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero1:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a0, zero, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero1:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %sel = select i1 %rc, i64 %rs1, i64 0
- ret i64 %sel
-}
-
-define i64 @zero2(i64 %rs1, i1 zeroext %rc) {
-; RV64I-LABEL: zero2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a1, a1, -1
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero2:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero2:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, zero, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero2:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.nez a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %sel = select i1 %rc, i64 0, i64 %rs1
- ret i64 %sel
-}
-
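
The RV64I sequences in zero1/zero2 above are the classic branchless select-against-zero: neg (or addi ..., -1) turns the i1 condition into an all-ones/all-zeros mask that and then applies. A C++ sketch of both masks (illustrative, not LLVM code):

#include <cassert>
#include <cstdint>

// zero1's neg+and pattern: rc ? rs1 : 0 without a branch.
static uint64_t select_or_zero(uint64_t rs1, bool rc) {
  uint64_t mask = -uint64_t(rc); // 0 -> 0x0, 1 -> all-ones (neg)
  return mask & rs1;             // and
}

// zero2 flips the mask with addi rc, -1: rc ? 0 : rs1.
static uint64_t select_zero_or(uint64_t rs1, bool rc) {
  return (uint64_t(rc) - 1) & rs1;
}

int main() {
  assert(select_or_zero(42, true) == 42 && select_or_zero(42, false) == 0);
  assert(select_zero_or(42, true) == 0 && select_zero_or(42, false) == 42);
}
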
-define i64 @zero_singlebit1(i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: zero_singlebit1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bexti a1, a1, 12
-; RV64I-NEXT: addi a1, a1, -1
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero_singlebit1:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: bexti a1, a1, 12
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero_singlebit1:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: lui a2, 1
-; RV64XTHEADCONDMOV-NEXT: and a1, a1, a2
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, zero, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero_singlebit1:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: bexti a1, a1, 12
-; RV64ZICOND-NEXT: czero.nez a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %and = and i64 %rs2, 4096
- %rc = icmp eq i64 %and, 0
- %sel = select i1 %rc, i64 %rs1, i64 0
- ret i64 %sel
-}
-
-define i64 @zero_singlebit2(i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: zero_singlebit2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a1, 51
-; RV64I-NEXT: srai a1, a1, 63
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero_singlebit2:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: bexti a1, a1, 12
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero_singlebit2:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: slli a1, a1, 51
-; RV64XTHEADCONDMOV-NEXT: srai a1, a1, 63
-; RV64XTHEADCONDMOV-NEXT: and a0, a1, a0
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero_singlebit2:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: bexti a1, a1, 12
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %and = and i64 %rs2, 4096
- %rc = icmp eq i64 %and, 0
- %sel = select i1 %rc, i64 0, i64 %rs1
- ret i64 %sel
-}
-
-define i64 @add1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: add1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: neg a0, a0
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: add1:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: add a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: add1:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, zero, a0
-; RV64XTHEADCONDMOV-NEXT: add a0, a1, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: add1:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.eqz a0, a2, a0
-; RV64ZICOND-NEXT: add a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %add = add i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %add, i64 %rs1
- ret i64 %sel
-}
-
-define i64 @add2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: add2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: neg a0, a0
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: add2:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: add a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: add2:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: add a0, a2, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: add2:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.eqz a0, a1, a0
-; RV64ZICOND-NEXT: add a0, a2, a0
-; RV64ZICOND-NEXT: ret
- %add = add i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %add, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @add3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: add3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: add3:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: add a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: add3:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, zero, a0
-; RV64XTHEADCONDMOV-NEXT: add a0, a1, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: add3:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.nez a0, a2, a0
-; RV64ZICOND-NEXT: add a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %add = add i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %rs1, i64 %add
- ret i64 %sel
-}
-
-define i64 @add4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: add4:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: add4:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: add a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: add4:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: add a0, a2, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: add4:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.nez a0, a1, a0
-; RV64ZICOND-NEXT: add a0, a2, a0
-; RV64ZICOND-NEXT: ret
- %add = add i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %rs2, i64 %add
- ret i64 %sel
-}
-
-define i64 @sub1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: sub1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: neg a0, a0
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: sub a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: sub1:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: sub a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: sub1:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, zero, a0
-; RV64XTHEADCONDMOV-NEXT: sub a0, a1, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: sub1:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.eqz a0, a2, a0
-; RV64ZICOND-NEXT: sub a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %sub = sub i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %sub, i64 %rs1
- ret i64 %sel
-}
-
-define i64 @sub2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: sub2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: sub a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: sub2:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: sub a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: sub2:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, zero, a0
-; RV64XTHEADCONDMOV-NEXT: sub a0, a1, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: sub2:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.nez a0, a2, a0
-; RV64ZICOND-NEXT: sub a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %sub = sub i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %rs1, i64 %sub
- ret i64 %sel
-}
-
-define i64 @or1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: or1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: neg a0, a0
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: or1:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: or1:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, zero, a0
-; RV64XTHEADCONDMOV-NEXT: or a0, a1, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: or1:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.eqz a0, a2, a0
-; RV64ZICOND-NEXT: or a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %or = or i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %or, i64 %rs1
- ret i64 %sel
-}
-
-define i64 @or2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: or2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: neg a0, a0
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: or a0, a2, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: or2:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: or2:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: or a0, a2, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: or2:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.eqz a0, a1, a0
-; RV64ZICOND-NEXT: or a0, a2, a0
-; RV64ZICOND-NEXT: ret
- %or = or i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %or, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @or3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: or3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: or3:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: or3:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, zero, a0
-; RV64XTHEADCONDMOV-NEXT: or a0, a1, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: or3:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.nez a0, a2, a0
-; RV64ZICOND-NEXT: or a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %or = or i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %rs1, i64 %or
- ret i64 %sel
-}
-
-define i64 @or4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: or4:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: or a0, a2, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: or4:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: or4:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: or a0, a2, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: or4:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.nez a0, a1, a0
-; RV64ZICOND-NEXT: or a0, a2, a0
-; RV64ZICOND-NEXT: ret
- %or = or i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %rs2, i64 %or
- ret i64 %sel
-}
-
-define i64 @xor1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: xor1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: neg a0, a0
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: xor a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: xor1:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: xor a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: xor1:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, zero, a0
-; RV64XTHEADCONDMOV-NEXT: xor a0, a1, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: xor1:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.eqz a0, a2, a0
-; RV64ZICOND-NEXT: xor a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %xor = xor i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %xor, i64 %rs1
- ret i64 %sel
-}
-
-define i64 @xor2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: xor2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: neg a0, a0
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: xor a0, a2, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: xor2:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: xor a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: xor2:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: xor a0, a2, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: xor2:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.eqz a0, a1, a0
-; RV64ZICOND-NEXT: xor a0, a2, a0
-; RV64ZICOND-NEXT: ret
- %xor = xor i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %xor, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @xor3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: xor3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: xor a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: xor3:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: xor a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: xor3:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, zero, a0
-; RV64XTHEADCONDMOV-NEXT: xor a0, a1, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: xor3:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.nez a0, a2, a0
-; RV64ZICOND-NEXT: xor a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %xor = xor i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %rs1, i64 %xor
- ret i64 %sel
-}
-
-define i64 @xor4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: xor4:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: xor a0, a2, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: xor4:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: xor a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: xor4:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: xor a0, a2, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: xor4:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.nez a0, a1, a0
-; RV64ZICOND-NEXT: xor a0, a2, a0
-; RV64ZICOND-NEXT: ret
- %xor = xor i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %rs2, i64 %xor
- ret i64 %sel
-}
-
-define i64 @and1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: and1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB18_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: .LBB18_2:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: and1:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: and a2, a1, a2
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: and1:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: and a2, a1, a2
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, a1, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: and1:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: and a2, a1, a2
-; RV64ZICOND-NEXT: czero.nez a0, a1, a0
-; RV64ZICOND-NEXT: or a0, a2, a0
-; RV64ZICOND-NEXT: ret
- %and = and i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %and, i64 %rs1
- ret i64 %sel
-}
-
-define i64 @and2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: and2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB19_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: and a2, a1, a2
-; RV64I-NEXT: .LBB19_2:
-; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: and2:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: and a1, a1, a2
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: and2:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: and a1, a1, a2
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, a2, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: and2:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: and a1, a1, a2
-; RV64ZICOND-NEXT: czero.nez a0, a2, a0
-; RV64ZICOND-NEXT: or a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %and = and i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %and, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @and3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: and3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bnez a0, .LBB20_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: .LBB20_2:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: and3:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: and a2, a1, a2
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: and3:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: and a2, a1, a2
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, a1, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: and3:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: and a2, a1, a2
-; RV64ZICOND-NEXT: czero.eqz a0, a1, a0
-; RV64ZICOND-NEXT: or a0, a2, a0
-; RV64ZICOND-NEXT: ret
- %and = and i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %rs1, i64 %and
- ret i64 %sel
-}
-
-define i64 @and4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: and4:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bnez a0, .LBB21_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: and a2, a1, a2
-; RV64I-NEXT: .LBB21_2:
-; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: and4:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: and a1, a1, a2
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: and4:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: and a1, a1, a2
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, a2, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: and4:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: and a1, a1, a2
-; RV64ZICOND-NEXT: czero.eqz a0, a2, a0
-; RV64ZICOND-NEXT: or a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %and = and i64 %rs1, %rs2
- %sel = select i1 %rc, i64 %rs2, i64 %and
- ret i64 %sel
-}
-
-define i64 @basic(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: basic:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bnez a0, .LBB22_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a1, a2
-; RV64I-NEXT: .LBB22_2:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: basic:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a2, a2, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: basic:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, a2, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: basic:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.nez a2, a2, a0
-; RV64ZICOND-NEXT: czero.eqz a0, a1, a0
-; RV64ZICOND-NEXT: or a0, a0, a2
-; RV64ZICOND-NEXT: ret
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @seteq(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: seteq:
-; RV64I: # %bb.0:
-; RV64I-NEXT: beq a0, a1, .LBB23_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a2, a3
-; RV64I-NEXT: .LBB23_2:
-; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: seteq:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a3, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: seteq:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: xor a0, a0, a1
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, a3, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: seteq:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: xor a0, a0, a1
-; RV64ZICOND-NEXT: czero.eqz a1, a3, a0
-; RV64ZICOND-NEXT: czero.nez a0, a2, a0
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %rc = icmp eq i64 %a, %b
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @setne(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: setne:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bne a0, a1, .LBB24_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a2, a3
-; RV64I-NEXT: .LBB24_2:
-; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: setne:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a1, a3, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: setne:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: xor a0, a0, a1
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, a3, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: setne:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: xor a0, a0, a1
-; RV64ZICOND-NEXT: czero.nez a1, a3, a0
-; RV64ZICOND-NEXT: czero.eqz a0, a2, a0
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %rc = icmp ne i64 %a, %b
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @setgt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: setgt:
-; RV64I: # %bb.0:
-; RV64I-NEXT: blt a1, a0, .LBB25_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a2, a3
-; RV64I-NEXT: .LBB25_2:
-; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: setgt:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: slt a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a1, a3, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: setgt:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: slt a0, a1, a0
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, a3, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: setgt:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: slt a0, a1, a0
-; RV64ZICOND-NEXT: czero.nez a1, a3, a0
-; RV64ZICOND-NEXT: czero.eqz a0, a2, a0
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %rc = icmp sgt i64 %a, %b
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @setge(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: setge:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bge a0, a1, .LBB26_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a2, a3
-; RV64I-NEXT: .LBB26_2:
-; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: setge:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: slt a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a3, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: setge:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: slt a0, a0, a1
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, a3, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: setge:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: slt a0, a0, a1
-; RV64ZICOND-NEXT: czero.eqz a1, a3, a0
-; RV64ZICOND-NEXT: czero.nez a0, a2, a0
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %rc = icmp sge i64 %a, %b
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @setlt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: setlt:
-; RV64I: # %bb.0:
-; RV64I-NEXT: blt a0, a1, .LBB27_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a2, a3
-; RV64I-NEXT: .LBB27_2:
-; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: setlt:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: slt a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a1, a3, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: setlt:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: slt a0, a0, a1
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, a3, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: setlt:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: slt a0, a0, a1
-; RV64ZICOND-NEXT: czero.nez a1, a3, a0
-; RV64ZICOND-NEXT: czero.eqz a0, a2, a0
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %rc = icmp slt i64 %a, %b
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @setle(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: setle:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bge a1, a0, .LBB28_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a2, a3
-; RV64I-NEXT: .LBB28_2:
-; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: setle:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: slt a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a3, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: setle:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: slt a0, a1, a0
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, a3, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: setle:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: slt a0, a1, a0
-; RV64ZICOND-NEXT: czero.eqz a1, a3, a0
-; RV64ZICOND-NEXT: czero.nez a0, a2, a0
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %rc = icmp sle i64 %a, %b
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @setugt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: setugt:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bltu a1, a0, .LBB29_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a2, a3
-; RV64I-NEXT: .LBB29_2:
-; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: setugt:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: sltu a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a1, a3, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: setugt:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: sltu a0, a1, a0
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, a3, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: setugt:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: sltu a0, a1, a0
-; RV64ZICOND-NEXT: czero.nez a1, a3, a0
-; RV64ZICOND-NEXT: czero.eqz a0, a2, a0
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %rc = icmp ugt i64 %a, %b
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @setuge(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: setuge:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bgeu a0, a1, .LBB30_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a2, a3
-; RV64I-NEXT: .LBB30_2:
-; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: setuge:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: sltu a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a3, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: setuge:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: sltu a0, a0, a1
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, a3, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: setuge:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: sltu a0, a0, a1
-; RV64ZICOND-NEXT: czero.eqz a1, a3, a0
-; RV64ZICOND-NEXT: czero.nez a0, a2, a0
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %rc = icmp uge i64 %a, %b
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @setult(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: setult:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bltu a0, a1, .LBB31_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a2, a3
-; RV64I-NEXT: .LBB31_2:
-; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: setult:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: sltu a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a1, a3, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: setult:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: sltu a0, a0, a1
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, a3, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: setult:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: sltu a0, a0, a1
-; RV64ZICOND-NEXT: czero.nez a1, a3, a0
-; RV64ZICOND-NEXT: czero.eqz a0, a2, a0
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %rc = icmp ult i64 %a, %b
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @setule(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: setule:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bgeu a1, a0, .LBB32_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a2, a3
-; RV64I-NEXT: .LBB32_2:
-; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: setule:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: sltu a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a3, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: setule:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: sltu a0, a1, a0
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, a3, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: setule:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: sltu a0, a1, a0
-; RV64ZICOND-NEXT: czero.eqz a1, a3, a0
-; RV64ZICOND-NEXT: czero.nez a0, a2, a0
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %rc = icmp ule i64 %a, %b
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @seteq_zero(i64 %a, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: seteq_zero:
-; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB33_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a1, a2
-; RV64I-NEXT: .LBB33_2:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: seteq_zero:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a2, a2, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: seteq_zero:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, a2, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: seteq_zero:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.eqz a2, a2, a0
-; RV64ZICOND-NEXT: czero.nez a0, a1, a0
-; RV64ZICOND-NEXT: or a0, a0, a2
-; RV64ZICOND-NEXT: ret
- %rc = icmp eq i64 %a, 0
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @setne_zero(i64 %a, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: setne_zero:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bnez a0, .LBB34_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a1, a2
-; RV64I-NEXT: .LBB34_2:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: setne_zero:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a2, a2, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: setne_zero:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, a2, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: setne_zero:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.nez a2, a2, a0
-; RV64ZICOND-NEXT: czero.eqz a0, a1, a0
-; RV64ZICOND-NEXT: or a0, a0, a2
-; RV64ZICOND-NEXT: ret
- %rc = icmp ne i64 %a, 0
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @seteq_constant(i64 %a, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: seteq_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a3, 123
-; RV64I-NEXT: beq a0, a3, .LBB35_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a1, a2
-; RV64I-NEXT: .LBB35_2:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: seteq_constant:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -123
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a2, a2, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: seteq_constant:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: addi a0, a0, -123
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, a2, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: seteq_constant:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: addi a0, a0, -123
-; RV64ZICOND-NEXT: czero.eqz a2, a2, a0
-; RV64ZICOND-NEXT: czero.nez a0, a1, a0
-; RV64ZICOND-NEXT: or a0, a0, a2
-; RV64ZICOND-NEXT: ret
- %rc = icmp eq i64 %a, 123
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @setne_constant(i64 %a, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: setne_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a3, 456
-; RV64I-NEXT: bne a0, a3, .LBB36_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a1, a2
-; RV64I-NEXT: .LBB36_2:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: setne_constant:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -456
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a2, a2, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: setne_constant:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: addi a0, a0, -456
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, a2, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: setne_constant:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: addi a0, a0, -456
-; RV64ZICOND-NEXT: czero.nez a2, a2, a0
-; RV64ZICOND-NEXT: czero.eqz a0, a1, a0
-; RV64ZICOND-NEXT: or a0, a0, a2
-; RV64ZICOND-NEXT: ret
- %rc = icmp ne i64 %a, 456
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @seteq_2048(i64 %a, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: seteq_2048:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bseti a3, zero, 11
-; RV64I-NEXT: beq a0, a3, .LBB37_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a1, a2
-; RV64I-NEXT: .LBB37_2:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: seteq_2048:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -2048
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a2, a2, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: seteq_2048:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: addi a0, a0, -2048
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, a2, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: seteq_2048:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: addi a0, a0, -2048
-; RV64ZICOND-NEXT: czero.eqz a2, a2, a0
-; RV64ZICOND-NEXT: czero.nez a0, a1, a0
-; RV64ZICOND-NEXT: or a0, a0, a2
-; RV64ZICOND-NEXT: ret
- %rc = icmp eq i64 %a, 2048
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @seteq_neg2048(i64 %a, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: seteq_neg2048:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a3, -2048
-; RV64I-NEXT: beq a0, a3, .LBB38_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a1, a2
-; RV64I-NEXT: .LBB38_2:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: seteq_neg2048:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a2, a2, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: seteq_neg2048:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: xori a0, a0, -2048
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, a2, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: seteq_neg2048:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: xori a0, a0, -2048
-; RV64ZICOND-NEXT: czero.eqz a2, a2, a0
-; RV64ZICOND-NEXT: czero.nez a0, a1, a0
-; RV64ZICOND-NEXT: or a0, a0, a2
-; RV64ZICOND-NEXT: ret
- %rc = icmp eq i64 %a, -2048
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @setne_neg2048(i64 %a, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: setne_neg2048:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a3, -2048
-; RV64I-NEXT: bne a0, a3, .LBB39_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a1, a2
-; RV64I-NEXT: .LBB39_2:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: setne_neg2048:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a2, a2, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: setne_neg2048:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: xori a0, a0, -2048
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, a2, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: setne_neg2048:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: xori a0, a0, -2048
-; RV64ZICOND-NEXT: czero.nez a2, a2, a0
-; RV64ZICOND-NEXT: czero.eqz a0, a1, a0
-; RV64ZICOND-NEXT: or a0, a0, a2
-; RV64ZICOND-NEXT: ret
- %rc = icmp ne i64 %a, -2048
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-define i64 @zero1_seteq(i64 %a, i64 %b, i64 %rs1) {
-; RV64I-LABEL: zero1_seteq:
-; RV64I: # %bb.0:
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: snez a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero1_seteq:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero1_seteq:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: xor a0, a0, a1
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero1_seteq:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: xor a0, a0, a1
-; RV64ZICOND-NEXT: czero.nez a0, a2, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp eq i64 %a, %b
- %sel = select i1 %rc, i64 %rs1, i64 0
- ret i64 %sel
-}
-
-define i64 @zero2_seteq(i64 %a, i64 %b, i64 %rs1) {
-; RV64I-LABEL: zero2_seteq:
-; RV64I: # %bb.0:
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: seqz a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero2_seteq:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero2_seteq:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: xor a0, a0, a1
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero2_seteq:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: xor a0, a0, a1
-; RV64ZICOND-NEXT: czero.eqz a0, a2, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp eq i64 %a, %b
- %sel = select i1 %rc, i64 0, i64 %rs1
- ret i64 %sel
-}
-
-define i64 @zero1_setne(i64 %a, i64 %b, i64 %rs1) {
-; RV64I-LABEL: zero1_setne:
-; RV64I: # %bb.0:
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: seqz a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero1_setne:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero1_setne:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: xor a0, a0, a1
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero1_setne:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: xor a0, a0, a1
-; RV64ZICOND-NEXT: czero.eqz a0, a2, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp ne i64 %a, %b
- %sel = select i1 %rc, i64 %rs1, i64 0
- ret i64 %sel
-}
-
-define i64 @zero2_setne(i64 %a, i64 %b, i64 %rs1) {
-; RV64I-LABEL: zero2_setne:
-; RV64I: # %bb.0:
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: snez a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero2_setne:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero2_setne:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: xor a0, a0, a1
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero2_setne:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: xor a0, a0, a1
-; RV64ZICOND-NEXT: czero.nez a0, a2, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp ne i64 %a, %b
- %sel = select i1 %rc, i64 0, i64 %rs1
- ret i64 %sel
-}
-
-define i64 @zero1_seteq_zero(i64 %a, i64 %rs1) {
-; RV64I-LABEL: zero1_seteq_zero:
-; RV64I: # %bb.0:
-; RV64I-NEXT: snez a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero1_seteq_zero:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero1_seteq_zero:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero1_seteq_zero:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.nez a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp eq i64 %a, 0
- %sel = select i1 %rc, i64 %rs1, i64 0
- ret i64 %sel
-}
-
-define i64 @zero2_seteq_zero(i64 %a, i64 %rs1) {
-; RV64I-LABEL: zero2_seteq_zero:
-; RV64I: # %bb.0:
-; RV64I-NEXT: seqz a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero2_seteq_zero:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero2_seteq_zero:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero2_seteq_zero:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.eqz a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp eq i64 %a, 0
- %sel = select i1 %rc, i64 0, i64 %rs1
- ret i64 %sel
-}
-
-define i64 @zero1_setne_zero(i64 %a, i64 %rs1) {
-; RV64I-LABEL: zero1_setne_zero:
-; RV64I: # %bb.0:
-; RV64I-NEXT: seqz a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero1_setne_zero:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero1_setne_zero:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero1_setne_zero:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.eqz a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp ne i64 %a, 0
- %sel = select i1 %rc, i64 %rs1, i64 0
- ret i64 %sel
-}
-
-define i64 @zero2_setne_zero(i64 %a, i64 %rs1) {
-; RV64I-LABEL: zero2_setne_zero:
-; RV64I: # %bb.0:
-; RV64I-NEXT: snez a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero2_setne_zero:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero2_setne_zero:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero2_setne_zero:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: czero.nez a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp ne i64 %a, 0
- %sel = select i1 %rc, i64 0, i64 %rs1
- ret i64 %sel
-}
-
-define i64 @zero1_seteq_constant(i64 %a, i64 %rs1) {
-; RV64I-LABEL: zero1_seteq_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, 231
-; RV64I-NEXT: snez a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero1_seteq_constant:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: addi a0, a0, 231
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero1_seteq_constant:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: addi a0, a0, 231
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero1_seteq_constant:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: addi a0, a0, 231
-; RV64ZICOND-NEXT: czero.nez a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp eq i64 %a, -231
- %sel = select i1 %rc, i64 %rs1, i64 0
- ret i64 %sel
-}
-
-define i64 @zero2_seteq_constant(i64 %a, i64 %rs1) {
-; RV64I-LABEL: zero2_seteq_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, -546
-; RV64I-NEXT: seqz a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero2_seteq_constant:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -546
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero2_seteq_constant:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: addi a0, a0, -546
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero2_seteq_constant:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: addi a0, a0, -546
-; RV64ZICOND-NEXT: czero.eqz a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp eq i64 %a, 546
- %sel = select i1 %rc, i64 0, i64 %rs1
- ret i64 %sel
-}
-
-define i64 @zero1_setne_constant(i64 %a, i64 %rs1) {
-; RV64I-LABEL: zero1_setne_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, -321
-; RV64I-NEXT: seqz a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero1_setne_constant:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -321
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero1_setne_constant:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: addi a0, a0, -321
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero1_setne_constant:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: addi a0, a0, -321
-; RV64ZICOND-NEXT: czero.eqz a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp ne i64 %a, 321
- %sel = select i1 %rc, i64 %rs1, i64 0
- ret i64 %sel
-}
-
-define i64 @zero2_setne_constant(i64 %a, i64 %rs1) {
-; RV64I-LABEL: zero2_setne_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, 654
-; RV64I-NEXT: snez a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero2_setne_constant:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: addi a0, a0, 654
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero2_setne_constant:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: addi a0, a0, 654
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero2_setne_constant:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: addi a0, a0, 654
-; RV64ZICOND-NEXT: czero.nez a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp ne i64 %a, -654
- %sel = select i1 %rc, i64 0, i64 %rs1
- ret i64 %sel
-}
-
-define i64 @zero1_seteq_neg2048(i64 %a, i64 %rs1) {
-; RV64I-LABEL: zero1_seteq_neg2048:
-; RV64I: # %bb.0:
-; RV64I-NEXT: xori a0, a0, -2048
-; RV64I-NEXT: snez a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero1_seteq_neg2048:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero1_seteq_neg2048:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: xori a0, a0, -2048
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero1_seteq_neg2048:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: xori a0, a0, -2048
-; RV64ZICOND-NEXT: czero.nez a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp eq i64 %a, -2048
- %sel = select i1 %rc, i64 %rs1, i64 0
- ret i64 %sel
-}
-
-define i64 @zero2_seteq_neg2048(i64 %a, i64 %rs1) {
-; RV64I-LABEL: zero2_seteq_neg2048:
-; RV64I: # %bb.0:
-; RV64I-NEXT: xori a0, a0, -2048
-; RV64I-NEXT: seqz a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero2_seteq_neg2048:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero2_seteq_neg2048:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: xori a0, a0, -2048
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero2_seteq_neg2048:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: xori a0, a0, -2048
-; RV64ZICOND-NEXT: czero.eqz a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp eq i64 %a, -2048
- %sel = select i1 %rc, i64 0, i64 %rs1
- ret i64 %sel
-}
-
-define i64 @zero1_setne_neg2048(i64 %a, i64 %rs1) {
-; RV64I-LABEL: zero1_setne_neg2048:
-; RV64I: # %bb.0:
-; RV64I-NEXT: xori a0, a0, -2048
-; RV64I-NEXT: seqz a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero1_setne_neg2048:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero1_setne_neg2048:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: xori a0, a0, -2048
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero1_setne_neg2048:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: xori a0, a0, -2048
-; RV64ZICOND-NEXT: czero.eqz a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp ne i64 %a, -2048
- %sel = select i1 %rc, i64 %rs1, i64 0
- ret i64 %sel
-}
-
-define i64 @zero2_setne_neg2048(i64 %a, i64 %rs1) {
-; RV64I-LABEL: zero2_setne_neg2048:
-; RV64I: # %bb.0:
-; RV64I-NEXT: xori a0, a0, -2048
-; RV64I-NEXT: snez a0, a0
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: zero2_setne_neg2048:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: zero2_setne_neg2048:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: xori a0, a0, -2048
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: mv a0, a1
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: zero2_setne_neg2048:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: xori a0, a0, -2048
-; RV64ZICOND-NEXT: czero.nez a0, a1, a0
-; RV64ZICOND-NEXT: ret
- %rc = icmp ne i64 %a, -2048
- %sel = select i1 %rc, i64 0, i64 %rs1
- ret i64 %sel
-}
-
-define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nounwind {
-; RV64I-LABEL: sextw_removal_maskc:
-; RV64I: # %bb.0: # %bb
-; RV64I-NEXT: addi sp, sp, -32
-; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a2
-; RV64I-NEXT: slli a0, a0, 63
-; RV64I-NEXT: srai a0, a0, 63
-; RV64I-NEXT: and s1, a0, a1
-; RV64I-NEXT: .LBB56_1: # %bb2
-; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call bar
-; RV64I-NEXT: sllw s1, s1, s0
-; RV64I-NEXT: bnez a0, .LBB56_1
-; RV64I-NEXT: # %bb.2: # %bb7
-; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 32
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: sextw_removal_maskc:
-; RV64XVENTANACONDOPS: # %bb.0: # %bb
-; RV64XVENTANACONDOPS-NEXT: addi sp, sp, -32
-; RV64XVENTANACONDOPS-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64XVENTANACONDOPS-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64XVENTANACONDOPS-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64XVENTANACONDOPS-NEXT: mv s0, a2
-; RV64XVENTANACONDOPS-NEXT: andi a0, a0, 1
-; RV64XVENTANACONDOPS-NEXT: vt.maskc s1, a1, a0
-; RV64XVENTANACONDOPS-NEXT: .LBB56_1: # %bb2
-; RV64XVENTANACONDOPS-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64XVENTANACONDOPS-NEXT: mv a0, s1
-; RV64XVENTANACONDOPS-NEXT: call bar
-; RV64XVENTANACONDOPS-NEXT: sllw s1, s1, s0
-; RV64XVENTANACONDOPS-NEXT: bnez a0, .LBB56_1
-; RV64XVENTANACONDOPS-NEXT: # %bb.2: # %bb7
-; RV64XVENTANACONDOPS-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; RV64XVENTANACONDOPS-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; RV64XVENTANACONDOPS-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
-; RV64XVENTANACONDOPS-NEXT: addi sp, sp, 32
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: sextw_removal_maskc:
-; RV64XTHEADCONDMOV: # %bb.0: # %bb
-; RV64XTHEADCONDMOV-NEXT: addi sp, sp, -32
-; RV64XTHEADCONDMOV-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64XTHEADCONDMOV-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64XTHEADCONDMOV-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64XTHEADCONDMOV-NEXT: mv s0, a2
-; RV64XTHEADCONDMOV-NEXT: mv s1, a1
-; RV64XTHEADCONDMOV-NEXT: andi a0, a0, 1
-; RV64XTHEADCONDMOV-NEXT: th.mveqz s1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: .LBB56_1: # %bb2
-; RV64XTHEADCONDMOV-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64XTHEADCONDMOV-NEXT: sext.w a0, s1
-; RV64XTHEADCONDMOV-NEXT: call bar
-; RV64XTHEADCONDMOV-NEXT: sllw s1, s1, s0
-; RV64XTHEADCONDMOV-NEXT: bnez a0, .LBB56_1
-; RV64XTHEADCONDMOV-NEXT: # %bb.2: # %bb7
-; RV64XTHEADCONDMOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; RV64XTHEADCONDMOV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; RV64XTHEADCONDMOV-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
-; RV64XTHEADCONDMOV-NEXT: addi sp, sp, 32
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: sextw_removal_maskc:
-; RV64ZICOND: # %bb.0: # %bb
-; RV64ZICOND-NEXT: addi sp, sp, -32
-; RV64ZICOND-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64ZICOND-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64ZICOND-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64ZICOND-NEXT: mv s0, a2
-; RV64ZICOND-NEXT: andi a0, a0, 1
-; RV64ZICOND-NEXT: czero.eqz s1, a1, a0
-; RV64ZICOND-NEXT: .LBB56_1: # %bb2
-; RV64ZICOND-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64ZICOND-NEXT: mv a0, s1
-; RV64ZICOND-NEXT: call bar
-; RV64ZICOND-NEXT: sllw s1, s1, s0
-; RV64ZICOND-NEXT: bnez a0, .LBB56_1
-; RV64ZICOND-NEXT: # %bb.2: # %bb7
-; RV64ZICOND-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; RV64ZICOND-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; RV64ZICOND-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
-; RV64ZICOND-NEXT: addi sp, sp, 32
-; RV64ZICOND-NEXT: ret
-bb:
- %i = select i1 %c, i32 %arg, i32 0
- br label %bb2
-
-bb2: ; preds = %bb2, %bb
- %i3 = phi i32 [ %i, %bb ], [ %i5, %bb2 ]
- %i4 = tail call signext i32 @bar(i32 signext %i3)
- %i5 = shl i32 %i3, %arg1
- %i6 = icmp eq i32 %i4, 0
- br i1 %i6, label %bb7, label %bb2
-
-bb7: ; preds = %bb2
- ret void
-}
-declare signext i32 @bar(i32 signext)
-
-define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) nounwind {
-; RV64I-LABEL: sextw_removal_maskcn:
-; RV64I: # %bb.0: # %bb
-; RV64I-NEXT: addi sp, sp, -32
-; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a2
-; RV64I-NEXT: andi a0, a0, 1
-; RV64I-NEXT: addiw a0, a0, -1
-; RV64I-NEXT: and s1, a0, a1
-; RV64I-NEXT: .LBB57_1: # %bb2
-; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call bar
-; RV64I-NEXT: sllw s1, s1, s0
-; RV64I-NEXT: bnez a0, .LBB57_1
-; RV64I-NEXT: # %bb.2: # %bb7
-; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 32
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: sextw_removal_maskcn:
-; RV64XVENTANACONDOPS: # %bb.0: # %bb
-; RV64XVENTANACONDOPS-NEXT: addi sp, sp, -32
-; RV64XVENTANACONDOPS-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64XVENTANACONDOPS-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64XVENTANACONDOPS-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64XVENTANACONDOPS-NEXT: mv s0, a2
-; RV64XVENTANACONDOPS-NEXT: andi a0, a0, 1
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn s1, a1, a0
-; RV64XVENTANACONDOPS-NEXT: .LBB57_1: # %bb2
-; RV64XVENTANACONDOPS-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64XVENTANACONDOPS-NEXT: mv a0, s1
-; RV64XVENTANACONDOPS-NEXT: call bar
-; RV64XVENTANACONDOPS-NEXT: sllw s1, s1, s0
-; RV64XVENTANACONDOPS-NEXT: bnez a0, .LBB57_1
-; RV64XVENTANACONDOPS-NEXT: # %bb.2: # %bb7
-; RV64XVENTANACONDOPS-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; RV64XVENTANACONDOPS-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; RV64XVENTANACONDOPS-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
-; RV64XVENTANACONDOPS-NEXT: addi sp, sp, 32
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: sextw_removal_maskcn:
-; RV64XTHEADCONDMOV: # %bb.0: # %bb
-; RV64XTHEADCONDMOV-NEXT: addi sp, sp, -32
-; RV64XTHEADCONDMOV-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64XTHEADCONDMOV-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64XTHEADCONDMOV-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64XTHEADCONDMOV-NEXT: mv s0, a2
-; RV64XTHEADCONDMOV-NEXT: mv s1, a1
-; RV64XTHEADCONDMOV-NEXT: andi a0, a0, 1
-; RV64XTHEADCONDMOV-NEXT: th.mvnez s1, zero, a0
-; RV64XTHEADCONDMOV-NEXT: .LBB57_1: # %bb2
-; RV64XTHEADCONDMOV-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64XTHEADCONDMOV-NEXT: sext.w a0, s1
-; RV64XTHEADCONDMOV-NEXT: call bar
-; RV64XTHEADCONDMOV-NEXT: sllw s1, s1, s0
-; RV64XTHEADCONDMOV-NEXT: bnez a0, .LBB57_1
-; RV64XTHEADCONDMOV-NEXT: # %bb.2: # %bb7
-; RV64XTHEADCONDMOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; RV64XTHEADCONDMOV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; RV64XTHEADCONDMOV-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
-; RV64XTHEADCONDMOV-NEXT: addi sp, sp, 32
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: sextw_removal_maskcn:
-; RV64ZICOND: # %bb.0: # %bb
-; RV64ZICOND-NEXT: addi sp, sp, -32
-; RV64ZICOND-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64ZICOND-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64ZICOND-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64ZICOND-NEXT: mv s0, a2
-; RV64ZICOND-NEXT: andi a0, a0, 1
-; RV64ZICOND-NEXT: czero.nez s1, a1, a0
-; RV64ZICOND-NEXT: .LBB57_1: # %bb2
-; RV64ZICOND-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64ZICOND-NEXT: mv a0, s1
-; RV64ZICOND-NEXT: call bar
-; RV64ZICOND-NEXT: sllw s1, s1, s0
-; RV64ZICOND-NEXT: bnez a0, .LBB57_1
-; RV64ZICOND-NEXT: # %bb.2: # %bb7
-; RV64ZICOND-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; RV64ZICOND-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; RV64ZICOND-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
-; RV64ZICOND-NEXT: addi sp, sp, 32
-; RV64ZICOND-NEXT: ret
-bb:
- %i = select i1 %c, i32 0, i32 %arg
- br label %bb2
-
-bb2: ; preds = %bb2, %bb
- %i3 = phi i32 [ %i, %bb ], [ %i5, %bb2 ]
- %i4 = tail call signext i32 @bar(i32 signext %i3)
- %i5 = shl i32 %i3, %arg1
- %i6 = icmp eq i32 %i4, 0
- br i1 %i6, label %bb7, label %bb2
-
-bb7: ; preds = %bb2
- ret void
-}
-
-define i32 @setune_32(float %a, float %b, i32 %rs1, i32 %rs2) {
-; RV64I-LABEL: setune_32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: feq.s a2, fa0, fa1
-; RV64I-NEXT: beqz a2, .LBB58_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB58_2:
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: setune_32:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: feq.s a2, fa0, fa1
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a2
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: setune_32:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: feq.s a2, fa0, fa1
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, a1, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: setune_32:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: feq.s a2, fa0, fa1
-; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
-; RV64ZICOND-NEXT: czero.nez a0, a0, a2
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %rc = fcmp une float %a, %b
- %sel = select i1 %rc, i32 %rs1, i32 %rs2
- ret i32 %sel
-}
-
-define i64 @setune_64(float %a, float %b, i64 %rs1, i64 %rs2) {
-; RV64I-LABEL: setune_64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: feq.s a2, fa0, fa1
-; RV64I-NEXT: beqz a2, .LBB59_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB59_2:
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: setune_64:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: feq.s a2, fa0, fa1
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a2
-; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: setune_64:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: feq.s a2, fa0, fa1
-; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, a1, a2
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: setune_64:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: feq.s a2, fa0, fa1
-; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
-; RV64ZICOND-NEXT: czero.nez a0, a0, a2
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
- %rc = fcmp une float %a, %b
- %sel = select i1 %rc, i64 %rs1, i64 %rs2
- ret i64 %sel
-}
-
-; Test that we can ComputeNumSignBits across basic blocks when the live-out
-; value is RISCVISD::SELECT_CC. There should be no slli+srai or sext.h in the
-; output; a minimal IR sketch follows this test.
-define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2, i16 signext %3) nounwind {
-; RV64I-LABEL: numsignbits:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a3
-; RV64I-NEXT: beqz a0, .LBB60_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv s0, a2
-; RV64I-NEXT: .LBB60_2:
-; RV64I-NEXT: beqz a1, .LBB60_4
-; RV64I-NEXT: # %bb.3:
-; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call bat
-; RV64I-NEXT: .LBB60_4:
-; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: numsignbits:
-; RV64XVENTANACONDOPS: # %bb.0:
-; RV64XVENTANACONDOPS-NEXT: addi sp, sp, -16
-; RV64XVENTANACONDOPS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64XVENTANACONDOPS-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a2, a2, a0
-; RV64XVENTANACONDOPS-NEXT: vt.maskcn s0, a3, a0
-; RV64XVENTANACONDOPS-NEXT: or s0, s0, a2
-; RV64XVENTANACONDOPS-NEXT: beqz a1, .LBB60_2
-; RV64XVENTANACONDOPS-NEXT: # %bb.1:
-; RV64XVENTANACONDOPS-NEXT: mv a0, s0
-; RV64XVENTANACONDOPS-NEXT: call bat
-; RV64XVENTANACONDOPS-NEXT: .LBB60_2:
-; RV64XVENTANACONDOPS-NEXT: mv a0, s0
-; RV64XVENTANACONDOPS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64XVENTANACONDOPS-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64XVENTANACONDOPS-NEXT: addi sp, sp, 16
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: numsignbits:
-; RV64XTHEADCONDMOV: # %bb.0:
-; RV64XTHEADCONDMOV-NEXT: addi sp, sp, -16
-; RV64XTHEADCONDMOV-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64XTHEADCONDMOV-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, a3, a0
-; RV64XTHEADCONDMOV-NEXT: sext.w s0, a2
-; RV64XTHEADCONDMOV-NEXT: beqz a1, .LBB60_2
-; RV64XTHEADCONDMOV-NEXT: # %bb.1:
-; RV64XTHEADCONDMOV-NEXT: mv a0, s0
-; RV64XTHEADCONDMOV-NEXT: call bat
-; RV64XTHEADCONDMOV-NEXT: .LBB60_2:
-; RV64XTHEADCONDMOV-NEXT: mv a0, s0
-; RV64XTHEADCONDMOV-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64XTHEADCONDMOV-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64XTHEADCONDMOV-NEXT: addi sp, sp, 16
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: numsignbits:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: addi sp, sp, -16
-; RV64ZICOND-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ZICOND-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64ZICOND-NEXT: czero.eqz a2, a2, a0
-; RV64ZICOND-NEXT: czero.nez s0, a3, a0
-; RV64ZICOND-NEXT: or s0, s0, a2
-; RV64ZICOND-NEXT: beqz a1, .LBB60_2
-; RV64ZICOND-NEXT: # %bb.1:
-; RV64ZICOND-NEXT: mv a0, s0
-; RV64ZICOND-NEXT: call bat
-; RV64ZICOND-NEXT: .LBB60_2:
-; RV64ZICOND-NEXT: mv a0, s0
-; RV64ZICOND-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64ZICOND-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64ZICOND-NEXT: addi sp, sp, 16
-; RV64ZICOND-NEXT: ret
- %5 = icmp eq i16 %0, 0
- %6 = select i1 %5, i16 %3, i16 %2
- %7 = icmp eq i16 %1, 0
- br i1 %7, label %9, label %8
-
-8: ; preds = %4
- tail call void @bat(i16 signext %6)
- br label %9
-
-9: ; preds = %8, %4
- ret i16 %6
-}
-
-declare void @bat(i16 signext)
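
; A minimal sketch of the property described above, assuming the DAG-level
; reasoning carries over to this source form: both select inputs arrive as
; signext i16 (already sign-extended in their 64-bit registers), so
; ComputeNumSignBits can prove the selected value has at least 49 sign bits
; and the return needs no slli+srai or sext.h. The function name below is
; hypothetical, not from the deleted file.
define signext i16 @select_already_extended(i1 %c, i16 signext %x, i16 signext %y) nounwind {
  %s = select i1 %c, i16 %x, i16 %y ; inherits the sign bits common to %x and %y
  ret i16 %s                        ; already fully sign-extended on RV64
}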
-
-define i64 @single_bit(i64 %x) {
-; RV64I-LABEL: single_bit:
-; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: slli a1, a0, 53
-; RV64I-NEXT: srai a1, a1, 63
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: single_bit:
-; RV64XVENTANACONDOPS: # %bb.0: # %entry
-; RV64XVENTANACONDOPS-NEXT: andi a1, a0, 1024
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: single_bit:
-; RV64XTHEADCONDMOV: # %bb.0: # %entry
-; RV64XTHEADCONDMOV-NEXT: slli a1, a0, 53
-; RV64XTHEADCONDMOV-NEXT: srai a1, a1, 63
-; RV64XTHEADCONDMOV-NEXT: and a0, a1, a0
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: single_bit:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: andi a1, a0, 1024
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a1
-; RV64ZICOND-NEXT: ret
-entry:
- %and = and i64 %x, 1024
- %tobool.not = icmp eq i64 %and, 0
- %cond = select i1 %tobool.not, i64 0, i64 %x
- ret i64 %cond
-}
-
-; Test folding a select guarded by a single-bit check into
-; (and (sra (shl x))); a hand-expanded IR sketch follows this function.
-define i64 @single_bit2(i64 %x) {
-; RV64I-LABEL: single_bit2:
-; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: slli a1, a0, 52
-; RV64I-NEXT: srai a1, a1, 63
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XVENTANACONDOPS-LABEL: single_bit2:
-; RV64XVENTANACONDOPS: # %bb.0: # %entry
-; RV64XVENTANACONDOPS-NEXT: bexti a1, a0, 11
-; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT: ret
-;
-; RV64XTHEADCONDMOV-LABEL: single_bit2:
-; RV64XTHEADCONDMOV: # %bb.0: # %entry
-; RV64XTHEADCONDMOV-NEXT: slli a1, a0, 52
-; RV64XTHEADCONDMOV-NEXT: srai a1, a1, 63
-; RV64XTHEADCONDMOV-NEXT: and a0, a1, a0
-; RV64XTHEADCONDMOV-NEXT: ret
-;
-; RV64ZICOND-LABEL: single_bit2:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: bexti a1, a0, 11
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a1
-; RV64ZICOND-NEXT: ret
-entry:
- %and = and i64 %x, 2048
- %tobool.not = icmp eq i64 %and, 0
- %cond = select i1 %tobool.not, i64 0, i64 %x
- ret i64 %cond
-}
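
; A hand-expanded IR sketch of the fold named above, under the assumption that
; the DAG combine is equivalent to this source-level rewrite: shifting bit k
; left by 63-k moves it into the sign position, an arithmetic shift right by
; 63 broadcasts it into an all-zeros or all-ones mask, and the and applies
; that mask to x. Shown for k = 11 to match single_bit2; the function name is
; hypothetical.
define i64 @single_bit2_expanded(i64 %x) {
entry:
  %shl = shl i64 %x, 52      ; 63 - 11 = 52 moves bit 11 to the sign bit
  %mask = ashr i64 %shl, 63  ; 0 if bit 11 was clear, -1 if it was set
  %and = and i64 %mask, %x   ; branchless select between 0 and %x
  ret i64 %and
}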
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll
deleted file mode 100644
index 17d9e9c..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll
+++ /dev/null
@@ -1,696 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck -check-prefix=RV64I %s
-; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck -check-prefix=RV64IM %s
-
-define i32 @udiv(i32 %a, i32 %b) nounwind {
-; RV64I-LABEL: udiv:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: srli a1, a1, 32
-; RV64I-NEXT: call __udivdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: udiv:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: divuw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = udiv i32 %a, %b
- ret i32 %1
-}
-
-define i32 @udiv_constant(i32 %a) nounwind {
-; RV64I-LABEL: udiv_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __udivdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: udiv_constant:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a0, a0, 32
-; RV64IM-NEXT: lui a1, 838861
-; RV64IM-NEXT: addi a1, a1, -819
-; RV64IM-NEXT: slli a1, a1, 32
-; RV64IM-NEXT: mulhu a0, a0, a1
-; RV64IM-NEXT: srli a0, a0, 34
-; RV64IM-NEXT: ret
- %1 = udiv i32 %a, 5
- ret i32 %1
-}
-
-define i32 @udiv_pow2(i32 %a) nounwind {
-; RV64I-LABEL: udiv_pow2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a0, a0, 3
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: udiv_pow2:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: srliw a0, a0, 3
-; RV64IM-NEXT: ret
- %1 = udiv i32 %a, 8
- ret i32 %1
-}
-
-define i32 @udiv_constant_lhs(i32 %a) nounwind {
-; RV64I-LABEL: udiv_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a1, a0, 32
-; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __udivdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: udiv_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 10
-; RV64IM-NEXT: divuw a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = udiv i32 10, %a
- ret i32 %1
-}
-
-define i64 @udiv64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: udiv64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: tail __udivdi3
-;
-; RV64IM-LABEL: udiv64:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: divu a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = udiv i64 %a, %b
- ret i64 %1
-}
-
-define i64 @udiv64_constant(i64 %a) nounwind {
-; RV64I-LABEL: udiv64_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: tail __udivdi3
-;
-; RV64IM-LABEL: udiv64_constant:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: lui a1, 838861
-; RV64IM-NEXT: addiw a1, a1, -819
-; RV64IM-NEXT: slli a2, a1, 32
-; RV64IM-NEXT: add a1, a1, a2
-; RV64IM-NEXT: mulhu a0, a0, a1
-; RV64IM-NEXT: srli a0, a0, 2
-; RV64IM-NEXT: ret
- %1 = udiv i64 %a, 5
- ret i64 %1
-}
-
-define i64 @udiv64_constant_lhs(i64 %a) nounwind {
-; RV64I-LABEL: udiv64_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: tail __udivdi3
-;
-; RV64IM-LABEL: udiv64_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 10
-; RV64IM-NEXT: divu a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = udiv i64 10, %a
- ret i64 %1
-}
-
-define i8 @udiv8(i8 %a, i8 %b) nounwind {
-; RV64I-LABEL: udiv8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: call __udivdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: udiv8:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: andi a1, a1, 255
-; RV64IM-NEXT: andi a0, a0, 255
-; RV64IM-NEXT: divuw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = udiv i8 %a, %b
- ret i8 %1
-}
-
-define i8 @udiv8_constant(i8 %a) nounwind {
-; RV64I-LABEL: udiv8_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __udivdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: udiv8_constant:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: andi a0, a0, 255
-; RV64IM-NEXT: li a1, 205
-; RV64IM-NEXT: mul a0, a0, a1
-; RV64IM-NEXT: srliw a0, a0, 10
-; RV64IM-NEXT: ret
- %1 = udiv i8 %a, 5
- ret i8 %1
-}
-
-define i8 @udiv8_pow2(i8 %a) nounwind {
-; RV64I-LABEL: udiv8_pow2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 56
-; RV64I-NEXT: srli a0, a0, 59
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: udiv8_pow2:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a0, a0, 56
-; RV64IM-NEXT: srli a0, a0, 59
-; RV64IM-NEXT: ret
- %1 = udiv i8 %a, 8
- ret i8 %1
-}
-
-define i8 @udiv8_constant_lhs(i8 %a) nounwind {
-; RV64I-LABEL: udiv8_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: andi a1, a0, 255
-; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __udivdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: udiv8_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: andi a0, a0, 255
-; RV64IM-NEXT: li a1, 10
-; RV64IM-NEXT: divuw a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = udiv i8 10, %a
- ret i8 %1
-}
-
-define i16 @udiv16(i16 %a, i16 %b) nounwind {
-; RV64I-LABEL: udiv16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lui a2, 16
-; RV64I-NEXT: addiw a2, a2, -1
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: call __udivdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: udiv16:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: lui a2, 16
-; RV64IM-NEXT: addi a2, a2, -1
-; RV64IM-NEXT: and a1, a1, a2
-; RV64IM-NEXT: and a0, a0, a2
-; RV64IM-NEXT: divuw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = udiv i16 %a, %b
- ret i16 %1
-}
-
-define i16 @udiv16_constant(i16 %a) nounwind {
-; RV64I-LABEL: udiv16_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __udivdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: udiv16_constant:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: lui a1, 52429
-; RV64IM-NEXT: slli a1, a1, 4
-; RV64IM-NEXT: slli a0, a0, 48
-; RV64IM-NEXT: mulhu a0, a0, a1
-; RV64IM-NEXT: srliw a0, a0, 18
-; RV64IM-NEXT: ret
- %1 = udiv i16 %a, 5
- ret i16 %1
-}
-
-define i16 @udiv16_pow2(i16 %a) nounwind {
-; RV64I-LABEL: udiv16_pow2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srli a0, a0, 51
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: udiv16_pow2:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a0, a0, 48
-; RV64IM-NEXT: srli a0, a0, 51
-; RV64IM-NEXT: ret
- %1 = udiv i16 %a, 8
- ret i16 %1
-}
-
-define i16 @udiv16_constant_lhs(i16 %a) nounwind {
-; RV64I-LABEL: udiv16_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srli a1, a0, 48
-; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __udivdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: udiv16_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a0, a0, 48
-; RV64IM-NEXT: srli a0, a0, 48
-; RV64IM-NEXT: li a1, 10
-; RV64IM-NEXT: divuw a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = udiv i16 10, %a
- ret i16 %1
-}
-
-define i32 @sdiv(i32 %a, i32 %b) nounwind {
-; RV64I-LABEL: sdiv:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: call __divdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: sdiv:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: divw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = sdiv i32 %a, %b
- ret i32 %1
-}
-
-define i32 @sdiv_constant(i32 %a) nounwind {
-; RV64I-LABEL: sdiv_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __divdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: sdiv_constant:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: sext.w a0, a0
-; RV64IM-NEXT: lui a1, 419430
-; RV64IM-NEXT: addiw a1, a1, 1639
-; RV64IM-NEXT: mul a0, a0, a1
-; RV64IM-NEXT: srli a1, a0, 63
-; RV64IM-NEXT: srai a0, a0, 33
-; RV64IM-NEXT: addw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = sdiv i32 %a, 5
- ret i32 %1
-}
-
-define i32 @sdiv_pow2(i32 %a) nounwind {
-; RV64I-LABEL: sdiv_pow2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sraiw a1, a0, 31
-; RV64I-NEXT: srliw a1, a1, 29
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: sraiw a0, a0, 3
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: sdiv_pow2:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: sraiw a1, a0, 31
-; RV64IM-NEXT: srliw a1, a1, 29
-; RV64IM-NEXT: add a0, a0, a1
-; RV64IM-NEXT: sraiw a0, a0, 3
-; RV64IM-NEXT: ret
- %1 = sdiv i32 %a, 8
- ret i32 %1
-}
-
-define i32 @sdiv_pow2_2(i32 %a) nounwind {
-; RV64I-LABEL: sdiv_pow2_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sraiw a1, a0, 31
-; RV64I-NEXT: srliw a1, a1, 16
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: sraiw a0, a0, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: sdiv_pow2_2:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: sraiw a1, a0, 31
-; RV64IM-NEXT: srliw a1, a1, 16
-; RV64IM-NEXT: add a0, a0, a1
-; RV64IM-NEXT: sraiw a0, a0, 16
-; RV64IM-NEXT: ret
- %1 = sdiv i32 %a, 65536
- ret i32 %1
-}
-
-define i32 @sdiv_constant_lhs(i32 %a) nounwind {
-; RV64I-LABEL: sdiv_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sext.w a1, a0
-; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __divdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: sdiv_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, -10
-; RV64IM-NEXT: divw a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = sdiv i32 -10, %a
- ret i32 %1
-}
-
-define i64 @sdiv64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: sdiv64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: tail __divdi3
-;
-; RV64IM-LABEL: sdiv64:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: div a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = sdiv i64 %a, %b
- ret i64 %1
-}
-
-define i64 @sdiv64_constant(i64 %a) nounwind {
-; RV64I-LABEL: sdiv64_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: tail __divdi3
-;
-; RV64IM-LABEL: sdiv64_constant:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: lui a1, %hi(.LCPI21_0)
-; RV64IM-NEXT: ld a1, %lo(.LCPI21_0)(a1)
-; RV64IM-NEXT: mulh a0, a0, a1
-; RV64IM-NEXT: srli a1, a0, 63
-; RV64IM-NEXT: srai a0, a0, 1
-; RV64IM-NEXT: add a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = sdiv i64 %a, 5
- ret i64 %1
-}
-
-define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
-; RV64I-LABEL: sdiv64_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: tail __divdi3
-;
-; RV64IM-LABEL: sdiv64_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 10
-; RV64IM-NEXT: div a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = sdiv i64 10, %a
- ret i64 %1
-}
-
-; Although this sdiv has two sexti32 operands, it shouldn't compile to divw on
-; RV64M as that wouldn't produce the correct result for e.g. INT_MIN/-1.
-
-define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
-; RV64I-LABEL: sdiv64_sext_operands:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: tail __divdi3
-;
-; RV64IM-LABEL: sdiv64_sext_operands:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: sext.w a0, a0
-; RV64IM-NEXT: sext.w a1, a1
-; RV64IM-NEXT: div a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = sext i32 %a to i64
- %2 = sext i32 %b to i64
- %3 = sdiv i64 %1, %2
- ret i64 %3
-}
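The INT_MIN/-1 hazard the comment above describes is easy to check on the host; a minimal C sketch (my own illustration, not part of the test), modeling `divw` per the RISC-V M-extension rule that signed overflow returns the dividend:

```c
#include <assert.h>
#include <stdint.h>

int main(void) {
  int64_t a = INT32_MIN, b = -1;  /* both operands pass the sexti32 check */
  int64_t sdiv64 = a / b;         /* the correct 64-bit quotient: 2147483648 */
  int64_t divw = INT32_MIN;       /* divw: 32-bit overflow yields INT32_MIN,
                                     which is then sign-extended to 64 bits */
  assert(sdiv64 == 2147483648LL);
  assert(divw != sdiv64);         /* so folding to divw would miscompile */
  return 0;
}
```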
-
-define i8 @sdiv8(i8 %a, i8 %b) nounwind {
-; RV64I-LABEL: sdiv8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a1, a1, 24
-; RV64I-NEXT: sraiw a1, a1, 24
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: sraiw a0, a0, 24
-; RV64I-NEXT: call __divdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: sdiv8:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a1, a1, 24
-; RV64IM-NEXT: sraiw a1, a1, 24
-; RV64IM-NEXT: slli a0, a0, 24
-; RV64IM-NEXT: sraiw a0, a0, 24
-; RV64IM-NEXT: divw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = sdiv i8 %a, %b
- ret i8 %1
-}
-
-define i8 @sdiv8_constant(i8 %a) nounwind {
-; RV64I-LABEL: sdiv8_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: sraiw a0, a0, 24
-; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __divdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: sdiv8_constant:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a0, a0, 24
-; RV64IM-NEXT: sraiw a0, a0, 24
-; RV64IM-NEXT: li a1, 103
-; RV64IM-NEXT: mul a0, a0, a1
-; RV64IM-NEXT: sraiw a1, a0, 9
-; RV64IM-NEXT: slli a0, a0, 48
-; RV64IM-NEXT: srli a0, a0, 63
-; RV64IM-NEXT: addw a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = sdiv i8 %a, 5
- ret i8 %1
-}
-
-define i8 @sdiv8_pow2(i8 %a) nounwind {
-; RV64I-LABEL: sdiv8_pow2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a0, 24
-; RV64I-NEXT: sraiw a1, a1, 24
-; RV64I-NEXT: slli a1, a1, 49
-; RV64I-NEXT: srli a1, a1, 61
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: sraiw a0, a0, 27
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: sdiv8_pow2:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a1, a0, 24
-; RV64IM-NEXT: sraiw a1, a1, 24
-; RV64IM-NEXT: slli a1, a1, 49
-; RV64IM-NEXT: srli a1, a1, 61
-; RV64IM-NEXT: add a0, a0, a1
-; RV64IM-NEXT: slli a0, a0, 24
-; RV64IM-NEXT: sraiw a0, a0, 27
-; RV64IM-NEXT: ret
- %1 = sdiv i8 %a, 8
- ret i8 %1
-}
-
-define i8 @sdiv8_constant_lhs(i8 %a) nounwind {
-; RV64I-LABEL: sdiv8_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: sraiw a1, a0, 24
-; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __divdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: sdiv8_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a0, a0, 24
-; RV64IM-NEXT: sraiw a0, a0, 24
-; RV64IM-NEXT: li a1, -10
-; RV64IM-NEXT: divw a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = sdiv i8 -10, %a
- ret i8 %1
-}
-
-define i16 @sdiv16(i16 %a, i16 %b) nounwind {
-; RV64I-LABEL: sdiv16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a1, a1, 16
-; RV64I-NEXT: sraiw a1, a1, 16
-; RV64I-NEXT: slli a0, a0, 16
-; RV64I-NEXT: sraiw a0, a0, 16
-; RV64I-NEXT: call __divdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: sdiv16:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a1, a1, 16
-; RV64IM-NEXT: sraiw a1, a1, 16
-; RV64IM-NEXT: slli a0, a0, 16
-; RV64IM-NEXT: sraiw a0, a0, 16
-; RV64IM-NEXT: divw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = sdiv i16 %a, %b
- ret i16 %1
-}
-
-define i16 @sdiv16_constant(i16 %a) nounwind {
-; RV64I-LABEL: sdiv16_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a0, a0, 16
-; RV64I-NEXT: sraiw a0, a0, 16
-; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __divdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: sdiv16_constant:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a0, a0, 16
-; RV64IM-NEXT: sraiw a0, a0, 16
-; RV64IM-NEXT: lui a1, 6
-; RV64IM-NEXT: addi a1, a1, 1639
-; RV64IM-NEXT: mul a0, a0, a1
-; RV64IM-NEXT: srliw a1, a0, 31
-; RV64IM-NEXT: sraiw a0, a0, 17
-; RV64IM-NEXT: addw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = sdiv i16 %a, 5
- ret i16 %1
-}
-
-define i16 @sdiv16_pow2(i16 %a) nounwind {
-; RV64I-LABEL: sdiv16_pow2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a0, 16
-; RV64I-NEXT: sraiw a1, a1, 16
-; RV64I-NEXT: slli a1, a1, 33
-; RV64I-NEXT: srli a1, a1, 61
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: slli a0, a0, 16
-; RV64I-NEXT: sraiw a0, a0, 19
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: sdiv16_pow2:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a1, a0, 16
-; RV64IM-NEXT: sraiw a1, a1, 16
-; RV64IM-NEXT: slli a1, a1, 33
-; RV64IM-NEXT: srli a1, a1, 61
-; RV64IM-NEXT: add a0, a0, a1
-; RV64IM-NEXT: slli a0, a0, 16
-; RV64IM-NEXT: sraiw a0, a0, 19
-; RV64IM-NEXT: ret
- %1 = sdiv i16 %a, 8
- ret i16 %1
-}
-
-define i16 @sdiv16_constant_lhs(i16 %a) nounwind {
-; RV64I-LABEL: sdiv16_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a0, a0, 16
-; RV64I-NEXT: sraiw a1, a0, 16
-; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __divdi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: sdiv16_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a0, a0, 16
-; RV64IM-NEXT: sraiw a0, a0, 16
-; RV64IM-NEXT: li a1, -10
-; RV64IM-NEXT: divw a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = sdiv i16 -10, %a
- ret i16 %1
-}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/imm.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/imm.ll
deleted file mode 100644
index 5b8f7fe..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/imm.ll
+++ /dev/null
@@ -1,2741 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -riscv-disable-using-constant-pool-for-large-ints -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefixes=RV64I,RV64-NOPOOL
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefixes=RV64I,RV64I-POOL
-; RUN: llc -mtriple=riscv64 -riscv-disable-using-constant-pool-for-large-ints -mattr=+zba \
-; RUN: -riscv-experimental-rv64-legal-i32 -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV64IZBA
-; RUN: llc -mtriple=riscv64 -riscv-disable-using-constant-pool-for-large-ints -mattr=+zbb \
-; RUN: -riscv-experimental-rv64-legal-i32 -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV64IZBB
-; RUN: llc -mtriple=riscv64 -riscv-disable-using-constant-pool-for-large-ints -mattr=+zbs \
-; RUN: -riscv-experimental-rv64-legal-i32 -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV64IZBS
-; RUN: llc -mtriple=riscv64 -riscv-disable-using-constant-pool-for-large-ints -mattr=+xtheadbb \
-; RUN: -riscv-experimental-rv64-legal-i32 -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV64IXTHEADBB
-
-; Materializing constants
-
-; TODO: It would be preferable if anyext constant returns were sign rather
-; than zero extended. See PR39092. For now, mark returns as explicitly signext
-; (this matches what Clang would generate for equivalent C/C++ anyway).
-
-define signext i32 @zero() nounwind {
-; RV64I-LABEL: zero:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, 0
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: zero:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, 0
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: zero:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, 0
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: zero:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: li a0, 0
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: zero:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, 0
-; RV64IXTHEADBB-NEXT: ret
- ret i32 0
-}
-
-define signext i32 @pos_small() nounwind {
-; RV64I-LABEL: pos_small:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, 2047
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: pos_small:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, 2047
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: pos_small:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, 2047
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: pos_small:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: li a0, 2047
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: pos_small:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, 2047
-; RV64IXTHEADBB-NEXT: ret
- ret i32 2047
-}
-
-define signext i32 @neg_small() nounwind {
-; RV64I-LABEL: neg_small:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, -2048
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: neg_small:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, -2048
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: neg_small:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, -2048
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: neg_small:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: li a0, -2048
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: neg_small:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, -2048
-; RV64IXTHEADBB-NEXT: ret
- ret i32 -2048
-}
-
-define signext i32 @pos_i32() nounwind {
-; RV64I-LABEL: pos_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 423811
-; RV64I-NEXT: addiw a0, a0, -1297
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: pos_i32:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 423811
-; RV64IZBA-NEXT: addiw a0, a0, -1297
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: pos_i32:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 423811
-; RV64IZBB-NEXT: addiw a0, a0, -1297
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: pos_i32:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 423811
-; RV64IZBS-NEXT: addiw a0, a0, -1297
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: pos_i32:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 423811
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -1297
-; RV64IXTHEADBB-NEXT: ret
- ret i32 1735928559
-}
-
-define signext i32 @neg_i32() nounwind {
-; RV64I-LABEL: neg_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 912092
-; RV64I-NEXT: addiw a0, a0, -273
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: neg_i32:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 912092
-; RV64IZBA-NEXT: addiw a0, a0, -273
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: neg_i32:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 912092
-; RV64IZBB-NEXT: addiw a0, a0, -273
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: neg_i32:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 912092
-; RV64IZBS-NEXT: addiw a0, a0, -273
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: neg_i32:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 912092
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -273
-; RV64IXTHEADBB-NEXT: ret
- ret i32 -559038737
-}
-
-define signext i32 @pos_i32_hi20_only() nounwind {
-; RV64I-LABEL: pos_i32_hi20_only:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 16
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: pos_i32_hi20_only:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 16
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: pos_i32_hi20_only:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 16
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: pos_i32_hi20_only:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 16
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: pos_i32_hi20_only:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 16
-; RV64IXTHEADBB-NEXT: ret
- ret i32 65536 ; 0x10000
-}
-
-define signext i32 @neg_i32_hi20_only() nounwind {
-; RV64I-LABEL: neg_i32_hi20_only:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 1048560
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: neg_i32_hi20_only:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 1048560
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: neg_i32_hi20_only:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 1048560
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: neg_i32_hi20_only:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 1048560
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: neg_i32_hi20_only:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 1048560
-; RV64IXTHEADBB-NEXT: ret
- ret i32 -65536 ; -0x10000
-}
-
-; This can be materialized with ADDI+SLLI, improving compressibility.
-
-define signext i32 @imm_left_shifted_addi() nounwind {
-; RV64I-LABEL: imm_left_shifted_addi:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 32
-; RV64I-NEXT: addiw a0, a0, -64
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_left_shifted_addi:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 32
-; RV64IZBA-NEXT: addiw a0, a0, -64
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_left_shifted_addi:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 32
-; RV64IZBB-NEXT: addiw a0, a0, -64
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_left_shifted_addi:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 32
-; RV64IZBS-NEXT: addiw a0, a0, -64
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_left_shifted_addi:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 32
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -64
-; RV64IXTHEADBB-NEXT: ret
- ret i32 131008 ; 0x1FFC0
-}
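A quick check of the ADDI+SLLI decomposition the comment refers to (a standalone C sketch; the 2047/6 split is my reading of the constant, not spelled out in the test):

```c
#include <assert.h>
#include <stdint.h>

int main(void) {
  int64_t a0 = 2047;    /* li a0, 2047 (a plain ADDI) */
  a0 <<= 6;             /* slli a0, a0, 6 -- compressible as c.slli */
  assert(a0 == 131008); /* 0x1FFC0, the value returned above */
  return 0;
}
```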
-
-; This can be materialized with ADDI+SRLI, improving compressibility.
-
-define signext i32 @imm_right_shifted_addi() nounwind {
-; RV64I-LABEL: imm_right_shifted_addi:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 524288
-; RV64I-NEXT: addiw a0, a0, -1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_right_shifted_addi:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 524288
-; RV64IZBA-NEXT: addiw a0, a0, -1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_right_shifted_addi:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 524288
-; RV64IZBB-NEXT: addiw a0, a0, -1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_right_shifted_addi:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 524288
-; RV64IZBS-NEXT: addiw a0, a0, -1
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_right_shifted_addi:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 524288
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -1
-; RV64IXTHEADBB-NEXT: ret
- ret i32 2147483647 ; 0x7FFFFFFF
-}
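The ADDI+SRLI alternative exploits that `li a0, -1` sets all 64 bits, so a logical right shift by 33 leaves exactly the 31 ones of INT32_MAX; a C sketch of the bit arithmetic (my illustration):

```c
#include <assert.h>
#include <stdint.h>

int main(void) {
  uint64_t a0 = (uint64_t)-1; /* li a0, -1 -- small enough for c.li */
  a0 >>= 33;                  /* srli a0, a0, 33 */
  assert(a0 == 0x7FFFFFFF);   /* 2147483647, as returned above */
  return 0;
}
```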
-
-; This can be materialized with LUI+SRLI, improving compressibility.
-
-define signext i32 @imm_right_shifted_lui() nounwind {
-; RV64I-LABEL: imm_right_shifted_lui:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 56
-; RV64I-NEXT: addiw a0, a0, 580
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_right_shifted_lui:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 56
-; RV64IZBA-NEXT: addiw a0, a0, 580
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_right_shifted_lui:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 56
-; RV64IZBB-NEXT: addiw a0, a0, 580
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_right_shifted_lui:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 56
-; RV64IZBS-NEXT: addiw a0, a0, 580
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_right_shifted_lui:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 56
-; RV64IXTHEADBB-NEXT: addiw a0, a0, 580
-; RV64IXTHEADBB-NEXT: ret
- ret i32 229956 ; 0x38244
-}
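One LUI+SRLI decomposition that reaches 0x38244 (the imm20 of 0xE091 and the shift of 10 are my own arithmetic, not taken from the test):

```c
#include <assert.h>
#include <stdint.h>

int main(void) {
  uint64_t a0 = (uint64_t)0xE091 << 12; /* lui a0, 0xE091 -> 0xE091000 */
  a0 >>= 10;                            /* srli a0, a0, 10 */
  assert(a0 == 0x38244);                /* 229956, as returned above */
  return 0;
}
```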
-
-define i64 @imm64_1() nounwind {
-; RV64I-LABEL: imm64_1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, 1
-; RV64I-NEXT: slli a0, a0, 31
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_1:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, 1
-; RV64IZBA-NEXT: slli a0, a0, 31
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_1:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, 1
-; RV64IZBB-NEXT: slli a0, a0, 31
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_1:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: bseti a0, zero, 31
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_1:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, 1
-; RV64IXTHEADBB-NEXT: slli a0, a0, 31
-; RV64IXTHEADBB-NEXT: ret
- ret i64 2147483648 ; 0x8000_0000
-}
-
-define i64 @imm64_2() nounwind {
-; RV64I-LABEL: imm64_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, -1
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_2:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, -1
-; RV64IZBA-NEXT: srli a0, a0, 32
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_2:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, -1
-; RV64IZBB-NEXT: srli a0, a0, 32
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_2:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: li a0, -1
-; RV64IZBS-NEXT: srli a0, a0, 32
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_2:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, -1
-; RV64IXTHEADBB-NEXT: srli a0, a0, 32
-; RV64IXTHEADBB-NEXT: ret
- ret i64 4294967295 ; 0xFFFF_FFFF
-}
-
-define i64 @imm64_3() nounwind {
-; RV64I-LABEL: imm64_3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, 1
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_3:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, 1
-; RV64IZBA-NEXT: slli a0, a0, 32
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_3:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, 1
-; RV64IZBB-NEXT: slli a0, a0, 32
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_3:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: bseti a0, zero, 32
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_3:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, 1
-; RV64IXTHEADBB-NEXT: slli a0, a0, 32
-; RV64IXTHEADBB-NEXT: ret
- ret i64 4294967296 ; 0x1_0000_0000
-}
-
-define i64 @imm64_4() nounwind {
-; RV64I-LABEL: imm64_4:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, -1
-; RV64I-NEXT: slli a0, a0, 63
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_4:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, -1
-; RV64IZBA-NEXT: slli a0, a0, 63
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_4:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, -1
-; RV64IZBB-NEXT: slli a0, a0, 63
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_4:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: bseti a0, zero, 63
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_4:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, -1
-; RV64IXTHEADBB-NEXT: slli a0, a0, 63
-; RV64IXTHEADBB-NEXT: ret
- ret i64 9223372036854775808 ; 0x8000_0000_0000_0000
-}
-
-define i64 @imm64_5() nounwind {
-; RV64I-LABEL: imm64_5:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, -1
-; RV64I-NEXT: slli a0, a0, 63
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_5:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, -1
-; RV64IZBA-NEXT: slli a0, a0, 63
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_5:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, -1
-; RV64IZBB-NEXT: slli a0, a0, 63
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_5:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: bseti a0, zero, 63
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_5:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, -1
-; RV64IXTHEADBB-NEXT: slli a0, a0, 63
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -9223372036854775808 ; 0x8000_0000_0000_0000
-}
-
-define i64 @imm64_6() nounwind {
-; RV64I-LABEL: imm64_6:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 9321
-; RV64I-NEXT: addi a0, a0, -1329
-; RV64I-NEXT: slli a0, a0, 35
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_6:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 9321
-; RV64IZBA-NEXT: addi a0, a0, -1329
-; RV64IZBA-NEXT: slli a0, a0, 35
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_6:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 9321
-; RV64IZBB-NEXT: addi a0, a0, -1329
-; RV64IZBB-NEXT: slli a0, a0, 35
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_6:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 9321
-; RV64IZBS-NEXT: addi a0, a0, -1329
-; RV64IZBS-NEXT: slli a0, a0, 35
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_6:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 9321
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1329
-; RV64IXTHEADBB-NEXT: slli a0, a0, 35
-; RV64IXTHEADBB-NEXT: ret
- ret i64 1311768464867721216 ; 0x1234_5678_0000_0000
-}
-
-define i64 @imm64_7() nounwind {
-; RV64I-LABEL: imm64_7:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, 7
-; RV64I-NEXT: slli a0, a0, 36
-; RV64I-NEXT: addi a0, a0, 11
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: addi a0, a0, 15
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_7:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, 7
-; RV64IZBA-NEXT: slli a0, a0, 36
-; RV64IZBA-NEXT: addi a0, a0, 11
-; RV64IZBA-NEXT: slli a0, a0, 24
-; RV64IZBA-NEXT: addi a0, a0, 15
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_7:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, 7
-; RV64IZBB-NEXT: slli a0, a0, 36
-; RV64IZBB-NEXT: addi a0, a0, 11
-; RV64IZBB-NEXT: slli a0, a0, 24
-; RV64IZBB-NEXT: addi a0, a0, 15
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_7:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: li a0, 7
-; RV64IZBS-NEXT: slli a0, a0, 36
-; RV64IZBS-NEXT: addi a0, a0, 11
-; RV64IZBS-NEXT: slli a0, a0, 24
-; RV64IZBS-NEXT: addi a0, a0, 15
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_7:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, 7
-; RV64IXTHEADBB-NEXT: slli a0, a0, 36
-; RV64IXTHEADBB-NEXT: addi a0, a0, 11
-; RV64IXTHEADBB-NEXT: slli a0, a0, 24
-; RV64IXTHEADBB-NEXT: addi a0, a0, 15
-; RV64IXTHEADBB-NEXT: ret
- ret i64 8070450532432478223 ; 0x7000_0000_0B00_000F
-}
-
-; TODO: It can be preferable to put constants that are expensive to materialize
-; into the constant pool, especially for -Os.
-define i64 @imm64_8() nounwind {
-; RV64-NOPOOL-LABEL: imm64_8:
-; RV64-NOPOOL: # %bb.0:
-; RV64-NOPOOL-NEXT: lui a0, 583
-; RV64-NOPOOL-NEXT: addi a0, a0, -1875
-; RV64-NOPOOL-NEXT: slli a0, a0, 14
-; RV64-NOPOOL-NEXT: addi a0, a0, -947
-; RV64-NOPOOL-NEXT: slli a0, a0, 12
-; RV64-NOPOOL-NEXT: addi a0, a0, 1511
-; RV64-NOPOOL-NEXT: slli a0, a0, 13
-; RV64-NOPOOL-NEXT: addi a0, a0, -272
-; RV64-NOPOOL-NEXT: ret
-;
-; RV64I-POOL-LABEL: imm64_8:
-; RV64I-POOL: # %bb.0:
-; RV64I-POOL-NEXT: lui a0, %hi(.LCPI17_0)
-; RV64I-POOL-NEXT: ld a0, %lo(.LCPI17_0)(a0)
-; RV64I-POOL-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_8:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 596523
-; RV64IZBA-NEXT: addi a0, a0, 965
-; RV64IZBA-NEXT: slli.uw a0, a0, 13
-; RV64IZBA-NEXT: addi a0, a0, -1347
-; RV64IZBA-NEXT: slli a0, a0, 12
-; RV64IZBA-NEXT: addi a0, a0, -529
-; RV64IZBA-NEXT: slli a0, a0, 4
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_8:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 583
-; RV64IZBB-NEXT: addi a0, a0, -1875
-; RV64IZBB-NEXT: slli a0, a0, 14
-; RV64IZBB-NEXT: addi a0, a0, -947
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: addi a0, a0, 1511
-; RV64IZBB-NEXT: slli a0, a0, 13
-; RV64IZBB-NEXT: addi a0, a0, -272
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_8:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 583
-; RV64IZBS-NEXT: addi a0, a0, -1875
-; RV64IZBS-NEXT: slli a0, a0, 14
-; RV64IZBS-NEXT: addi a0, a0, -947
-; RV64IZBS-NEXT: slli a0, a0, 12
-; RV64IZBS-NEXT: addi a0, a0, 1511
-; RV64IZBS-NEXT: slli a0, a0, 13
-; RV64IZBS-NEXT: addi a0, a0, -272
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_8:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 583
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1875
-; RV64IXTHEADBB-NEXT: slli a0, a0, 14
-; RV64IXTHEADBB-NEXT: addi a0, a0, -947
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: addi a0, a0, 1511
-; RV64IXTHEADBB-NEXT: slli a0, a0, 13
-; RV64IXTHEADBB-NEXT: addi a0, a0, -272
-; RV64IXTHEADBB-NEXT: ret
- ret i64 1311768467463790320 ; 0x1234_5678_9ABC_DEF0
-}
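Rough code-size arithmetic behind the TODO, assuming 4-byte uncompressed instructions and the 2-instruction pool load shown in the RV64I-POOL checks (my estimate; alignment and pool sharing between functions are ignored):

```c
#include <assert.h>

int main(void) {
  int inline_bytes = 8 * 4;   /* lui + 3x(addi+slli) + addi = 8 insts */
  int pool_bytes = 2 * 4 + 8; /* lui+ld plus the 8-byte literal itself */
  assert(pool_bytes < inline_bytes); /* 16 < 32, hence the -Os appeal */
  return 0;
}
```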
-
-define i64 @imm64_9() nounwind {
-; RV64I-LABEL: imm64_9:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, -1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_9:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, -1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_9:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, -1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_9:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: li a0, -1
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_9:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, -1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -1
-}
-
-; Various cases where extraneous ADDIs can be inserted where a (left shifted)
-; LUI suffices.
-
-define i64 @imm_left_shifted_lui_1() nounwind {
-; RV64I-LABEL: imm_left_shifted_lui_1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 262145
-; RV64I-NEXT: slli a0, a0, 1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_left_shifted_lui_1:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 262145
-; RV64IZBA-NEXT: slli a0, a0, 1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_left_shifted_lui_1:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 262145
-; RV64IZBB-NEXT: slli a0, a0, 1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_left_shifted_lui_1:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 262145
-; RV64IZBS-NEXT: slli a0, a0, 1
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_left_shifted_lui_1:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 262145
-; RV64IXTHEADBB-NEXT: slli a0, a0, 1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 2147491840 ; 0x8000_2000
-}
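For this first case, LUI already builds the whole bit pattern one position to the right, so no trailing ADDI is needed; a C sketch (my illustration):

```c
#include <assert.h>
#include <stdint.h>

int main(void) {
  int64_t a0 = (int64_t)262145 << 12; /* lui a0, 262145 -> 0x40001000 */
  a0 <<= 1;                           /* slli a0, a0, 1 */
  assert(a0 == 2147491840LL);         /* 0x8000_2000, as returned above */
  return 0;
}
```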
-
-define i64 @imm_left_shifted_lui_2() nounwind {
-; RV64I-LABEL: imm_left_shifted_lui_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 262145
-; RV64I-NEXT: slli a0, a0, 2
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_left_shifted_lui_2:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 262145
-; RV64IZBA-NEXT: slli a0, a0, 2
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_left_shifted_lui_2:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 262145
-; RV64IZBB-NEXT: slli a0, a0, 2
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_left_shifted_lui_2:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 262145
-; RV64IZBS-NEXT: slli a0, a0, 2
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_left_shifted_lui_2:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 262145
-; RV64IXTHEADBB-NEXT: slli a0, a0, 2
-; RV64IXTHEADBB-NEXT: ret
- ret i64 4294983680 ; 0x1_0000_4000
-}
-
-define i64 @imm_left_shifted_lui_3() nounwind {
-; RV64I-LABEL: imm_left_shifted_lui_3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 4097
-; RV64I-NEXT: slli a0, a0, 20
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_left_shifted_lui_3:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 4097
-; RV64IZBA-NEXT: slli a0, a0, 20
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_left_shifted_lui_3:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 4097
-; RV64IZBB-NEXT: slli a0, a0, 20
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_left_shifted_lui_3:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 4097
-; RV64IZBS-NEXT: slli a0, a0, 20
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_left_shifted_lui_3:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 4097
-; RV64IXTHEADBB-NEXT: slli a0, a0, 20
-; RV64IXTHEADBB-NEXT: ret
- ret i64 17596481011712 ; 0x1001_0000_0000
-}
-
-; Various cases where extraneous ADDIs can be inserted where a (right shifted)
-; LUI suffices, or where multiple ADDIs can be used instead of a single LUI.
-
-define i64 @imm_right_shifted_lui_1() nounwind {
-; RV64I-LABEL: imm_right_shifted_lui_1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 983056
-; RV64I-NEXT: srli a0, a0, 16
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_right_shifted_lui_1:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 983056
-; RV64IZBA-NEXT: srli a0, a0, 16
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_right_shifted_lui_1:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 983056
-; RV64IZBB-NEXT: srli a0, a0, 16
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_right_shifted_lui_1:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 983056
-; RV64IZBS-NEXT: srli a0, a0, 16
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_right_shifted_lui_1:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 983056
-; RV64IXTHEADBB-NEXT: srli a0, a0, 16
-; RV64IXTHEADBB-NEXT: ret
- ret i64 281474976706561 ; 0xFFFF_FFFF_F001
-}
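This pattern leans on RV64 LUI sign-extending its 32-bit result, so the logical right shift pulls a run of ones into the middle of the constant; a C sketch (my illustration):

```c
#include <assert.h>
#include <stdint.h>

int main(void) {
  /* lui a0, 983056: imm20 0xF0010 has bit 19 set, so the 32-bit value
     0xF0010000 sign-extends to 0xFFFFFFFF_F0010000 on RV64. */
  int64_t lui = (int64_t)(int32_t)(983056u << 12);
  uint64_t a0 = (uint64_t)lui >> 16; /* srli a0, a0, 16 */
  assert(a0 == 0xFFFFFFFFF001ULL);   /* 281474976706561, as above */
  return 0;
}
```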
-
-define i64 @imm_right_shifted_lui_2() nounwind {
-; RV64I-LABEL: imm_right_shifted_lui_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 1044481
-; RV64I-NEXT: slli a0, a0, 12
-; RV64I-NEXT: srli a0, a0, 24
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_right_shifted_lui_2:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 1044481
-; RV64IZBA-NEXT: slli a0, a0, 12
-; RV64IZBA-NEXT: srli a0, a0, 24
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_right_shifted_lui_2:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 1044481
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: srli a0, a0, 24
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_right_shifted_lui_2:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 1044481
-; RV64IZBS-NEXT: slli a0, a0, 12
-; RV64IZBS-NEXT: srli a0, a0, 24
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_right_shifted_lui_2:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 1044481
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: srli a0, a0, 24
-; RV64IXTHEADBB-NEXT: ret
- ret i64 1099511623681 ; 0xFF_FFFF_F001
-}
-
-; We can materialize the upper bits with a single (shifted) LUI, but that option
-; can be missed due to the lower bits, which aren't just 1s or just 0s.
-
-define i64 @imm_decoupled_lui_addi() nounwind {
-; RV64I-LABEL: imm_decoupled_lui_addi:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 4097
-; RV64I-NEXT: slli a0, a0, 20
-; RV64I-NEXT: addi a0, a0, -3
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_decoupled_lui_addi:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 4097
-; RV64IZBA-NEXT: slli a0, a0, 20
-; RV64IZBA-NEXT: addi a0, a0, -3
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_decoupled_lui_addi:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 4097
-; RV64IZBB-NEXT: slli a0, a0, 20
-; RV64IZBB-NEXT: addi a0, a0, -3
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_decoupled_lui_addi:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 4097
-; RV64IZBS-NEXT: slli a0, a0, 20
-; RV64IZBS-NEXT: addi a0, a0, -3
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_decoupled_lui_addi:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 4097
-; RV64IXTHEADBB-NEXT: slli a0, a0, 20
-; RV64IXTHEADBB-NEXT: addi a0, a0, -3
-; RV64IXTHEADBB-NEXT: ret
- ret i64 17596481011709 ; 0x1000_FFFF_FFFD
-}
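Here the upper bits 0x1001 << 32 come from a shifted LUI and the low bits are just -3; a C sketch of the sequence above (my illustration):

```c
#include <assert.h>
#include <stdint.h>

int main(void) {
  int64_t a0 = (int64_t)4097 << 12; /* lui a0, 4097 -> 0x1001000 */
  a0 <<= 20;                        /* slli a0, a0, 20 -> 0x1001_0000_0000 */
  a0 -= 3;                          /* addi a0, a0, -3 */
  assert(a0 == 17596481011709LL);   /* 0x1000_FFFF_FFFD */
  return 0;
}
```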
-
-; This constant can be materialized for RV64 with LUI+SRLI+XORI.
-
-define i64 @imm_end_xori_1() nounwind {
-; RV64I-LABEL: imm_end_xori_1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 983040
-; RV64I-NEXT: srli a0, a0, 3
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_end_xori_1:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 983040
-; RV64IZBA-NEXT: srli a0, a0, 3
-; RV64IZBA-NEXT: not a0, a0
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_end_xori_1:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 983040
-; RV64IZBB-NEXT: srli a0, a0, 3
-; RV64IZBB-NEXT: not a0, a0
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_end_xori_1:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 983040
-; RV64IZBS-NEXT: srli a0, a0, 3
-; RV64IZBS-NEXT: not a0, a0
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_end_xori_1:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 983040
-; RV64IXTHEADBB-NEXT: srli a0, a0, 3
-; RV64IXTHEADBB-NEXT: not a0, a0
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -2305843009180139521 ; 0xE000_0000_01FF_FFFF
-}
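`not` in the checks is the XORI-with--1 pseudo, so this is exactly the LUI+SRLI+XORI sequence the comment names; a C sketch of the three steps (my illustration):

```c
#include <assert.h>
#include <stdint.h>

int main(void) {
  int64_t lui = (int64_t)(int32_t)(983040u << 12); /* 0xFFFFFFFF_F0000000 */
  uint64_t t = (uint64_t)lui >> 3;      /* srli -> 0x1FFFFFFF_FE000000 */
  int64_t a0 = (int64_t)~t;             /* not a0, a0 */
  assert(a0 == -2305843009180139521LL); /* 0xE000_0000_01FF_FFFF */
  return 0;
}
```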
-
-; This constant can be materialized for RV64 with ADDI+SLLI+ADDI+ADDI.
-
-define i64 @imm_end_2addi_1() nounwind {
-; RV64I-LABEL: imm_end_2addi_1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, -2047
-; RV64I-NEXT: slli a0, a0, 39
-; RV64I-NEXT: addi a0, a0, -2048
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_end_2addi_1:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, -2047
-; RV64IZBA-NEXT: slli a0, a0, 39
-; RV64IZBA-NEXT: addi a0, a0, -2048
-; RV64IZBA-NEXT: addi a0, a0, -1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_end_2addi_1:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, -2047
-; RV64IZBB-NEXT: slli a0, a0, 39
-; RV64IZBB-NEXT: addi a0, a0, -2048
-; RV64IZBB-NEXT: addi a0, a0, -1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_end_2addi_1:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: li a0, -2047
-; RV64IZBS-NEXT: slli a0, a0, 39
-; RV64IZBS-NEXT: addi a0, a0, -2048
-; RV64IZBS-NEXT: addi a0, a0, -1
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_end_2addi_1:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, -2047
-; RV64IXTHEADBB-NEXT: slli a0, a0, 39
-; RV64IXTHEADBB-NEXT: addi a0, a0, -2048
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -1125350151030785 ; 0xFFFC_007F_FFFF_F7FF
-}
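A C sketch of the four-step sequence, done in unsigned arithmetic so the shift and the wrap-around match 64-bit register behavior (my illustration):

```c
#include <assert.h>
#include <stdint.h>

int main(void) {
  uint64_t a0 = (uint64_t)-2047 << 39;        /* li a0, -2047 ; slli 39 */
  a0 -= 2048;                                 /* addi a0, a0, -2048 */
  a0 -= 1;                                    /* addi a0, a0, -1 */
  assert((int64_t)a0 == -1125350151030785LL); /* 0xFFFC_007F_FFFF_F7FF */
  return 0;
}
```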
-
-; This constant can be more efficiently materialized for RV64 if we use two
-; registers instead of one.
-
-define i64 @imm_2reg_1() nounwind {
-; RV64I-LABEL: imm_2reg_1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 74565
-; RV64I-NEXT: addiw a0, a0, 1656
-; RV64I-NEXT: slli a1, a0, 57
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_2reg_1:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 74565
-; RV64IZBA-NEXT: addiw a0, a0, 1656
-; RV64IZBA-NEXT: slli a1, a0, 57
-; RV64IZBA-NEXT: add a0, a0, a1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_2reg_1:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 74565
-; RV64IZBB-NEXT: addiw a0, a0, 1656
-; RV64IZBB-NEXT: slli a1, a0, 57
-; RV64IZBB-NEXT: add a0, a0, a1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_2reg_1:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 74565
-; RV64IZBS-NEXT: addiw a0, a0, 1656
-; RV64IZBS-NEXT: slli a1, a0, 57
-; RV64IZBS-NEXT: add a0, a0, a1
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_2reg_1:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 74565
-; RV64IXTHEADBB-NEXT: addiw a0, a0, 1656
-; RV64IXTHEADBB-NEXT: slli a1, a0, 57
-; RV64IXTHEADBB-NEXT: add a0, a0, a1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -1152921504301427080 ; 0xF000_0000_1234_5678
-}
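The two-register trick works because the low word 0x12345678 shifted left by 57 keeps only its low 7 bits (0x78), which land exactly on the 0xF high nibble; a C sketch (my illustration):

```c
#include <assert.h>
#include <stdint.h>

int main(void) {
  uint64_t a0 = ((uint64_t)74565 << 12) + 1656; /* lui+addiw -> 0x12345678 */
  uint64_t a1 = a0 << 57;                 /* only bits 0..6 survive: 0xF0...0 */
  a0 += a1;                               /* add a0, a0, a1 */
  assert((int64_t)a0 == -1152921504301427080LL); /* 0xF000_0000_1234_5678 */
  return 0;
}
```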
-
-; FIXME: This should use a single ADDI for the immediate.
-define void @imm_store_i16_neg1(ptr %p) nounwind {
-; RV64I-LABEL: imm_store_i16_neg1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: sh a1, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_store_i16_neg1:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a1, -1
-; RV64IZBA-NEXT: sh a1, 0(a0)
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_store_i16_neg1:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a1, -1
-; RV64IZBB-NEXT: sh a1, 0(a0)
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_store_i16_neg1:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: li a1, -1
-; RV64IZBS-NEXT: sh a1, 0(a0)
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_store_i16_neg1:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a1, -1
-; RV64IXTHEADBB-NEXT: sh a1, 0(a0)
-; RV64IXTHEADBB-NEXT: ret
- store i16 -1, ptr %p
- ret void
-}
-
-; FIXME: This should use a single ADDI for the immediate.
-define void @imm_store_i32_neg1(ptr %p) nounwind {
-; RV64I-LABEL: imm_store_i32_neg1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: sw a1, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_store_i32_neg1:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a1, -1
-; RV64IZBA-NEXT: sw a1, 0(a0)
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_store_i32_neg1:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a1, -1
-; RV64IZBB-NEXT: sw a1, 0(a0)
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_store_i32_neg1:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: li a1, -1
-; RV64IZBS-NEXT: sw a1, 0(a0)
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_store_i32_neg1:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a1, -1
-; RV64IXTHEADBB-NEXT: sw a1, 0(a0)
-; RV64IXTHEADBB-NEXT: ret
- store i32 -1, ptr %p
- ret void
-}
-
-define i64 @imm_5372288229() {
-; RV64I-LABEL: imm_5372288229:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 160
-; RV64I-NEXT: addiw a0, a0, 437
-; RV64I-NEXT: slli a0, a0, 13
-; RV64I-NEXT: addi a0, a0, -795
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_5372288229:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 655797
-; RV64IZBA-NEXT: slli.uw a0, a0, 1
-; RV64IZBA-NEXT: addi a0, a0, -795
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_5372288229:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 160
-; RV64IZBB-NEXT: addiw a0, a0, 437
-; RV64IZBB-NEXT: slli a0, a0, 13
-; RV64IZBB-NEXT: addi a0, a0, -795
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_5372288229:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 263018
-; RV64IZBS-NEXT: addiw a0, a0, -795
-; RV64IZBS-NEXT: bseti a0, a0, 32
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_5372288229:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 160
-; RV64IXTHEADBB-NEXT: addiw a0, a0, 437
-; RV64IXTHEADBB-NEXT: slli a0, a0, 13
-; RV64IXTHEADBB-NEXT: addi a0, a0, -795
-; RV64IXTHEADBB-NEXT: ret
- ret i64 5372288229
-}
-
-define i64 @imm_neg_5372288229() {
-; RV64I-LABEL: imm_neg_5372288229:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 1048416
-; RV64I-NEXT: addiw a0, a0, -437
-; RV64I-NEXT: slli a0, a0, 13
-; RV64I-NEXT: addi a0, a0, 795
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_neg_5372288229:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 611378
-; RV64IZBA-NEXT: addiw a0, a0, 265
-; RV64IZBA-NEXT: sh1add a0, a0, a0
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_neg_5372288229:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 1048416
-; RV64IZBB-NEXT: addiw a0, a0, -437
-; RV64IZBB-NEXT: slli a0, a0, 13
-; RV64IZBB-NEXT: addi a0, a0, 795
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_neg_5372288229:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 785558
-; RV64IZBS-NEXT: addiw a0, a0, 795
-; RV64IZBS-NEXT: bclri a0, a0, 32
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_neg_5372288229:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 1048416
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -437
-; RV64IXTHEADBB-NEXT: slli a0, a0, 13
-; RV64IXTHEADBB-NEXT: addi a0, a0, 795
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -5372288229
-}
-
-define i64 @imm_8953813715() {
-; RV64I-LABEL: imm_8953813715:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 267
-; RV64I-NEXT: addiw a0, a0, -637
-; RV64I-NEXT: slli a0, a0, 13
-; RV64I-NEXT: addi a0, a0, -1325
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_8953813715:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 437198
-; RV64IZBA-NEXT: addiw a0, a0, -265
-; RV64IZBA-NEXT: sh2add a0, a0, a0
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_8953813715:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 267
-; RV64IZBB-NEXT: addiw a0, a0, -637
-; RV64IZBB-NEXT: slli a0, a0, 13
-; RV64IZBB-NEXT: addi a0, a0, -1325
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_8953813715:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 88838
-; RV64IZBS-NEXT: addiw a0, a0, -1325
-; RV64IZBS-NEXT: bseti a0, a0, 33
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_8953813715:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 267
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -637
-; RV64IXTHEADBB-NEXT: slli a0, a0, 13
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1325
-; RV64IXTHEADBB-NEXT: ret
- ret i64 8953813715
-}
-
-define i64 @imm_neg_8953813715() {
-; RV64I-LABEL: imm_neg_8953813715:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 1048309
-; RV64I-NEXT: addiw a0, a0, 637
-; RV64I-NEXT: slli a0, a0, 13
-; RV64I-NEXT: addi a0, a0, 1325
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_neg_8953813715:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 611378
-; RV64IZBA-NEXT: addiw a0, a0, 265
-; RV64IZBA-NEXT: sh2add a0, a0, a0
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_neg_8953813715:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 1048309
-; RV64IZBB-NEXT: addiw a0, a0, 637
-; RV64IZBB-NEXT: slli a0, a0, 13
-; RV64IZBB-NEXT: addi a0, a0, 1325
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_neg_8953813715:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 959738
-; RV64IZBS-NEXT: addiw a0, a0, 1325
-; RV64IZBS-NEXT: bclri a0, a0, 33
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_neg_8953813715:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 1048309
-; RV64IXTHEADBB-NEXT: addiw a0, a0, 637
-; RV64IXTHEADBB-NEXT: slli a0, a0, 13
-; RV64IXTHEADBB-NEXT: addi a0, a0, 1325
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -8953813715
-}
-
-define i64 @imm_16116864687() {
-; RV64I-LABEL: imm_16116864687:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 961
-; RV64I-NEXT: addiw a0, a0, -1475
-; RV64I-NEXT: slli a0, a0, 12
-; RV64I-NEXT: addi a0, a0, 1711
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_16116864687:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 437198
-; RV64IZBA-NEXT: addiw a0, a0, -265
-; RV64IZBA-NEXT: sh3add a0, a0, a0
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_16116864687:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 961
-; RV64IZBB-NEXT: addiw a0, a0, -1475
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: addi a0, a0, 1711
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_16116864687:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 961
-; RV64IZBS-NEXT: addiw a0, a0, -1475
-; RV64IZBS-NEXT: slli a0, a0, 12
-; RV64IZBS-NEXT: addi a0, a0, 1711
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_16116864687:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 961
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -1475
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: addi a0, a0, 1711
-; RV64IXTHEADBB-NEXT: ret
- ret i64 16116864687
-}
-
-define i64 @imm_neg_16116864687() {
-; RV64I-LABEL: imm_neg_16116864687:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 1047615
-; RV64I-NEXT: addiw a0, a0, 1475
-; RV64I-NEXT: slli a0, a0, 12
-; RV64I-NEXT: addi a0, a0, -1711
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_neg_16116864687:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 611378
-; RV64IZBA-NEXT: addiw a0, a0, 265
-; RV64IZBA-NEXT: sh3add a0, a0, a0
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_neg_16116864687:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 1047615
-; RV64IZBB-NEXT: addiw a0, a0, 1475
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: addi a0, a0, -1711
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_neg_16116864687:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 1047615
-; RV64IZBS-NEXT: addiw a0, a0, 1475
-; RV64IZBS-NEXT: slli a0, a0, 12
-; RV64IZBS-NEXT: addi a0, a0, -1711
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_neg_16116864687:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 1047615
-; RV64IXTHEADBB-NEXT: addiw a0, a0, 1475
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1711
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -16116864687
-}
-
-define i64 @imm_2344336315() {
-; RV64I-LABEL: imm_2344336315:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 143087
-; RV64I-NEXT: slli a0, a0, 2
-; RV64I-NEXT: addi a0, a0, -1093
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_2344336315:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 143087
-; RV64IZBA-NEXT: slli a0, a0, 2
-; RV64IZBA-NEXT: addi a0, a0, -1093
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_2344336315:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 143087
-; RV64IZBB-NEXT: slli a0, a0, 2
-; RV64IZBB-NEXT: addi a0, a0, -1093
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_2344336315:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 143087
-; RV64IZBS-NEXT: slli a0, a0, 2
-; RV64IZBS-NEXT: addi a0, a0, -1093
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_2344336315:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 143087
-; RV64IXTHEADBB-NEXT: slli a0, a0, 2
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1093
-; RV64IXTHEADBB-NEXT: ret
- ret i64 2344336315 ; 0x8bbbbbbb
-}
-
-define i64 @imm_70370820078523() {
-; RV64-NOPOOL-LABEL: imm_70370820078523:
-; RV64-NOPOOL: # %bb.0:
-; RV64-NOPOOL-NEXT: lui a0, 256
-; RV64-NOPOOL-NEXT: addiw a0, a0, 31
-; RV64-NOPOOL-NEXT: slli a0, a0, 12
-; RV64-NOPOOL-NEXT: addi a0, a0, -273
-; RV64-NOPOOL-NEXT: slli a0, a0, 14
-; RV64-NOPOOL-NEXT: addi a0, a0, -1093
-; RV64-NOPOOL-NEXT: ret
-;
-; RV64I-POOL-LABEL: imm_70370820078523:
-; RV64I-POOL: # %bb.0:
-; RV64I-POOL-NEXT: lui a0, %hi(.LCPI37_0)
-; RV64I-POOL-NEXT: ld a0, %lo(.LCPI37_0)(a0)
-; RV64I-POOL-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_70370820078523:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 256
-; RV64IZBA-NEXT: addiw a0, a0, 31
-; RV64IZBA-NEXT: slli a0, a0, 12
-; RV64IZBA-NEXT: addi a0, a0, -273
-; RV64IZBA-NEXT: slli a0, a0, 14
-; RV64IZBA-NEXT: addi a0, a0, -1093
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_70370820078523:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 256
-; RV64IZBB-NEXT: addiw a0, a0, 31
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: addi a0, a0, -273
-; RV64IZBB-NEXT: slli a0, a0, 14
-; RV64IZBB-NEXT: addi a0, a0, -1093
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_70370820078523:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 506812
-; RV64IZBS-NEXT: addiw a0, a0, -1093
-; RV64IZBS-NEXT: bseti a0, a0, 46
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_70370820078523:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 256
-; RV64IXTHEADBB-NEXT: addiw a0, a0, 31
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: addi a0, a0, -273
-; RV64IXTHEADBB-NEXT: slli a0, a0, 14
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1093
-; RV64IXTHEADBB-NEXT: ret
- ret i64 70370820078523 ; 0x40007bbbbbbb
-}
-
-define i64 @imm_neg_9223372034778874949() {
-; RV64I-LABEL: imm_neg_9223372034778874949:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 506812
-; RV64I-NEXT: addiw a0, a0, -1093
-; RV64I-NEXT: slli a1, a0, 63
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_neg_9223372034778874949:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 506812
-; RV64IZBA-NEXT: addiw a0, a0, -1093
-; RV64IZBA-NEXT: slli a1, a0, 63
-; RV64IZBA-NEXT: add a0, a0, a1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_neg_9223372034778874949:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 506812
-; RV64IZBB-NEXT: addiw a0, a0, -1093
-; RV64IZBB-NEXT: slli a1, a0, 63
-; RV64IZBB-NEXT: add a0, a0, a1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_neg_9223372034778874949:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 506812
-; RV64IZBS-NEXT: addiw a0, a0, -1093
-; RV64IZBS-NEXT: bseti a0, a0, 63
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_neg_9223372034778874949:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 506812
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -1093
-; RV64IXTHEADBB-NEXT: slli a1, a0, 63
-; RV64IXTHEADBB-NEXT: add a0, a0, a1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -9223372034778874949 ; 0x800000007bbbbbbb
-}
-
-define i64 @imm_neg_9223301666034697285() {
-; RV64-NOPOOL-LABEL: imm_neg_9223301666034697285:
-; RV64-NOPOOL: # %bb.0:
-; RV64-NOPOOL-NEXT: lui a0, 917505
-; RV64-NOPOOL-NEXT: slli a0, a0, 8
-; RV64-NOPOOL-NEXT: addi a0, a0, 31
-; RV64-NOPOOL-NEXT: slli a0, a0, 12
-; RV64-NOPOOL-NEXT: addi a0, a0, -273
-; RV64-NOPOOL-NEXT: slli a0, a0, 14
-; RV64-NOPOOL-NEXT: addi a0, a0, -1093
-; RV64-NOPOOL-NEXT: ret
-;
-; RV64I-POOL-LABEL: imm_neg_9223301666034697285:
-; RV64I-POOL: # %bb.0:
-; RV64I-POOL-NEXT: lui a0, %hi(.LCPI39_0)
-; RV64I-POOL-NEXT: ld a0, %lo(.LCPI39_0)(a0)
-; RV64I-POOL-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_neg_9223301666034697285:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 917505
-; RV64IZBA-NEXT: slli a0, a0, 8
-; RV64IZBA-NEXT: addi a0, a0, 31
-; RV64IZBA-NEXT: slli a0, a0, 12
-; RV64IZBA-NEXT: addi a0, a0, -273
-; RV64IZBA-NEXT: slli a0, a0, 14
-; RV64IZBA-NEXT: addi a0, a0, -1093
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_neg_9223301666034697285:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 917505
-; RV64IZBB-NEXT: slli a0, a0, 8
-; RV64IZBB-NEXT: addi a0, a0, 31
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: addi a0, a0, -273
-; RV64IZBB-NEXT: slli a0, a0, 14
-; RV64IZBB-NEXT: addi a0, a0, -1093
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_neg_9223301666034697285:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 506812
-; RV64IZBS-NEXT: addiw a0, a0, -1093
-; RV64IZBS-NEXT: bseti a0, a0, 46
-; RV64IZBS-NEXT: bseti a0, a0, 63
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_neg_9223301666034697285:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 917505
-; RV64IXTHEADBB-NEXT: slli a0, a0, 8
-; RV64IXTHEADBB-NEXT: addi a0, a0, 31
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: addi a0, a0, -273
-; RV64IXTHEADBB-NEXT: slli a0, a0, 14
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1093
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -9223301666034697285 ; 0x800040007bbbbbbb
-}
-
-define i64 @imm_neg_2219066437() {
-; RV64I-LABEL: imm_neg_2219066437:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 913135
-; RV64I-NEXT: slli a0, a0, 2
-; RV64I-NEXT: addi a0, a0, -1093
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_neg_2219066437:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 913135
-; RV64IZBA-NEXT: slli a0, a0, 2
-; RV64IZBA-NEXT: addi a0, a0, -1093
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_neg_2219066437:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 913135
-; RV64IZBB-NEXT: slli a0, a0, 2
-; RV64IZBB-NEXT: addi a0, a0, -1093
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_neg_2219066437:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 913135
-; RV64IZBS-NEXT: slli a0, a0, 2
-; RV64IZBS-NEXT: addi a0, a0, -1093
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_neg_2219066437:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 913135
-; RV64IXTHEADBB-NEXT: slli a0, a0, 2
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1093
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -2219066437 ; 0xffffffff7bbbbbbb
-}
-
-define i64 @imm_neg_8798043653189() {
-; RV64I-LABEL: imm_neg_8798043653189:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 917475
-; RV64I-NEXT: addiw a0, a0, -273
-; RV64I-NEXT: slli a0, a0, 14
-; RV64I-NEXT: addi a0, a0, -1093
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_neg_8798043653189:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 917475
-; RV64IZBA-NEXT: addiw a0, a0, -273
-; RV64IZBA-NEXT: slli a0, a0, 14
-; RV64IZBA-NEXT: addi a0, a0, -1093
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_neg_8798043653189:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 917475
-; RV64IZBB-NEXT: addiw a0, a0, -273
-; RV64IZBB-NEXT: slli a0, a0, 14
-; RV64IZBB-NEXT: addi a0, a0, -1093
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_neg_8798043653189:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 572348
-; RV64IZBS-NEXT: addiw a0, a0, -1093
-; RV64IZBS-NEXT: bclri a0, a0, 43
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_neg_8798043653189:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 917475
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -273
-; RV64IXTHEADBB-NEXT: slli a0, a0, 14
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1093
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -8798043653189 ; 0xfffff7ff8bbbbbbb
-}
-
-define i64 @imm_9223372034904144827() {
-; RV64I-LABEL: imm_9223372034904144827:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 572348
-; RV64I-NEXT: addiw a0, a0, -1093
-; RV64I-NEXT: slli a1, a0, 63
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_9223372034904144827:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 572348
-; RV64IZBA-NEXT: addiw a0, a0, -1093
-; RV64IZBA-NEXT: slli a1, a0, 63
-; RV64IZBA-NEXT: add a0, a0, a1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_9223372034904144827:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 572348
-; RV64IZBB-NEXT: addiw a0, a0, -1093
-; RV64IZBB-NEXT: slli a1, a0, 63
-; RV64IZBB-NEXT: add a0, a0, a1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_9223372034904144827:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 572348
-; RV64IZBS-NEXT: addiw a0, a0, -1093
-; RV64IZBS-NEXT: bclri a0, a0, 63
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_9223372034904144827:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 572348
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -1093
-; RV64IXTHEADBB-NEXT: slli a1, a0, 63
-; RV64IXTHEADBB-NEXT: add a0, a0, a1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 9223372034904144827 ; 0x7fffffff8bbbbbbb
-}
-
-define i64 @imm_neg_9223354442718100411() {
-; RV64-NOPOOL-LABEL: imm_neg_9223354442718100411:
-; RV64-NOPOOL: # %bb.0:
-; RV64-NOPOOL-NEXT: lui a0, 524287
-; RV64-NOPOOL-NEXT: slli a0, a0, 6
-; RV64-NOPOOL-NEXT: addi a0, a0, -29
-; RV64-NOPOOL-NEXT: slli a0, a0, 12
-; RV64-NOPOOL-NEXT: addi a0, a0, -273
-; RV64-NOPOOL-NEXT: slli a0, a0, 14
-; RV64-NOPOOL-NEXT: addi a0, a0, -1093
-; RV64-NOPOOL-NEXT: ret
-;
-; RV64I-POOL-LABEL: imm_neg_9223354442718100411:
-; RV64I-POOL: # %bb.0:
-; RV64I-POOL-NEXT: lui a0, %hi(.LCPI43_0)
-; RV64I-POOL-NEXT: ld a0, %lo(.LCPI43_0)(a0)
-; RV64I-POOL-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_neg_9223354442718100411:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 524287
-; RV64IZBA-NEXT: slli a0, a0, 6
-; RV64IZBA-NEXT: addi a0, a0, -29
-; RV64IZBA-NEXT: slli a0, a0, 12
-; RV64IZBA-NEXT: addi a0, a0, -273
-; RV64IZBA-NEXT: slli a0, a0, 14
-; RV64IZBA-NEXT: addi a0, a0, -1093
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_neg_9223354442718100411:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 524287
-; RV64IZBB-NEXT: slli a0, a0, 6
-; RV64IZBB-NEXT: addi a0, a0, -29
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: addi a0, a0, -273
-; RV64IZBB-NEXT: slli a0, a0, 14
-; RV64IZBB-NEXT: addi a0, a0, -1093
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_neg_9223354442718100411:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 572348
-; RV64IZBS-NEXT: addiw a0, a0, -1093
-; RV64IZBS-NEXT: bclri a0, a0, 44
-; RV64IZBS-NEXT: bclri a0, a0, 63
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_neg_9223354442718100411:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 524287
-; RV64IXTHEADBB-NEXT: slli a0, a0, 6
-; RV64IXTHEADBB-NEXT: addi a0, a0, -29
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: addi a0, a0, -273
-; RV64IXTHEADBB-NEXT: slli a0, a0, 14
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1093
-; RV64IXTHEADBB-NEXT: ret
- ret i64 9223354442718100411 ; 0x7fffefff8bbbbbbb
-}
-
-define i64 @imm_2863311530() {
-; RV64I-LABEL: imm_2863311530:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 349525
-; RV64I-NEXT: addiw a0, a0, 1365
-; RV64I-NEXT: slli a0, a0, 1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_2863311530:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 349525
-; RV64IZBA-NEXT: addiw a0, a0, 1365
-; RV64IZBA-NEXT: slli a0, a0, 1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_2863311530:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 349525
-; RV64IZBB-NEXT: addiw a0, a0, 1365
-; RV64IZBB-NEXT: slli a0, a0, 1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_2863311530:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 349525
-; RV64IZBS-NEXT: addiw a0, a0, 1365
-; RV64IZBS-NEXT: slli a0, a0, 1
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_2863311530:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 349525
-; RV64IXTHEADBB-NEXT: addiw a0, a0, 1365
-; RV64IXTHEADBB-NEXT: slli a0, a0, 1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 2863311530 ; 0xaaaaaaaa
-}
-
-define i64 @imm_neg_2863311530() {
-; RV64I-LABEL: imm_neg_2863311530:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 699051
-; RV64I-NEXT: addiw a0, a0, -1365
-; RV64I-NEXT: slli a0, a0, 1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_neg_2863311530:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 699051
-; RV64IZBA-NEXT: addiw a0, a0, -1365
-; RV64IZBA-NEXT: slli a0, a0, 1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_neg_2863311530:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 699051
-; RV64IZBB-NEXT: addiw a0, a0, -1365
-; RV64IZBB-NEXT: slli a0, a0, 1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_neg_2863311530:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 699051
-; RV64IZBS-NEXT: addiw a0, a0, -1365
-; RV64IZBS-NEXT: slli a0, a0, 1
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_neg_2863311530:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 699051
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -1365
-; RV64IXTHEADBB-NEXT: slli a0, a0, 1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -2863311530 ; 0xffffffff55555556
-}
-
-define i64 @imm_2147485013() {
-; RV64I-LABEL: imm_2147485013:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, 1
-; RV64I-NEXT: slli a0, a0, 31
-; RV64I-NEXT: addi a0, a0, 1365
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_2147486378:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, 1
-; RV64IZBA-NEXT: slli a0, a0, 31
-; RV64IZBA-NEXT: addi a0, a0, 1365
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_2147486378:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, 1
-; RV64IZBB-NEXT: slli a0, a0, 31
-; RV64IZBB-NEXT: addi a0, a0, 1365
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_2147486378:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: li a0, 1365
-; RV64IZBS-NEXT: bseti a0, a0, 31
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_2147486378:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, 1
-; RV64IXTHEADBB-NEXT: slli a0, a0, 31
-; RV64IXTHEADBB-NEXT: addi a0, a0, 1365
-; RV64IXTHEADBB-NEXT: ret
- ret i64 2147485013
-}
-
-define i64 @imm_neg_2147485013() {
-; RV64I-LABEL: imm_neg_2147485013:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 524288
-; RV64I-NEXT: addi a0, a0, -1365
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_neg_2147485013:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 524288
-; RV64IZBA-NEXT: addi a0, a0, -1365
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_neg_2147485013:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 524288
-; RV64IZBB-NEXT: addi a0, a0, -1365
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_neg_2147485013:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 524288
-; RV64IZBS-NEXT: addi a0, a0, -1365
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_neg_2147485013:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 524288
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1365
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -2147485013
-}
-
-define i64 @imm_12900924131259() {
-; RV64I-LABEL: imm_12900924131259:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 188
-; RV64I-NEXT: addiw a0, a0, -1093
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: addi a0, a0, 1979
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_12900924131259:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 768955
-; RV64IZBA-NEXT: slli.uw a0, a0, 12
-; RV64IZBA-NEXT: addi a0, a0, 1979
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_12900924131259:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 188
-; RV64IZBB-NEXT: addiw a0, a0, -1093
-; RV64IZBB-NEXT: slli a0, a0, 24
-; RV64IZBB-NEXT: addi a0, a0, 1979
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_12900924131259:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 188
-; RV64IZBS-NEXT: addiw a0, a0, -1093
-; RV64IZBS-NEXT: slli a0, a0, 24
-; RV64IZBS-NEXT: addi a0, a0, 1979
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_12900924131259:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 188
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -1093
-; RV64IXTHEADBB-NEXT: slli a0, a0, 24
-; RV64IXTHEADBB-NEXT: addi a0, a0, 1979
-; RV64IXTHEADBB-NEXT: ret
- ret i64 12900924131259
-}
-
-define i64 @imm_50394234880() {
-; RV64I-LABEL: imm_50394234880:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 188
-; RV64I-NEXT: addiw a0, a0, -1093
-; RV64I-NEXT: slli a0, a0, 16
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_50394234880:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 768955
-; RV64IZBA-NEXT: slli.uw a0, a0, 4
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_50394234880:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 188
-; RV64IZBB-NEXT: addiw a0, a0, -1093
-; RV64IZBB-NEXT: slli a0, a0, 16
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_50394234880:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 188
-; RV64IZBS-NEXT: addiw a0, a0, -1093
-; RV64IZBS-NEXT: slli a0, a0, 16
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_50394234880:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 188
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -1093
-; RV64IXTHEADBB-NEXT: slli a0, a0, 16
-; RV64IXTHEADBB-NEXT: ret
- ret i64 50394234880
-}
-
-define i64 @imm_12900936431479() {
-; RV64I-LABEL: imm_12900936431479:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 192239
-; RV64I-NEXT: slli a0, a0, 2
-; RV64I-NEXT: addi a0, a0, -1093
-; RV64I-NEXT: slli a0, a0, 12
-; RV64I-NEXT: addi a0, a0, 1911
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_12900936431479:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 768956
-; RV64IZBA-NEXT: addi a0, a0, -1093
-; RV64IZBA-NEXT: slli.uw a0, a0, 12
-; RV64IZBA-NEXT: addi a0, a0, 1911
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_12900936431479:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 192239
-; RV64IZBB-NEXT: slli a0, a0, 2
-; RV64IZBB-NEXT: addi a0, a0, -1093
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: addi a0, a0, 1911
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_12900936431479:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 192239
-; RV64IZBS-NEXT: slli a0, a0, 2
-; RV64IZBS-NEXT: addi a0, a0, -1093
-; RV64IZBS-NEXT: slli a0, a0, 12
-; RV64IZBS-NEXT: addi a0, a0, 1911
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_12900936431479:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 192239
-; RV64IXTHEADBB-NEXT: slli a0, a0, 2
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1093
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: addi a0, a0, 1911
-; RV64IXTHEADBB-NEXT: ret
- ret i64 12900936431479
-}
-
-define i64 @imm_12900918536874() {
-; RV64I-LABEL: imm_12900918536874:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 384477
-; RV64I-NEXT: addiw a0, a0, 1365
-; RV64I-NEXT: slli a0, a0, 12
-; RV64I-NEXT: addi a0, a0, 1365
-; RV64I-NEXT: slli a0, a0, 1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_12900918536874:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 768955
-; RV64IZBA-NEXT: addi a0, a0, -1365
-; RV64IZBA-NEXT: slli.uw a0, a0, 12
-; RV64IZBA-NEXT: addi a0, a0, -1366
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_12900918536874:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 384477
-; RV64IZBB-NEXT: addiw a0, a0, 1365
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: addi a0, a0, 1365
-; RV64IZBB-NEXT: slli a0, a0, 1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_12900918536874:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 384477
-; RV64IZBS-NEXT: addiw a0, a0, 1365
-; RV64IZBS-NEXT: slli a0, a0, 12
-; RV64IZBS-NEXT: addi a0, a0, 1365
-; RV64IZBS-NEXT: slli a0, a0, 1
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_12900918536874:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 384477
-; RV64IXTHEADBB-NEXT: addiw a0, a0, 1365
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: addi a0, a0, 1365
-; RV64IXTHEADBB-NEXT: slli a0, a0, 1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 12900918536874
-}
-
-define i64 @imm_12900925247761() {
-; RV64I-LABEL: imm_12900925247761:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 384478
-; RV64I-NEXT: addiw a0, a0, -1911
-; RV64I-NEXT: slli a0, a0, 13
-; RV64I-NEXT: addi a0, a0, -2048
-; RV64I-NEXT: addi a0, a0, -1775
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_12900925247761:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 768955
-; RV64IZBA-NEXT: addi a0, a0, 273
-; RV64IZBA-NEXT: slli.uw a0, a0, 12
-; RV64IZBA-NEXT: addi a0, a0, 273
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_12900925247761:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 384478
-; RV64IZBB-NEXT: addiw a0, a0, -1911
-; RV64IZBB-NEXT: slli a0, a0, 13
-; RV64IZBB-NEXT: addi a0, a0, -2048
-; RV64IZBB-NEXT: addi a0, a0, -1775
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_12900925247761:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 384478
-; RV64IZBS-NEXT: addiw a0, a0, -1911
-; RV64IZBS-NEXT: slli a0, a0, 13
-; RV64IZBS-NEXT: addi a0, a0, -2048
-; RV64IZBS-NEXT: addi a0, a0, -1775
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_12900925247761:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 384478
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -1911
-; RV64IXTHEADBB-NEXT: slli a0, a0, 13
-; RV64IXTHEADBB-NEXT: addi a0, a0, -2048
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1775
-; RV64IXTHEADBB-NEXT: ret
- ret i64 12900925247761
-}
-
-define i64 @imm_7158272001() {
-; RV64I-LABEL: imm_7158272001:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 427
-; RV64I-NEXT: addiw a0, a0, -1367
-; RV64I-NEXT: slli a0, a0, 12
-; RV64I-NEXT: addi a0, a0, 1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_7158272001:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 349525
-; RV64IZBA-NEXT: sh2add a0, a0, a0
-; RV64IZBA-NEXT: addi a0, a0, 1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_7158272001:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 427
-; RV64IZBB-NEXT: addiw a0, a0, -1367
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: addi a0, a0, 1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_7158272001:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 427
-; RV64IZBS-NEXT: addiw a0, a0, -1367
-; RV64IZBS-NEXT: slli a0, a0, 12
-; RV64IZBS-NEXT: addi a0, a0, 1
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_7158272001:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 427
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -1367
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: addi a0, a0, 1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 7158272001 ; 0x0000_0001_aaaa_9001
-}
-
-define i64 @imm_12884889601() {
-; RV64I-LABEL: imm_12884889601:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 768
-; RV64I-NEXT: addiw a0, a0, -3
-; RV64I-NEXT: slli a0, a0, 12
-; RV64I-NEXT: addi a0, a0, 1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_12884889601:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 349525
-; RV64IZBA-NEXT: sh3add a0, a0, a0
-; RV64IZBA-NEXT: addi a0, a0, 1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_12884889601:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 768
-; RV64IZBB-NEXT: addiw a0, a0, -3
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: addi a0, a0, 1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_12884889601:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 768
-; RV64IZBS-NEXT: addiw a0, a0, -3
-; RV64IZBS-NEXT: slli a0, a0, 12
-; RV64IZBS-NEXT: addi a0, a0, 1
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_12884889601:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 768
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -3
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: addi a0, a0, 1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 12884889601 ; 0x0000_0002_ffff_d001
-}
-
-define i64 @imm_neg_3435982847() {
-; RV64I-LABEL: imm_neg_3435982847:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 1048371
-; RV64I-NEXT: addiw a0, a0, 817
-; RV64I-NEXT: slli a0, a0, 12
-; RV64I-NEXT: addi a0, a0, 1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_neg_3435982847:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 768955
-; RV64IZBA-NEXT: sh1add a0, a0, a0
-; RV64IZBA-NEXT: addi a0, a0, 1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_neg_3435982847:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 1048371
-; RV64IZBB-NEXT: addiw a0, a0, 817
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: addi a0, a0, 1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_neg_3435982847:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 734001
-; RV64IZBS-NEXT: addiw a0, a0, 1
-; RV64IZBS-NEXT: bclri a0, a0, 31
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_neg_3435982847:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 1048371
-; RV64IXTHEADBB-NEXT: addiw a0, a0, 817
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: addi a0, a0, 1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -3435982847 ; 0xffff_ffff_3333_1001
-}
-
-define i64 @imm_neg_5726842879() {
-; RV64I-LABEL: imm_neg_5726842879:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 1048235
-; RV64I-NEXT: addiw a0, a0, -1419
-; RV64I-NEXT: slli a0, a0, 12
-; RV64I-NEXT: addi a0, a0, 1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_neg_5726842879:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 768945
-; RV64IZBA-NEXT: sh2add a0, a0, a0
-; RV64IZBA-NEXT: addi a0, a0, 1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_neg_5726842879:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 1048235
-; RV64IZBB-NEXT: addiw a0, a0, -1419
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: addi a0, a0, 1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_neg_5726842879:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 698997
-; RV64IZBS-NEXT: addiw a0, a0, 1
-; RV64IZBS-NEXT: bclri a0, a0, 32
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_neg_5726842879:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 1048235
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -1419
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: addi a0, a0, 1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -5726842879 ; 0xffff_fffe_aaa7_5001
-}
-
-define i64 @imm_neg_10307948543() {
-; RV64I-LABEL: imm_neg_10307948543:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 1047962
-; RV64I-NEXT: addiw a0, a0, -1645
-; RV64I-NEXT: slli a0, a0, 12
-; RV64I-NEXT: addi a0, a0, 1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm_neg_10307948543:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 768955
-; RV64IZBA-NEXT: sh3add a0, a0, a0
-; RV64IZBA-NEXT: addi a0, a0, 1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm_neg_10307948543:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 1047962
-; RV64IZBB-NEXT: addiw a0, a0, -1645
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: addi a0, a0, 1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm_neg_10307948543:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 629139
-; RV64IZBS-NEXT: addiw a0, a0, 1
-; RV64IZBS-NEXT: bclri a0, a0, 33
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm_neg_10307948543:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 1047962
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -1645
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: addi a0, a0, 1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -10307948543 ; 0xffff_fffd_9999_3001
-}
-
-define i64 @li_rori_1() {
-; RV64I-LABEL: li_rori_1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, -17
-; RV64I-NEXT: slli a0, a0, 43
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: li_rori_1:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, -17
-; RV64IZBA-NEXT: slli a0, a0, 43
-; RV64IZBA-NEXT: addi a0, a0, -1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: li_rori_1:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, -18
-; RV64IZBB-NEXT: rori a0, a0, 21
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: li_rori_1:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: li a0, -17
-; RV64IZBS-NEXT: slli a0, a0, 43
-; RV64IZBS-NEXT: addi a0, a0, -1
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: li_rori_1:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, -18
-; RV64IXTHEADBB-NEXT: th.srri a0, a0, 21
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -149533581377537
-}
-
-define i64 @li_rori_2() {
-; RV64I-LABEL: li_rori_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, -5
-; RV64I-NEXT: slli a0, a0, 60
-; RV64I-NEXT: addi a0, a0, -6
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: li_rori_2:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, -5
-; RV64IZBA-NEXT: slli a0, a0, 60
-; RV64IZBA-NEXT: addi a0, a0, -6
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: li_rori_2:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, -86
-; RV64IZBB-NEXT: rori a0, a0, 4
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: li_rori_2:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: li a0, -5
-; RV64IZBS-NEXT: slli a0, a0, 60
-; RV64IZBS-NEXT: addi a0, a0, -6
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: li_rori_2:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, -86
-; RV64IXTHEADBB-NEXT: th.srri a0, a0, 4
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -5764607523034234886
-}
-
-define i64 @li_rori_3() {
-; RV64I-LABEL: li_rori_3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, -17
-; RV64I-NEXT: slli a0, a0, 27
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: li_rori_3:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, -17
-; RV64IZBA-NEXT: slli a0, a0, 27
-; RV64IZBA-NEXT: addi a0, a0, -1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: li_rori_3:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, -18
-; RV64IZBB-NEXT: rori a0, a0, 37
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: li_rori_3:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: li a0, -17
-; RV64IZBS-NEXT: slli a0, a0, 27
-; RV64IZBS-NEXT: addi a0, a0, -1
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: li_rori_3:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, -18
-; RV64IXTHEADBB-NEXT: th.srri a0, a0, 37
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -2281701377
-}
-
-; This used to assert when compiled with Zba.
-define i64 @PR54812() {
-; RV64I-LABEL: PR54812:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 1048447
-; RV64I-NEXT: addiw a0, a0, 1407
-; RV64I-NEXT: slli a0, a0, 12
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: PR54812:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 872917
-; RV64IZBA-NEXT: sh1add a0, a0, a0
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: PR54812:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 1048447
-; RV64IZBB-NEXT: addiw a0, a0, 1407
-; RV64IZBB-NEXT: slli a0, a0, 12
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: PR54812:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 1045887
-; RV64IZBS-NEXT: bclri a0, a0, 31
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: PR54812:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 1048447
-; RV64IXTHEADBB-NEXT: addiw a0, a0, 1407
-; RV64IXTHEADBB-NEXT: slli a0, a0, 12
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -2158497792
-}
-
-define signext i32 @pos_2048() nounwind {
-; RV64I-LABEL: pos_2048:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, 1
-; RV64I-NEXT: slli a0, a0, 11
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: pos_2048:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: li a0, 1
-; RV64IZBA-NEXT: slli a0, a0, 11
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: pos_2048:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: li a0, 1
-; RV64IZBB-NEXT: slli a0, a0, 11
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: pos_2048:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: bseti a0, zero, 11
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: pos_2048:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: li a0, 1
-; RV64IXTHEADBB-NEXT: slli a0, a0, 11
-; RV64IXTHEADBB-NEXT: ret
- ret i32 2048
-}
-
-define i64 @imm64_same_lo_hi() nounwind {
-; RV64I-LABEL: imm64_same_lo_hi:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 65793
-; RV64I-NEXT: addiw a0, a0, 16
-; RV64I-NEXT: slli a1, a0, 32
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_same_lo_hi:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 65793
-; RV64IZBA-NEXT: addiw a0, a0, 16
-; RV64IZBA-NEXT: slli a1, a0, 32
-; RV64IZBA-NEXT: add a0, a0, a1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_same_lo_hi:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 65793
-; RV64IZBB-NEXT: addiw a0, a0, 16
-; RV64IZBB-NEXT: slli a1, a0, 32
-; RV64IZBB-NEXT: add a0, a0, a1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_same_lo_hi:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 65793
-; RV64IZBS-NEXT: addiw a0, a0, 16
-; RV64IZBS-NEXT: slli a1, a0, 32
-; RV64IZBS-NEXT: add a0, a0, a1
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_same_lo_hi:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 65793
-; RV64IXTHEADBB-NEXT: addiw a0, a0, 16
-; RV64IXTHEADBB-NEXT: slli a1, a0, 32
-; RV64IXTHEADBB-NEXT: add a0, a0, a1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 1157442765409226768 ; 0x1010101010101010
-}
-
-; Same as above with optsize. Make sure we use the constant pool on RV64.
-define i64 @imm64_same_lo_hi_optsize() nounwind optsize {
-; RV64-NOPOOL-LABEL: imm64_same_lo_hi_optsize:
-; RV64-NOPOOL: # %bb.0:
-; RV64-NOPOOL-NEXT: lui a0, 65793
-; RV64-NOPOOL-NEXT: addiw a0, a0, 16
-; RV64-NOPOOL-NEXT: slli a1, a0, 32
-; RV64-NOPOOL-NEXT: add a0, a0, a1
-; RV64-NOPOOL-NEXT: ret
-;
-; RV64I-POOL-LABEL: imm64_same_lo_hi_optsize:
-; RV64I-POOL: # %bb.0:
-; RV64I-POOL-NEXT: lui a0, %hi(.LCPI64_0)
-; RV64I-POOL-NEXT: ld a0, %lo(.LCPI64_0)(a0)
-; RV64I-POOL-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_same_lo_hi_optsize:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 65793
-; RV64IZBA-NEXT: addiw a0, a0, 16
-; RV64IZBA-NEXT: slli a1, a0, 32
-; RV64IZBA-NEXT: add a0, a0, a1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_same_lo_hi_optsize:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 65793
-; RV64IZBB-NEXT: addiw a0, a0, 16
-; RV64IZBB-NEXT: slli a1, a0, 32
-; RV64IZBB-NEXT: add a0, a0, a1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_same_lo_hi_optsize:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 65793
-; RV64IZBS-NEXT: addiw a0, a0, 16
-; RV64IZBS-NEXT: slli a1, a0, 32
-; RV64IZBS-NEXT: add a0, a0, a1
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_same_lo_hi_optsize:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 65793
-; RV64IXTHEADBB-NEXT: addiw a0, a0, 16
-; RV64IXTHEADBB-NEXT: slli a1, a0, 32
-; RV64IXTHEADBB-NEXT: add a0, a0, a1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 1157442765409226768 ; 0x1010101010101010
-}
-; Hi and lo are the same and also negative.
-define i64 @imm64_same_lo_hi_negative() nounwind {
-; RV64-NOPOOL-LABEL: imm64_same_lo_hi_negative:
-; RV64-NOPOOL: # %bb.0:
-; RV64-NOPOOL-NEXT: lui a0, 983297
-; RV64-NOPOOL-NEXT: slli a0, a0, 4
-; RV64-NOPOOL-NEXT: addi a0, a0, 257
-; RV64-NOPOOL-NEXT: slli a0, a0, 16
-; RV64-NOPOOL-NEXT: addi a0, a0, 257
-; RV64-NOPOOL-NEXT: slli a0, a0, 15
-; RV64-NOPOOL-NEXT: addi a0, a0, 128
-; RV64-NOPOOL-NEXT: ret
-;
-; RV64I-POOL-LABEL: imm64_same_lo_hi_negative:
-; RV64I-POOL: # %bb.0:
-; RV64I-POOL-NEXT: lui a0, %hi(.LCPI65_0)
-; RV64I-POOL-NEXT: ld a0, %lo(.LCPI65_0)(a0)
-; RV64I-POOL-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_same_lo_hi_negative:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 526344
-; RV64IZBA-NEXT: addi a0, a0, 128
-; RV64IZBA-NEXT: slli a1, a0, 32
-; RV64IZBA-NEXT: add.uw a0, a0, a1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_same_lo_hi_negative:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 983297
-; RV64IZBB-NEXT: slli a0, a0, 4
-; RV64IZBB-NEXT: addi a0, a0, 257
-; RV64IZBB-NEXT: slli a0, a0, 16
-; RV64IZBB-NEXT: addi a0, a0, 257
-; RV64IZBB-NEXT: slli a0, a0, 15
-; RV64IZBB-NEXT: addi a0, a0, 128
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_same_lo_hi_negative:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: lui a0, 983297
-; RV64IZBS-NEXT: slli a0, a0, 4
-; RV64IZBS-NEXT: addi a0, a0, 257
-; RV64IZBS-NEXT: slli a0, a0, 16
-; RV64IZBS-NEXT: addi a0, a0, 257
-; RV64IZBS-NEXT: slli a0, a0, 15
-; RV64IZBS-NEXT: addi a0, a0, 128
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_same_lo_hi_negative:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 983297
-; RV64IXTHEADBB-NEXT: slli a0, a0, 4
-; RV64IXTHEADBB-NEXT: addi a0, a0, 257
-; RV64IXTHEADBB-NEXT: slli a0, a0, 16
-; RV64IXTHEADBB-NEXT: addi a0, a0, 257
-; RV64IXTHEADBB-NEXT: slli a0, a0, 15
-; RV64IXTHEADBB-NEXT: addi a0, a0, 128
-; RV64IXTHEADBB-NEXT: ret
- ret i64 9259542123273814144 ; 0x8080808080808080
-}
-
-define i64 @imm64_0x8000080000000() {
-; RV64I-LABEL: imm64_0x8000080000000:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 256
-; RV64I-NEXT: addiw a0, a0, 1
-; RV64I-NEXT: slli a0, a0, 31
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_0x8000080000000:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 256
-; RV64IZBA-NEXT: addiw a0, a0, 1
-; RV64IZBA-NEXT: slli a0, a0, 31
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_0x8000080000000:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 256
-; RV64IZBB-NEXT: addiw a0, a0, 1
-; RV64IZBB-NEXT: slli a0, a0, 31
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_0x8000080000000:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: bseti a0, zero, 31
-; RV64IZBS-NEXT: bseti a0, a0, 51
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_0x8000080000000:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 256
-; RV64IXTHEADBB-NEXT: addiw a0, a0, 1
-; RV64IXTHEADBB-NEXT: slli a0, a0, 31
-; RV64IXTHEADBB-NEXT: ret
- ret i64 2251801961168896 ; 0x8000080000000
-}
-
-define i64 @imm64_0x10000100000000() {
-; RV64I-LABEL: imm64_0x10000100000000:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 256
-; RV64I-NEXT: addi a0, a0, 1
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_0x10000100000000:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 256
-; RV64IZBA-NEXT: addi a0, a0, 1
-; RV64IZBA-NEXT: slli a0, a0, 32
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_0x10000100000000:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 256
-; RV64IZBB-NEXT: addi a0, a0, 1
-; RV64IZBB-NEXT: slli a0, a0, 32
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_0x10000100000000:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: bseti a0, zero, 32
-; RV64IZBS-NEXT: bseti a0, a0, 52
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_0x10000100000000:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 256
-; RV64IXTHEADBB-NEXT: addi a0, a0, 1
-; RV64IXTHEADBB-NEXT: slli a0, a0, 32
-; RV64IXTHEADBB-NEXT: ret
- ret i64 4503603922337792 ; 0x10000100000000
-}
-
-define i64 @imm64_0xFF7FFFFF7FFFFFFE() {
-; RV64I-LABEL: imm64_0xFF7FFFFF7FFFFFFE:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 1044480
-; RV64I-NEXT: addiw a0, a0, -1
-; RV64I-NEXT: slli a0, a0, 31
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: ret
-;
-; RV64IZBA-LABEL: imm64_0xFF7FFFFF7FFFFFFE:
-; RV64IZBA: # %bb.0:
-; RV64IZBA-NEXT: lui a0, 1044480
-; RV64IZBA-NEXT: addiw a0, a0, -1
-; RV64IZBA-NEXT: slli a0, a0, 31
-; RV64IZBA-NEXT: addi a0, a0, -1
-; RV64IZBA-NEXT: ret
-;
-; RV64IZBB-LABEL: imm64_0xFF7FFFFF7FFFFFFE:
-; RV64IZBB: # %bb.0:
-; RV64IZBB-NEXT: lui a0, 1044480
-; RV64IZBB-NEXT: addiw a0, a0, -1
-; RV64IZBB-NEXT: slli a0, a0, 31
-; RV64IZBB-NEXT: addi a0, a0, -1
-; RV64IZBB-NEXT: ret
-;
-; RV64IZBS-LABEL: imm64_0xFF7FFFFF7FFFFFFE:
-; RV64IZBS: # %bb.0:
-; RV64IZBS-NEXT: li a0, -1
-; RV64IZBS-NEXT: bclri a0, a0, 31
-; RV64IZBS-NEXT: bclri a0, a0, 55
-; RV64IZBS-NEXT: ret
-;
-; RV64IXTHEADBB-LABEL: imm64_0xFF7FFFFF7FFFFFFE:
-; RV64IXTHEADBB: # %bb.0:
-; RV64IXTHEADBB-NEXT: lui a0, 1044480
-; RV64IXTHEADBB-NEXT: addiw a0, a0, -1
-; RV64IXTHEADBB-NEXT: slli a0, a0, 31
-; RV64IXTHEADBB-NEXT: addi a0, a0, -1
-; RV64IXTHEADBB-NEXT: ret
- ret i64 -36028799166447617 ; 0xFF7FFFFF7FFFFFFF
-}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem.ll
deleted file mode 100644
index 456a880..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem.ll
+++ /dev/null
@@ -1,92 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck -check-prefix=RV64I %s
-
-; Check indexed and unindexed, sext, zext and anyext loads
-
-define void @lb(ptr %a, ptr %b) nounwind {
-; RV64I-LABEL: lb:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lb a2, 1(a0)
-; RV64I-NEXT: lbu zero, 0(a0)
-; RV64I-NEXT: sw a2, 0(a1)
-; RV64I-NEXT: ret
- %1 = getelementptr i8, ptr %a, i32 1
- %2 = load i8, ptr %1
- %3 = sext i8 %2 to i32
- ; the unused load will produce an anyext for selection
- %4 = load volatile i8, ptr %a
- store i32 %3, ptr %b
- ret void
-}
-
-define void @lbu(ptr %a, ptr %b) nounwind {
-; RV64I-LABEL: lbu:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lbu a0, 1(a0)
-; RV64I-NEXT: sw a0, 0(a1)
-; RV64I-NEXT: ret
- %1 = getelementptr i8, ptr %a, i32 1
- %2 = load i8, ptr %1
- %3 = zext i8 %2 to i32
- store i32 %3, ptr %b
- ret void
-}
-
-define void @lh(ptr %a, ptr %b) nounwind {
-; RV64I-LABEL: lh:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lh a2, 2(a0)
-; RV64I-NEXT: lh zero, 0(a0)
-; RV64I-NEXT: sw a2, 0(a1)
-; RV64I-NEXT: ret
- %1 = getelementptr i16, ptr %a, i32 1
- %2 = load i16, ptr %1
- %3 = sext i16 %2 to i32
- ; the unused load will produce an anyext for selection
- %4 = load volatile i16, ptr %a
- store i32 %3, ptr %b
- ret void
-}
-
-define void @lhu(ptr %a, ptr %b) nounwind {
-; RV64I-LABEL: lhu:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lhu a0, 2(a0)
-; RV64I-NEXT: sw a0, 0(a1)
-; RV64I-NEXT: ret
- %1 = getelementptr i16, ptr %a, i32 1
- %2 = load i16, ptr %1
- %3 = zext i16 %2 to i32
- store i32 %3, ptr %b
- ret void
-}
-
-define void @lw(ptr %a, ptr %b) nounwind {
-; RV64I-LABEL: lw:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lw a2, 4(a0)
-; RV64I-NEXT: lw zero, 0(a0)
-; RV64I-NEXT: sd a2, 0(a1)
-; RV64I-NEXT: ret
- %1 = getelementptr i32, ptr %a, i64 1
- %2 = load i32, ptr %1
- %3 = sext i32 %2 to i64
- ; the unused load will produce an anyext for selection
- %4 = load volatile i32, ptr %a
- store i64 %3, ptr %b
- ret void
-}
-
-define void @lwu(ptr %a, ptr %b) nounwind {
-; RV64I-LABEL: lwu:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lwu a0, 4(a0)
-; RV64I-NEXT: sd a0, 0(a1)
-; RV64I-NEXT: ret
- %1 = getelementptr i32, ptr %a, i64 1
- %2 = load i32, ptr %1
- %3 = zext i32 %2 to i64
- store i64 %3, ptr %b
- ret void
-}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll
deleted file mode 100644
index de4c21f..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll
+++ /dev/null
@@ -1,341 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck -check-prefix=RV64I %s
-
-; Check indexed and unindexed, sext, zext and anyext loads
-
-define dso_local i64 @lb(ptr %a) nounwind {
-; RV64I-LABEL: lb:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lb a1, 1(a0)
-; RV64I-NEXT: lbu zero, 0(a0)
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
- %1 = getelementptr i8, ptr %a, i32 1
- %2 = load i8, ptr %1
- %3 = sext i8 %2 to i64
- ; the unused load will produce an anyext for selection
- %4 = load volatile i8, ptr %a
- ret i64 %3
-}
-
-define dso_local i64 @lh(ptr %a) nounwind {
-; RV64I-LABEL: lh:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lh a1, 4(a0)
-; RV64I-NEXT: lh zero, 0(a0)
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
- %1 = getelementptr i16, ptr %a, i32 2
- %2 = load i16, ptr %1
- %3 = sext i16 %2 to i64
- ; the unused load will produce an anyext for selection
- %4 = load volatile i16, ptr %a
- ret i64 %3
-}
-
-define dso_local i64 @lw(ptr %a) nounwind {
-; RV64I-LABEL: lw:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lw a1, 12(a0)
-; RV64I-NEXT: lw zero, 0(a0)
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
- %1 = getelementptr i32, ptr %a, i32 3
- %2 = load i32, ptr %1
- %3 = sext i32 %2 to i64
- ; the unused load will produce an anyext for selection
- %4 = load volatile i32, ptr %a
- ret i64 %3
-}
-
-define dso_local i64 @lbu(ptr %a) nounwind {
-; RV64I-LABEL: lbu:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lbu a1, 4(a0)
-; RV64I-NEXT: lbu a0, 0(a0)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: ret
- %1 = getelementptr i8, ptr %a, i32 4
- %2 = load i8, ptr %1
- %3 = zext i8 %2 to i64
- %4 = load volatile i8, ptr %a
- %5 = zext i8 %4 to i64
- %6 = add i64 %3, %5
- ret i64 %6
-}
-
-define dso_local i64 @lhu(ptr %a) nounwind {
-; RV64I-LABEL: lhu:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lhu a1, 10(a0)
-; RV64I-NEXT: lhu a0, 0(a0)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: ret
- %1 = getelementptr i16, ptr %a, i32 5
- %2 = load i16, ptr %1
- %3 = zext i16 %2 to i64
- %4 = load volatile i16, ptr %a
- %5 = zext i16 %4 to i64
- %6 = add i64 %3, %5
- ret i64 %6
-}
-
-define dso_local i64 @lwu(ptr %a) nounwind {
-; RV64I-LABEL: lwu:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lwu a1, 24(a0)
-; RV64I-NEXT: lwu a0, 0(a0)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: ret
- %1 = getelementptr i32, ptr %a, i32 6
- %2 = load i32, ptr %1
- %3 = zext i32 %2 to i64
- %4 = load volatile i32, ptr %a
- %5 = zext i32 %4 to i64
- %6 = add i64 %3, %5
- ret i64 %6
-}
-
-; 64-bit loads and stores
-
-define dso_local i64 @ld(ptr %a) nounwind {
-; RV64I-LABEL: ld:
-; RV64I: # %bb.0:
-; RV64I-NEXT: ld a1, 80(a0)
-; RV64I-NEXT: ld zero, 0(a0)
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
- %1 = getelementptr i64, ptr %a, i32 10
- %2 = load i64, ptr %1
- %3 = load volatile i64, ptr %a
- ret i64 %2
-}
-
-define dso_local void @sd(ptr %a, i64 %b) nounwind {
-; RV64I-LABEL: sd:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sd a1, 0(a0)
-; RV64I-NEXT: sd a1, 88(a0)
-; RV64I-NEXT: ret
- store i64 %b, ptr %a
- %1 = getelementptr i64, ptr %a, i32 11
- store i64 %b, ptr %1
- ret void
-}
-
-; Check load and store to an i1 location
-define dso_local i64 @load_sext_zext_anyext_i1(ptr %a) nounwind {
-; RV64I-LABEL: load_sext_zext_anyext_i1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lbu a1, 1(a0)
-; RV64I-NEXT: lbu a2, 2(a0)
-; RV64I-NEXT: lbu zero, 0(a0)
-; RV64I-NEXT: sub a0, a2, a1
-; RV64I-NEXT: ret
- ; sextload i1
- %1 = getelementptr i1, ptr %a, i32 1
- %2 = load i1, ptr %1
- %3 = sext i1 %2 to i64
- ; zextload i1
- %4 = getelementptr i1, ptr %a, i32 2
- %5 = load i1, ptr %4
- %6 = zext i1 %5 to i64
- %7 = add i64 %3, %6
- ; extload i1 (anyext). Produced as the load is unused.
- %8 = load volatile i1, ptr %a
- ret i64 %7
-}
-
-define dso_local i16 @load_sext_zext_anyext_i1_i16(ptr %a) nounwind {
-; RV64I-LABEL: load_sext_zext_anyext_i1_i16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lbu a1, 1(a0)
-; RV64I-NEXT: lbu a2, 2(a0)
-; RV64I-NEXT: lbu zero, 0(a0)
-; RV64I-NEXT: subw a0, a2, a1
-; RV64I-NEXT: ret
- ; sextload i1
- %1 = getelementptr i1, ptr %a, i32 1
- %2 = load i1, ptr %1
- %3 = sext i1 %2 to i16
- ; zextload i1
- %4 = getelementptr i1, ptr %a, i32 2
- %5 = load i1, ptr %4
- %6 = zext i1 %5 to i16
- %7 = add i16 %3, %6
- ; extload i1 (anyext). Produced as the load is unused.
- %8 = load volatile i1, ptr %a
- ret i16 %7
-}
-
-; Check load and store to a global
-@G = dso_local global i64 0
-
-define dso_local i64 @ld_sd_global(i64 %a) nounwind {
-; RV64I-LABEL: ld_sd_global:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a2, %hi(G)
-; RV64I-NEXT: ld a1, %lo(G)(a2)
-; RV64I-NEXT: addi a3, a2, %lo(G)
-; RV64I-NEXT: sd a0, %lo(G)(a2)
-; RV64I-NEXT: ld zero, 72(a3)
-; RV64I-NEXT: sd a0, 72(a3)
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: ret
- %1 = load volatile i64, ptr @G
- store i64 %a, ptr @G
- %2 = getelementptr i64, ptr @G, i64 9
- %3 = load volatile i64, ptr %2
- store i64 %a, ptr %2
- ret i64 %1
-}
-
-define i64 @lw_near_local(ptr %a) {
-; RV64I-LABEL: lw_near_local:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, 2047
-; RV64I-NEXT: ld a0, 9(a0)
-; RV64I-NEXT: ret
- %1 = getelementptr inbounds i64, ptr %a, i64 257
- %2 = load volatile i64, ptr %1
- ret i64 %2
-}
-
-define void @st_near_local(ptr %a, i64 %b) {
-; RV64I-LABEL: st_near_local:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, 2047
-; RV64I-NEXT: sd a1, 9(a0)
-; RV64I-NEXT: ret
- %1 = getelementptr inbounds i64, ptr %a, i64 257
- store i64 %b, ptr %1
- ret void
-}
-
-define i64 @lw_sw_near_local(ptr %a, i64 %b) {
-; RV64I-LABEL: lw_sw_near_local:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a2, a0, 2047
-; RV64I-NEXT: ld a0, 9(a2)
-; RV64I-NEXT: sd a1, 9(a2)
-; RV64I-NEXT: ret
- %1 = getelementptr inbounds i64, ptr %a, i64 257
- %2 = load volatile i64, ptr %1
- store i64 %b, ptr %1
- ret i64 %2
-}
-
-define i64 @lw_far_local(ptr %a) {
-; RV64I-LABEL: lw_far_local:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 8
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ld a0, -8(a0)
-; RV64I-NEXT: ret
- %1 = getelementptr inbounds i64, ptr %a, i64 4095
- %2 = load volatile i64, ptr %1
- ret i64 %2
-}
-
-define void @st_far_local(ptr %a, i64 %b) {
-; RV64I-LABEL: st_far_local:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a2, 8
-; RV64I-NEXT: add a0, a0, a2
-; RV64I-NEXT: sd a1, -8(a0)
-; RV64I-NEXT: ret
- %1 = getelementptr inbounds i64, ptr %a, i64 4095
- store i64 %b, ptr %1
- ret void
-}
-
-define i64 @lw_sw_far_local(ptr %a, i64 %b) {
-; RV64I-LABEL: lw_sw_far_local:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a2, 8
-; RV64I-NEXT: add a2, a0, a2
-; RV64I-NEXT: ld a0, -8(a2)
-; RV64I-NEXT: sd a1, -8(a2)
-; RV64I-NEXT: ret
- %1 = getelementptr inbounds i64, ptr %a, i64 4095
- %2 = load volatile i64, ptr %1
- store i64 %b, ptr %1
- ret i64 %2
-}
-
-; Make sure we don't fold the addiw into the load offset. The sign extend of the
-; addiw is required.
-define i64 @lw_really_far_local(ptr %a) {
-; RV64I-LABEL: lw_really_far_local:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: addiw a1, a1, -2048
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ld a0, 0(a0)
-; RV64I-NEXT: ret
- %1 = getelementptr inbounds i64, ptr %a, i64 268435200
- %2 = load volatile i64, ptr %1
- ret i64 %2
-}
-
-; Make sure we don't fold the addiw into the store offset. The sign extend of
-; the addiw is required.
-define void @st_really_far_local(ptr %a, i64 %b) {
-; RV64I-LABEL: st_really_far_local:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a2, 524288
-; RV64I-NEXT: addiw a2, a2, -2048
-; RV64I-NEXT: add a0, a0, a2
-; RV64I-NEXT: sd a1, 0(a0)
-; RV64I-NEXT: ret
- %1 = getelementptr inbounds i64, ptr %a, i64 268435200
- store i64 %b, ptr %1
- ret void
-}
-
-; Make sure we don't fold the addiw into the load/store offset. The sign extend
-; of the addiw is required.
-define i64 @lw_sw_really_far_local(ptr %a, i64 %b) {
-; RV64I-LABEL: lw_sw_really_far_local:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a2, 524288
-; RV64I-NEXT: addiw a2, a2, -2048
-; RV64I-NEXT: add a2, a0, a2
-; RV64I-NEXT: ld a0, 0(a2)
-; RV64I-NEXT: sd a1, 0(a2)
-; RV64I-NEXT: ret
- %1 = getelementptr inbounds i64, ptr %a, i64 268435200
- %2 = load volatile i64, ptr %1
- store i64 %b, ptr %1
- ret i64 %2
-}
-
-%struct.quux = type { i32, [0 x i8] }
-
-; Make sure we don't remove the addi and fold the C from
-; (add (addi FrameIndex, C), X) into the store address.
-; FrameIndex cannot be the operand of an ADD. We must keep the ADDI.
-define void @addi_fold_crash(i64 %arg) nounwind {
-; RV64I-LABEL: addi_fold_crash:
-; RV64I: # %bb.0: # %bb
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: addi a1, sp, 4
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: sb zero, 0(a0)
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: call snork
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-bb:
- %tmp = alloca %struct.quux, align 8
- %tmp1 = getelementptr inbounds %struct.quux, ptr %tmp, i64 0, i32 1
- %tmp2 = getelementptr inbounds %struct.quux, ptr %tmp, i64 0, i32 1, i64 %arg
- store i8 0, ptr %tmp2, align 1
- call void @snork(ptr %tmp1)
- ret void
-}
-
-declare void @snork(ptr)
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll
deleted file mode 100644
index 9d7b77d..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll
+++ /dev/null
@@ -1,390 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck -check-prefix=RV64I %s
-; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck -check-prefix=RV64IM %s
-
-define i32 @urem(i32 %a, i32 %b) nounwind {
-; RV64I-LABEL: urem:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: srli a1, a1, 32
-; RV64I-NEXT: call __umoddi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: urem:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: remuw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = urem i32 %a, %b
- ret i32 %1
-}
-
-define i32 @urem_constant_lhs(i32 %a) nounwind {
-; RV64I-LABEL: urem_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a1, a0, 32
-; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __umoddi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: urem_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 10
-; RV64IM-NEXT: remuw a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = urem i32 10, %a
- ret i32 %1
-}
-
-define i32 @srem(i32 %a, i32 %b) nounwind {
-; RV64I-LABEL: srem:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: call __moddi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: srem:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: remw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = srem i32 %a, %b
- ret i32 %1
-}
-
-define i32 @srem_pow2(i32 %a) nounwind {
-; RV64I-LABEL: srem_pow2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sraiw a1, a0, 31
-; RV64I-NEXT: srliw a1, a1, 29
-; RV64I-NEXT: add a1, a0, a1
-; RV64I-NEXT: andi a1, a1, -8
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: srem_pow2:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: sraiw a1, a0, 31
-; RV64IM-NEXT: srliw a1, a1, 29
-; RV64IM-NEXT: add a1, a0, a1
-; RV64IM-NEXT: andi a1, a1, -8
-; RV64IM-NEXT: subw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = srem i32 %a, 8
- ret i32 %1
-}
-
-define i32 @srem_pow2_2(i32 %a) nounwind {
-; RV64I-LABEL: srem_pow2_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sraiw a1, a0, 31
-; RV64I-NEXT: srliw a1, a1, 16
-; RV64I-NEXT: add a1, a0, a1
-; RV64I-NEXT: lui a2, 1048560
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: srem_pow2_2:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: sraiw a1, a0, 31
-; RV64IM-NEXT: srliw a1, a1, 16
-; RV64IM-NEXT: add a1, a0, a1
-; RV64IM-NEXT: lui a2, 1048560
-; RV64IM-NEXT: and a1, a1, a2
-; RV64IM-NEXT: subw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = srem i32 %a, 65536
- ret i32 %1
-}
-
-define i32 @srem_constant_lhs(i32 %a) nounwind {
-; RV64I-LABEL: srem_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sext.w a1, a0
-; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __moddi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: srem_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, -10
-; RV64IM-NEXT: remw a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = srem i32 -10, %a
- ret i32 %1
-}
-
-define i64 @urem64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: urem64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: tail __umoddi3
-;
-; RV64IM-LABEL: urem64:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: remu a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = urem i64 %a, %b
- ret i64 %1
-}
-
-define i64 @urem64_constant_lhs(i64 %a) nounwind {
-; RV64I-LABEL: urem64_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: tail __umoddi3
-;
-; RV64IM-LABEL: urem64_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 10
-; RV64IM-NEXT: remu a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = urem i64 10, %a
- ret i64 %1
-}
-
-define i64 @srem64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: srem64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: tail __moddi3
-;
-; RV64IM-LABEL: srem64:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: rem a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = srem i64 %a, %b
- ret i64 %1
-}
-
-define i64 @srem64_constant_lhs(i64 %a) nounwind {
-; RV64I-LABEL: srem64_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: tail __moddi3
-;
-; RV64IM-LABEL: srem64_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, -10
-; RV64IM-NEXT: rem a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = srem i64 -10, %a
- ret i64 %1
-}
-
-define i8 @urem8(i8 %a, i8 %b) nounwind {
-; RV64I-LABEL: urem8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: call __umoddi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: urem8:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: andi a1, a1, 255
-; RV64IM-NEXT: andi a0, a0, 255
-; RV64IM-NEXT: remuw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = urem i8 %a, %b
- ret i8 %1
-}
-
-define i8 @urem8_constant_lhs(i8 %a) nounwind {
-; RV64I-LABEL: urem8_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: andi a1, a0, 255
-; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __umoddi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: urem8_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: andi a0, a0, 255
-; RV64IM-NEXT: li a1, 10
-; RV64IM-NEXT: remuw a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = urem i8 10, %a
- ret i8 %1
-}
-
-
-define i8 @srem8(i8 %a, i8 %b) nounwind {
-; RV64I-LABEL: srem8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a1, a1, 24
-; RV64I-NEXT: sraiw a1, a1, 24
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: sraiw a0, a0, 24
-; RV64I-NEXT: call __moddi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: srem8:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a1, a1, 24
-; RV64IM-NEXT: sraiw a1, a1, 24
-; RV64IM-NEXT: slli a0, a0, 24
-; RV64IM-NEXT: sraiw a0, a0, 24
-; RV64IM-NEXT: remw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = srem i8 %a, %b
- ret i8 %1
-}
-
-define i8 @srem8_constant_lhs(i8 %a) nounwind {
-; RV64I-LABEL: srem8_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: sraiw a1, a0, 24
-; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __moddi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: srem8_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a0, a0, 24
-; RV64IM-NEXT: sraiw a0, a0, 24
-; RV64IM-NEXT: li a1, -10
-; RV64IM-NEXT: remw a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = srem i8 -10, %a
- ret i8 %1
-}
-
-
-define i16 @urem16(i16 %a, i16 %b) nounwind {
-; RV64I-LABEL: urem16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lui a2, 16
-; RV64I-NEXT: addiw a2, a2, -1
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: call __umoddi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: urem16:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: lui a2, 16
-; RV64IM-NEXT: addi a2, a2, -1
-; RV64IM-NEXT: and a1, a1, a2
-; RV64IM-NEXT: and a0, a0, a2
-; RV64IM-NEXT: remuw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = urem i16 %a, %b
- ret i16 %1
-}
-
-define i16 @urem16_constant_lhs(i16 %a) nounwind {
-; RV64I-LABEL: urem16_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srli a1, a0, 48
-; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __umoddi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: urem16_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a0, a0, 48
-; RV64IM-NEXT: srli a0, a0, 48
-; RV64IM-NEXT: li a1, 10
-; RV64IM-NEXT: remuw a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = urem i16 10, %a
- ret i16 %1
-}
-
-define i16 @srem16(i16 %a, i16 %b) nounwind {
-; RV64I-LABEL: srem16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a1, a1, 16
-; RV64I-NEXT: sraiw a1, a1, 16
-; RV64I-NEXT: slli a0, a0, 16
-; RV64I-NEXT: sraiw a0, a0, 16
-; RV64I-NEXT: call __moddi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: srem16:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a1, a1, 16
-; RV64IM-NEXT: sraiw a1, a1, 16
-; RV64IM-NEXT: slli a0, a0, 16
-; RV64IM-NEXT: sraiw a0, a0, 16
-; RV64IM-NEXT: remw a0, a0, a1
-; RV64IM-NEXT: ret
- %1 = srem i16 %a, %b
- ret i16 %1
-}
-
-define i16 @srem16_constant_lhs(i16 %a) nounwind {
-; RV64I-LABEL: srem16_constant_lhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: slli a0, a0, 16
-; RV64I-NEXT: sraiw a1, a0, 16
-; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __moddi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64IM-LABEL: srem16_constant_lhs:
-; RV64IM: # %bb.0:
-; RV64IM-NEXT: slli a0, a0, 16
-; RV64IM-NEXT: sraiw a0, a0, 16
-; RV64IM-NEXT: li a1, -10
-; RV64IM-NEXT: remw a0, a1, a0
-; RV64IM-NEXT: ret
- %1 = srem i16 -10, %a
- ret i16 %1
-}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
deleted file mode 100644
index 80d3add..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
+++ /dev/null
@@ -1,877 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64I
-; RUN: llc -mtriple=riscv64 -mattr=+xtheadbb -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64XTHEADBB
-
-declare i32 @llvm.ctlz.i32(i32, i1)
-
-define signext i32 @ctlz_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: ctlz_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB0_2
-; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addi a2, a2, 1365
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addi a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srliw a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB0_2:
-; RV64I-NEXT: li a0, 32
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: ctlz_i32:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: th.extu a0, a0, 31, 0
-; RV64XTHEADBB-NEXT: th.ff1 a0, a0
-; RV64XTHEADBB-NEXT: addi a0, a0, -32
-; RV64XTHEADBB-NEXT: ret
- %1 = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
- ret i32 %1
-}
-
-define signext i32 @log2_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: log2_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB1_2
-; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addi a2, a2, 1365
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addi a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srliw a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: j .LBB1_3
-; RV64I-NEXT: .LBB1_2:
-; RV64I-NEXT: li a0, 32
-; RV64I-NEXT: .LBB1_3: # %cond.end
-; RV64I-NEXT: li a1, 31
-; RV64I-NEXT: subw a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: log2_i32:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: th.extu a0, a0, 31, 0
-; RV64XTHEADBB-NEXT: th.ff1 a0, a0
-; RV64XTHEADBB-NEXT: addi a0, a0, -32
-; RV64XTHEADBB-NEXT: li a1, 31
-; RV64XTHEADBB-NEXT: subw a0, a1, a0
-; RV64XTHEADBB-NEXT: ret
- %1 = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
- %2 = sub i32 31, %1
- ret i32 %2
-}
-
-define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: log2_ceil_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: addiw a0, a0, -1
-; RV64I-NEXT: li s0, 32
-; RV64I-NEXT: li a1, 32
-; RV64I-NEXT: beqz a0, .LBB2_2
-; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addi a2, a2, 1365
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addi a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srliw a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a1, a0, 24
-; RV64I-NEXT: .LBB2_2: # %cond.end
-; RV64I-NEXT: subw a0, s0, a1
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: log2_ceil_i32:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: addi a0, a0, -1
-; RV64XTHEADBB-NEXT: slli a0, a0, 32
-; RV64XTHEADBB-NEXT: srli a0, a0, 32
-; RV64XTHEADBB-NEXT: th.ff1 a0, a0
-; RV64XTHEADBB-NEXT: addi a0, a0, -32
-; RV64XTHEADBB-NEXT: li a1, 32
-; RV64XTHEADBB-NEXT: subw a0, a1, a0
-; RV64XTHEADBB-NEXT: ret
- %1 = sub i32 %a, 1
- %2 = call i32 @llvm.ctlz.i32(i32 %1, i1 false)
- %3 = sub i32 32, %2
- ret i32 %3
-}
-
-define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: findLastSet_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: srliw a0, a0, 1
-; RV64I-NEXT: or a0, s0, a0
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addi a2, a2, 1365
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addi a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srliw a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: xori a0, a0, 31
-; RV64I-NEXT: snez a1, s0
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: findLastSet_i32:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: slli a1, a0, 32
-; RV64XTHEADBB-NEXT: th.ff1 a1, a1
-; RV64XTHEADBB-NEXT: xori a1, a1, 31
-; RV64XTHEADBB-NEXT: snez a0, a0
-; RV64XTHEADBB-NEXT: addi a0, a0, -1
-; RV64XTHEADBB-NEXT: or a0, a0, a1
-; RV64XTHEADBB-NEXT: sext.w a0, a0
-; RV64XTHEADBB-NEXT: ret
- %1 = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
- %2 = xor i32 31, %1
- %3 = icmp eq i32 %a, 0
- %4 = select i1 %3, i32 -1, i32 %2
- ret i32 %4
-}
-
-define i32 @ctlz_lshr_i32(i32 signext %a) {
-; RV64I-LABEL: ctlz_lshr_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a0, a0, 1
-; RV64I-NEXT: beqz a0, .LBB4_2
-; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: .cfi_def_cfa_offset 16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: .cfi_offset ra, -8
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addi a2, a2, 1365
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addi a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srliw a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB4_2:
-; RV64I-NEXT: li a0, 32
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: ctlz_lshr_i32:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: srliw a0, a0, 1
-; RV64XTHEADBB-NEXT: th.ff1 a0, a0
-; RV64XTHEADBB-NEXT: addi a0, a0, -32
-; RV64XTHEADBB-NEXT: ret
- %1 = lshr i32 %a, 1
- %2 = call i32 @llvm.ctlz.i32(i32 %1, i1 false)
- ret i32 %2
-}
-
-declare i64 @llvm.ctlz.i64(i64, i1)
-
-define i64 @ctlz_i64(i64 %a) nounwind {
-; RV64I-LABEL: ctlz_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB5_2
-; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 32
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
-; RV64I-NEXT: slli a3, a2, 32
-; RV64I-NEXT: add a2, a2, a3
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: slli a1, a0, 8
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: slli a1, a0, 16
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: slli a1, a0, 32
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB5_2:
-; RV64I-NEXT: li a0, 64
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: ctlz_i64:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: th.ff1 a0, a0
-; RV64XTHEADBB-NEXT: ret
- %1 = call i64 @llvm.ctlz.i64(i64 %a, i1 false)
- ret i64 %1
-}
-
-declare i32 @llvm.cttz.i32(i32, i1)
-
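-; Note: without Zbb, cttz is lowered via a de Bruijn sequence: a & -a isolates
-; the lowest set bit, multiplying by 0x077cb531 (lui 30667 + addiw 1329) and
-; shifting right by 27 forms a 5-bit index, and the answer is loaded from the
-; 32-entry table at .LCPI6_0.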
-define signext i32 @cttz_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: cttz_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB6_2
-; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: negw a1, a0
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 30667
-; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 27
-; RV64I-NEXT: lui a1, %hi(.LCPI6_0)
-; RV64I-NEXT: addi a1, a1, %lo(.LCPI6_0)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lbu a0, 0(a0)
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB6_2:
-; RV64I-NEXT: li a0, 32
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: cttz_i32:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: beqz a0, .LBB6_2
-; RV64XTHEADBB-NEXT: # %bb.1: # %cond.false
-; RV64XTHEADBB-NEXT: addi sp, sp, -16
-; RV64XTHEADBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64XTHEADBB-NEXT: negw a1, a0
-; RV64XTHEADBB-NEXT: and a0, a0, a1
-; RV64XTHEADBB-NEXT: lui a1, 30667
-; RV64XTHEADBB-NEXT: addiw a1, a1, 1329
-; RV64XTHEADBB-NEXT: call __muldi3
-; RV64XTHEADBB-NEXT: srliw a0, a0, 27
-; RV64XTHEADBB-NEXT: lui a1, %hi(.LCPI6_0)
-; RV64XTHEADBB-NEXT: addi a1, a1, %lo(.LCPI6_0)
-; RV64XTHEADBB-NEXT: add a0, a1, a0
-; RV64XTHEADBB-NEXT: lbu a0, 0(a0)
-; RV64XTHEADBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64XTHEADBB-NEXT: addi sp, sp, 16
-; RV64XTHEADBB-NEXT: ret
-; RV64XTHEADBB-NEXT: .LBB6_2:
-; RV64XTHEADBB-NEXT: li a0, 32
-; RV64XTHEADBB-NEXT: ret
-; RV64ZBB-LABEL: cttz_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: ctzw a0, a0
-; RV64ZBB-NEXT: ret
- %1 = call i32 @llvm.cttz.i32(i32 %a, i1 false)
- ret i32 %1
-}
-
-define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: cttz_zero_undef_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: negw a1, a0
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 30667
-; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 27
-; RV64I-NEXT: lui a1, %hi(.LCPI7_0)
-; RV64I-NEXT: addi a1, a1, %lo(.LCPI7_0)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lbu a0, 0(a0)
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: cttz_zero_undef_i32:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: addi sp, sp, -16
-; RV64XTHEADBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64XTHEADBB-NEXT: negw a1, a0
-; RV64XTHEADBB-NEXT: and a0, a0, a1
-; RV64XTHEADBB-NEXT: lui a1, 30667
-; RV64XTHEADBB-NEXT: addiw a1, a1, 1329
-; RV64XTHEADBB-NEXT: call __muldi3
-; RV64XTHEADBB-NEXT: srliw a0, a0, 27
-; RV64XTHEADBB-NEXT: lui a1, %hi(.LCPI7_0)
-; RV64XTHEADBB-NEXT: addi a1, a1, %lo(.LCPI7_0)
-; RV64XTHEADBB-NEXT: add a0, a1, a0
-; RV64XTHEADBB-NEXT: lbu a0, 0(a0)
-; RV64XTHEADBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64XTHEADBB-NEXT: addi sp, sp, 16
-; RV64XTHEADBB-NEXT: ret
- %1 = call i32 @llvm.cttz.i32(i32 %a, i1 true)
- ret i32 %1
-}
-
-define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: findFirstSet_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: negw a0, a0
-; RV64I-NEXT: and a0, s0, a0
-; RV64I-NEXT: lui a1, 30667
-; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 27
-; RV64I-NEXT: lui a1, %hi(.LCPI8_0)
-; RV64I-NEXT: addi a1, a1, %lo(.LCPI8_0)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lbu a0, 0(a0)
-; RV64I-NEXT: snez a1, s0
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: findFirstSet_i32:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: addi sp, sp, -16
-; RV64XTHEADBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64XTHEADBB-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64XTHEADBB-NEXT: mv s0, a0
-; RV64XTHEADBB-NEXT: negw a0, a0
-; RV64XTHEADBB-NEXT: and a0, s0, a0
-; RV64XTHEADBB-NEXT: lui a1, 30667
-; RV64XTHEADBB-NEXT: addiw a1, a1, 1329
-; RV64XTHEADBB-NEXT: call __muldi3
-; RV64XTHEADBB-NEXT: srliw a0, a0, 27
-; RV64XTHEADBB-NEXT: lui a1, %hi(.LCPI8_0)
-; RV64XTHEADBB-NEXT: addi a1, a1, %lo(.LCPI8_0)
-; RV64XTHEADBB-NEXT: add a0, a1, a0
-; RV64XTHEADBB-NEXT: lbu a0, 0(a0)
-; RV64XTHEADBB-NEXT: snez a1, s0
-; RV64XTHEADBB-NEXT: addiw a1, a1, -1
-; RV64XTHEADBB-NEXT: or a0, a1, a0
-; RV64XTHEADBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64XTHEADBB-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64XTHEADBB-NEXT: addi sp, sp, 16
-; RV64XTHEADBB-NEXT: ret
- %1 = call i32 @llvm.cttz.i32(i32 %a, i1 true)
- %2 = icmp eq i32 %a, 0
- %3 = select i1 %2, i32 -1, i32 %1
- ret i32 %3
-}
-
-define signext i32 @ffs_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: ffs_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: negw a0, a0
-; RV64I-NEXT: and a0, s0, a0
-; RV64I-NEXT: lui a1, 30667
-; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 27
-; RV64I-NEXT: lui a1, %hi(.LCPI9_0)
-; RV64I-NEXT: addi a1, a1, %lo(.LCPI9_0)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lbu a0, 0(a0)
-; RV64I-NEXT: addiw a0, a0, 1
-; RV64I-NEXT: seqz a1, s0
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: ffs_i32:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: addi sp, sp, -16
-; RV64XTHEADBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64XTHEADBB-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64XTHEADBB-NEXT: mv s0, a0
-; RV64XTHEADBB-NEXT: negw a0, a0
-; RV64XTHEADBB-NEXT: and a0, s0, a0
-; RV64XTHEADBB-NEXT: lui a1, 30667
-; RV64XTHEADBB-NEXT: addiw a1, a1, 1329
-; RV64XTHEADBB-NEXT: call __muldi3
-; RV64XTHEADBB-NEXT: srliw a0, a0, 27
-; RV64XTHEADBB-NEXT: lui a1, %hi(.LCPI9_0)
-; RV64XTHEADBB-NEXT: addi a1, a1, %lo(.LCPI9_0)
-; RV64XTHEADBB-NEXT: add a0, a1, a0
-; RV64XTHEADBB-NEXT: lbu a0, 0(a0)
-; RV64XTHEADBB-NEXT: addiw a0, a0, 1
-; RV64XTHEADBB-NEXT: seqz a1, s0
-; RV64XTHEADBB-NEXT: addiw a1, a1, -1
-; RV64XTHEADBB-NEXT: and a0, a1, a0
-; RV64XTHEADBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64XTHEADBB-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64XTHEADBB-NEXT: addi sp, sp, 16
-; RV64XTHEADBB-NEXT: ret
- %1 = call i32 @llvm.cttz.i32(i32 %a, i1 true)
- %2 = add i32 %1, 1
- %3 = icmp eq i32 %a, 0
- %4 = select i1 %3, i32 0, i32 %2
- ret i32 %4
-}
-
-declare i64 @llvm.cttz.i64(i64, i1)
-
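-; Note: th.ff1 counts leading zeros (see ctlz_i64 above), so XTHEADBB computes
-; cttz as 64 - th.ff1(~a & (a - 1)): the and turns the trailing zeros of a
-; into a suffix of ones, whose leading-zero count is 64 - cttz(a). The RV64I
-; path uses the 64-bit analogue of the de Bruijn lookup, with a 6-bit index
-; (srli 58) into the 64-entry table at .LCPI10_1.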
-define i64 @cttz_i64(i64 %a) nounwind {
-; RV64I-LABEL: cttz_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB10_2
-; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: neg a1, a0
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, %hi(.LCPI10_0)
-; RV64I-NEXT: ld a1, %lo(.LCPI10_0)(a1)
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srli a0, a0, 58
-; RV64I-NEXT: lui a1, %hi(.LCPI10_1)
-; RV64I-NEXT: addi a1, a1, %lo(.LCPI10_1)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lbu a0, 0(a0)
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB10_2:
-; RV64I-NEXT: li a0, 64
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: cttz_i64:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: beqz a0, .LBB10_2
-; RV64XTHEADBB-NEXT: # %bb.1: # %cond.false
-; RV64XTHEADBB-NEXT: addi a1, a0, -1
-; RV64XTHEADBB-NEXT: not a0, a0
-; RV64XTHEADBB-NEXT: and a0, a0, a1
-; RV64XTHEADBB-NEXT: th.ff1 a0, a0
-; RV64XTHEADBB-NEXT: li a1, 64
-; RV64XTHEADBB-NEXT: sub a0, a1, a0
-; RV64XTHEADBB-NEXT: ret
-; RV64XTHEADBB-NEXT: .LBB10_2:
-; RV64XTHEADBB-NEXT: li a0, 64
-; RV64XTHEADBB-NEXT: ret
- %1 = call i64 @llvm.cttz.i64(i64 %a, i1 false)
- ret i64 %1
-}
-
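-; Note: th.ext rd, rs1, msb, lsb sign-extends the bit field rs1[msb:lsb] and
-; th.extu zero-extends it, so th.ext a0, a0, 7, 0 acts as sext.b, th.extu
-; with 15, 0 as zext.h, and th.extu a0, a0, 16, 1 extracts (a >> 1) & 0xffff.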
-define signext i32 @sextb_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: sextb_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 56
-; RV64I-NEXT: srai a0, a0, 56
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: sextb_i32:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: th.ext a0, a0, 7, 0
-; RV64XTHEADBB-NEXT: ret
- %shl = shl i32 %a, 24
- %shr = ashr exact i32 %shl, 24
- ret i32 %shr
-}
-
-define i64 @sextb_i64(i64 %a) nounwind {
-; RV64I-LABEL: sextb_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 56
-; RV64I-NEXT: srai a0, a0, 56
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: sextb_i64:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: th.ext a0, a0, 7, 0
-; RV64XTHEADBB-NEXT: ret
- %shl = shl i64 %a, 56
- %shr = ashr exact i64 %shl, 56
- ret i64 %shr
-}
-
-define signext i32 @sexth_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: sexth_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srai a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: sexth_i32:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: th.ext a0, a0, 15, 0
-; RV64XTHEADBB-NEXT: ret
- %shl = shl i32 %a, 16
- %shr = ashr exact i32 %shl, 16
- ret i32 %shr
-}
-
-define signext i32 @no_sexth_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: no_sexth_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 17
-; RV64I-NEXT: sraiw a0, a0, 16
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: no_sexth_i32:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: slli a0, a0, 17
-; RV64XTHEADBB-NEXT: sraiw a0, a0, 16
-; RV64XTHEADBB-NEXT: ret
- %shl = shl i32 %a, 17
- %shr = ashr exact i32 %shl, 16
- ret i32 %shr
-}
-
-define i64 @sexth_i64(i64 %a) nounwind {
-; RV64I-LABEL: sexth_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srai a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: sexth_i64:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: th.ext a0, a0, 15, 0
-; RV64XTHEADBB-NEXT: ret
- %shl = shl i64 %a, 48
- %shr = ashr exact i64 %shl, 48
- ret i64 %shr
-}
-
-define i64 @no_sexth_i64(i64 %a) nounwind {
-; RV64I-LABEL: no_sexth_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 49
-; RV64I-NEXT: srai a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: no_sexth_i64:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: slli a0, a0, 49
-; RV64XTHEADBB-NEXT: srai a0, a0, 48
-; RV64XTHEADBB-NEXT: ret
- %shl = shl i64 %a, 49
- %shr = ashr exact i64 %shl, 48
- ret i64 %shr
-}
-
-define i32 @zexth_i32(i32 %a) nounwind {
-; RV64I-LABEL: zexth_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: zexth_i32:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: th.extu a0, a0, 15, 0
-; RV64XTHEADBB-NEXT: ret
- %and = and i32 %a, 65535
- ret i32 %and
-}
-
-define i64 @zexth_i64(i64 %a) nounwind {
-; RV64I-LABEL: zexth_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: zexth_i64:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: th.extu a0, a0, 15, 0
-; RV64XTHEADBB-NEXT: ret
- %and = and i64 %a, 65535
- ret i64 %and
-}
-
-define i64 @zext_bf_i64(i64 %a) nounwind {
-; RV64I-LABEL: zext_bf_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 47
-; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: zext_bf_i64:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: th.extu a0, a0, 16, 1
-; RV64XTHEADBB-NEXT: ret
- %1 = lshr i64 %a, 1
- %and = and i64 %1, 65535
- ret i64 %and
-}
-
-define i64 @zext_i64_srliw(i64 %a) nounwind {
-; RV64I-LABEL: zext_i64_srliw:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a0, a0, 16
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: zext_i64_srliw:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: srliw a0, a0, 16
-; RV64XTHEADBB-NEXT: ret
- %1 = lshr i64 %a, 16
- %and = and i64 %1, 65535
- ret i64 %and
-}
-
-declare i32 @llvm.bswap.i32(i32)
-
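-; Note: the RV64I expansion swaps the middle bytes with the 0xff00 mask
-; (lui 16 + addi -256) and moves the outer bytes with shifts by 24, while
-; XTHEADBB reverses all four bytes in one th.revw.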
-define signext i32 @bswap_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: bswap_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: lui a2, 16
-; RV64I-NEXT: addiw a2, a2, -256
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: srliw a3, a0, 24
-; RV64I-NEXT: or a1, a1, a3
-; RV64I-NEXT: and a2, a0, a2
-; RV64I-NEXT: slliw a2, a2, 8
-; RV64I-NEXT: slliw a0, a0, 24
-; RV64I-NEXT: or a0, a0, a2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: bswap_i32:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: th.revw a0, a0
-; RV64XTHEADBB-NEXT: ret
- %1 = tail call i32 @llvm.bswap.i32(i32 %a)
- ret i32 %1
-}
-
-; Similar to bswap_i32 but the result is not sign extended.
-define void @bswap_i32_nosext(i32 signext %a, ptr %x) nounwind {
-; RV64I-LABEL: bswap_i32_nosext:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a2, a0, 8
-; RV64I-NEXT: lui a3, 16
-; RV64I-NEXT: addi a3, a3, -256
-; RV64I-NEXT: and a2, a2, a3
-; RV64I-NEXT: srliw a4, a0, 24
-; RV64I-NEXT: or a2, a2, a4
-; RV64I-NEXT: and a3, a0, a3
-; RV64I-NEXT: slli a3, a3, 8
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: or a0, a0, a3
-; RV64I-NEXT: or a0, a0, a2
-; RV64I-NEXT: sw a0, 0(a1)
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: bswap_i32_nosext:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: th.revw a0, a0
-; RV64XTHEADBB-NEXT: sw a0, 0(a1)
-; RV64XTHEADBB-NEXT: ret
- %1 = tail call i32 @llvm.bswap.i32(i32 %a)
- store i32 %1, ptr %x
- ret void
-}
-
-declare i64 @llvm.bswap.i64(i64)
-
-define i64 @bswap_i64(i64 %a) {
-; RV64I-LABEL: bswap_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srli a1, a0, 40
-; RV64I-NEXT: lui a2, 16
-; RV64I-NEXT: addiw a2, a2, -256
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: srli a3, a0, 56
-; RV64I-NEXT: or a1, a1, a3
-; RV64I-NEXT: srli a3, a0, 24
-; RV64I-NEXT: lui a4, 4080
-; RV64I-NEXT: and a3, a3, a4
-; RV64I-NEXT: srli a5, a0, 8
-; RV64I-NEXT: srliw a5, a5, 24
-; RV64I-NEXT: slli a5, a5, 24
-; RV64I-NEXT: or a3, a5, a3
-; RV64I-NEXT: or a1, a3, a1
-; RV64I-NEXT: and a4, a0, a4
-; RV64I-NEXT: slli a4, a4, 24
-; RV64I-NEXT: srliw a3, a0, 24
-; RV64I-NEXT: slli a3, a3, 32
-; RV64I-NEXT: or a3, a4, a3
-; RV64I-NEXT: and a2, a0, a2
-; RV64I-NEXT: slli a2, a2, 40
-; RV64I-NEXT: slli a0, a0, 56
-; RV64I-NEXT: or a0, a0, a2
-; RV64I-NEXT: or a0, a0, a3
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64XTHEADBB-LABEL: bswap_i64:
-; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: th.rev a0, a0
-; RV64XTHEADBB-NEXT: ret
- %1 = call i64 @llvm.bswap.i64(i64 %a)
- ret i64 %1
-}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll
deleted file mode 100644
index 7e2e57d..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll
+++ /dev/null
@@ -1,1937 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefixes=CHECK,RV64I
-; RUN: llc -mtriple=riscv64 -mattr=+m,+zba -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefixes=CHECK,RV64ZBA,RV64ZBANOZBB
-; RUN: llc -mtriple=riscv64 -mattr=+m,+zba,+zbb -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefixes=CHECK,RV64ZBA,RV64ZBAZBB
-
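-; A quick reference for the Zba instructions exercised below:
-;   slli.uw rd, rs1, N   -> (zext32(rs1) << N)
-;   add.uw  rd, rs1, rs2 -> zext32(rs1) + rs2   (zext.w is add.uw with x0)
-;   shNadd  rd, rs1, rs2 -> (rs1 << N) + rs2    for N in {1,2,3}
-;   shNadd.uw            -> (zext32(rs1) << N) + rs2
-; The RV64I checks show the equivalent slli/srli/add sequences.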
-define i64 @slliuw(i64 %a) nounwind {
-; RV64I-LABEL: slliuw:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 31
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: slliuw:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli.uw a0, a0, 1
-; RV64ZBA-NEXT: ret
- %conv1 = shl i64 %a, 1
- %shl = and i64 %conv1, 8589934590
- ret i64 %shl
-}
-
-define i128 @slliuw_2(i32 signext %0, ptr %1) {
-; RV64I-LABEL: slliuw_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 28
-; RV64I-NEXT: add a1, a1, a0
-; RV64I-NEXT: ld a0, 0(a1)
-; RV64I-NEXT: ld a1, 8(a1)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: slliuw_2:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli.uw a0, a0, 4
-; RV64ZBA-NEXT: add a1, a1, a0
-; RV64ZBA-NEXT: ld a0, 0(a1)
-; RV64ZBA-NEXT: ld a1, 8(a1)
-; RV64ZBA-NEXT: ret
- %3 = zext i32 %0 to i64
- %4 = getelementptr inbounds i128, ptr %1, i64 %3
- %5 = load i128, ptr %4
- ret i128 %5
-}
-
-define i128 @slliuw_3(i32 signext %0, ptr %1) {
-; RV64I-LABEL: slliuw_3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, 1
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 28
-; RV64I-NEXT: add a1, a1, a0
-; RV64I-NEXT: ld a0, 0(a1)
-; RV64I-NEXT: ld a1, 8(a1)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: slliuw_3:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: addi a0, a0, 1
-; RV64ZBA-NEXT: slli.uw a0, a0, 4
-; RV64ZBA-NEXT: add a1, a1, a0
-; RV64ZBA-NEXT: ld a0, 0(a1)
-; RV64ZBA-NEXT: ld a1, 8(a1)
-; RV64ZBA-NEXT: ret
- %add = add i32 %0, 1
- %3 = zext i32 %add to i64
- %4 = getelementptr inbounds i128, ptr %1, i64 %3
- %5 = load i128, ptr %4
- ret i128 %5
-}
-
-define i64 @adduw(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: adduw:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: srli a1, a1, 32
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: adduw:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: add.uw a0, a1, a0
-; RV64ZBA-NEXT: ret
- %and = and i64 %b, 4294967295
- %add = add i64 %and, %a
- ret i64 %add
-}
-
-define signext i8 @adduw_2(i32 signext %0, ptr %1) {
-; RV64I-LABEL: adduw_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lb a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: adduw_2:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: add.uw a0, a0, a1
-; RV64ZBA-NEXT: lb a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = zext i32 %0 to i64
- %4 = getelementptr inbounds i8, ptr %1, i64 %3
- %5 = load i8, ptr %4
- ret i8 %5
-}
-
-define signext i8 @adduw_3(i32 signext %0, ptr %1) {
-; RV64I-LABEL: adduw_3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, 1
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lb a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: adduw_3:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: addi a0, a0, 1
-; RV64ZBA-NEXT: add.uw a0, a0, a1
-; RV64ZBA-NEXT: lb a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %add = add i32 %0, 1
- %3 = zext i32 %add to i64
- %4 = getelementptr inbounds i8, ptr %1, i64 %3
- %5 = load i8, ptr %4
- ret i8 %5
-}
-
-define i64 @zextw_i64(i64 %a) nounwind {
-; RV64I-LABEL: zextw_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: zextw_i64:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: zext.w a0, a0
-; RV64ZBA-NEXT: ret
- %and = and i64 %a, 4294967295
- ret i64 %and
-}
-
-; This makes sure targetShrinkDemandedConstant changes the and immediate to
-; allow zext.w or slli+srli: since the ori overwrites bit 0, the mask
-; 0xfffffffe can be widened to 0xffffffff.
-define i64 @zextw_demandedbits_i64(i64 %0) {
-; RV64I-LABEL: zextw_demandedbits_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: ori a0, a0, 1
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: zextw_demandedbits_i64:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: ori a0, a0, 1
-; RV64ZBA-NEXT: zext.w a0, a0
-; RV64ZBA-NEXT: ret
- %2 = and i64 %0, 4294967294
- %3 = or i64 %2, 1
- ret i64 %3
-}
-
-define signext i16 @sh1add(i64 %0, ptr %1) {
-; RV64I-LABEL: sh1add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 1
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lh a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh1add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh1add a0, a0, a1
-; RV64ZBA-NEXT: lh a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = getelementptr inbounds i16, ptr %1, i64 %0
- %4 = load i16, ptr %3
- ret i16 %4
-}
-
-define signext i32 @sh2add(i64 %0, ptr %1) {
-; RV64I-LABEL: sh2add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 2
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh2add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add a0, a0, a1
-; RV64ZBA-NEXT: lw a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = getelementptr inbounds i32, ptr %1, i64 %0
- %4 = load i32, ptr %3
- ret i32 %4
-}
-
-define i64 @sh3add(i64 %0, ptr %1) {
-; RV64I-LABEL: sh3add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 3
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: ld a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh3add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh3add a0, a0, a1
-; RV64ZBA-NEXT: ld a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = getelementptr inbounds i64, ptr %1, i64 %0
- %4 = load i64, ptr %3
- ret i64 %4
-}
-
-define signext i16 @sh1adduw(i32 signext %0, ptr %1) {
-; RV64I-LABEL: sh1adduw:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 31
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lh a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh1adduw:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh1add.uw a0, a0, a1
-; RV64ZBA-NEXT: lh a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = zext i32 %0 to i64
- %4 = getelementptr inbounds i16, ptr %1, i64 %3
- %5 = load i16, ptr %4
- ret i16 %5
-}
-
-define i64 @sh1adduw_2(i64 %0, i64 %1) {
-; RV64I-LABEL: sh1adduw_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 31
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh1adduw_2:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh1add.uw a0, a0, a1
-; RV64ZBA-NEXT: ret
- %3 = shl i64 %0, 1
- %4 = and i64 %3, 8589934590
- %5 = add i64 %4, %1
- ret i64 %5
-}
-
-define signext i32 @sh2adduw(i32 signext %0, ptr %1) {
-; RV64I-LABEL: sh2adduw:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 30
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh2adduw:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
-; RV64ZBA-NEXT: lw a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = zext i32 %0 to i64
- %4 = getelementptr inbounds i32, ptr %1, i64 %3
- %5 = load i32, ptr %4
- ret i32 %5
-}
-
-define i64 @sh2adduw_2(i64 %0, i64 %1) {
-; RV64I-LABEL: sh2adduw_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 30
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh2adduw_2:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
-; RV64ZBA-NEXT: ret
- %3 = shl i64 %0, 2
- %4 = and i64 %3, 17179869180
- %5 = add i64 %4, %1
- ret i64 %5
-}
-
-define i64 @sh3adduw(i32 signext %0, ptr %1) {
-; RV64I-LABEL: sh3adduw:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 29
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: ld a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh3adduw:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
-; RV64ZBA-NEXT: ld a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = zext i32 %0 to i64
- %4 = getelementptr inbounds i64, ptr %1, i64 %3
- %5 = load i64, ptr %4
- ret i64 %5
-}
-
-define i64 @sh3adduw_2(i64 %0, i64 %1) {
-; RV64I-LABEL: sh3adduw_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 29
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh3adduw_2:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
-; RV64ZBA-NEXT: ret
- %3 = shl i64 %0, 3
- %4 = and i64 %3, 34359738360
- %5 = add i64 %4, %1
- ret i64 %5
-}
-
-; Type legalization inserts a sext_inreg after the first add. That add will be
-; selected as sh2add which does not sign extend. SimplifyDemandedBits is unable
-; to remove the sext_inreg because it has multiple uses. The ashr will use the
-; sext_inreg to become sraiw. This leaves the sext_inreg only used by the shl.
-; If the shl is selected as sllw, we don't need the sext_inreg.
-define i64 @sh2add_extra_sext(i32 %x, i32 %y, i32 %z) {
-; RV64I-LABEL: sh2add_extra_sext:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 2
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: sllw a1, a2, a0
-; RV64I-NEXT: sraiw a0, a0, 2
-; RV64I-NEXT: mul a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh2add_extra_sext:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add a0, a0, a1
-; RV64ZBA-NEXT: sllw a1, a2, a0
-; RV64ZBA-NEXT: sraiw a0, a0, 2
-; RV64ZBA-NEXT: mul a0, a1, a0
-; RV64ZBA-NEXT: ret
- %a = shl i32 %x, 2
- %b = add i32 %a, %y
- %c = shl i32 %z, %b
- %d = ashr i32 %b, 2
- %e = sext i32 %c to i64
- %f = sext i32 %d to i64
- %g = mul i64 %e, %f
- ret i64 %g
-}
-
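-; Note: a*K + b with K = (2^m + 1) * 2^n lowers to two shift-adds: the first
-; forms (2^m + 1)*a, and the second folds the remaining scale by 2^n into the
-; add of b. E.g. 20 = 5*4: sh2add a0, a0, a0 ; sh2add a0, a0, a1.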
-define i64 @addmul6(i64 %a, i64 %b) {
-; RV64I-LABEL: addmul6:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a2, a0, 1
-; RV64I-NEXT: slli a0, a0, 3
-; RV64I-NEXT: sub a0, a0, a2
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addmul6:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh1add a0, a0, a0
-; RV64ZBA-NEXT: sh1add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 6
- %d = add i64 %c, %b
- ret i64 %d
-}
-
-define i64 @addmul10(i64 %a, i64 %b) {
-; RV64I-LABEL: addmul10:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 10
-; RV64I-NEXT: mul a0, a0, a2
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addmul10:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add a0, a0, a0
-; RV64ZBA-NEXT: sh1add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 10
- %d = add i64 %c, %b
- ret i64 %d
-}
-
-define i64 @addmul12(i64 %a, i64 %b) {
-; RV64I-LABEL: addmul12:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a2, a0, 2
-; RV64I-NEXT: slli a0, a0, 4
-; RV64I-NEXT: sub a0, a0, a2
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addmul12:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh1add a0, a0, a0
-; RV64ZBA-NEXT: sh2add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 12
- %d = add i64 %c, %b
- ret i64 %d
-}
-
-define i64 @addmul18(i64 %a, i64 %b) {
-; RV64I-LABEL: addmul18:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 18
-; RV64I-NEXT: mul a0, a0, a2
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addmul18:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh3add a0, a0, a0
-; RV64ZBA-NEXT: sh1add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 18
- %d = add i64 %c, %b
- ret i64 %d
-}
-
-define i64 @addmul20(i64 %a, i64 %b) {
-; RV64I-LABEL: addmul20:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 20
-; RV64I-NEXT: mul a0, a0, a2
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addmul20:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add a0, a0, a0
-; RV64ZBA-NEXT: sh2add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 20
- %d = add i64 %c, %b
- ret i64 %d
-}
-
-define i64 @addmul24(i64 %a, i64 %b) {
-; RV64I-LABEL: addmul24:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a2, a0, 3
-; RV64I-NEXT: slli a0, a0, 5
-; RV64I-NEXT: sub a0, a0, a2
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addmul24:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh1add a0, a0, a0
-; RV64ZBA-NEXT: sh3add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 24
- %d = add i64 %c, %b
- ret i64 %d
-}
-
-define i64 @addmul36(i64 %a, i64 %b) {
-; RV64I-LABEL: addmul36:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 36
-; RV64I-NEXT: mul a0, a0, a2
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addmul36:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh3add a0, a0, a0
-; RV64ZBA-NEXT: sh2add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 36
- %d = add i64 %c, %b
- ret i64 %d
-}
-
-define i64 @addmul40(i64 %a, i64 %b) {
-; RV64I-LABEL: addmul40:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 40
-; RV64I-NEXT: mul a0, a0, a2
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addmul40:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add a0, a0, a0
-; RV64ZBA-NEXT: sh3add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 40
- %d = add i64 %c, %b
- ret i64 %d
-}
-
-define i64 @addmul72(i64 %a, i64 %b) {
-; RV64I-LABEL: addmul72:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 72
-; RV64I-NEXT: mul a0, a0, a2
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addmul72:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh3add a0, a0, a0
-; RV64ZBA-NEXT: sh3add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 72
- %d = add i64 %c, %b
- ret i64 %d
-}
-
-define i64 @mul96(i64 %a) {
-; RV64I-LABEL: mul96:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a0, 5
-; RV64I-NEXT: slli a0, a0, 7
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul96:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh1add a0, a0, a0
-; RV64ZBA-NEXT: slli a0, a0, 5
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 96
- ret i64 %c
-}
-
-define i64 @mul160(i64 %a) {
-; RV64I-LABEL: mul160:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 160
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul160:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add a0, a0, a0
-; RV64ZBA-NEXT: slli a0, a0, 5
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 160
- ret i64 %c
-}
-
-define i64 @mul288(i64 %a) {
-; RV64I-LABEL: mul288:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 288
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul288:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh3add a0, a0, a0
-; RV64ZBA-NEXT: slli a0, a0, 5
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 288
- ret i64 %c
-}
-
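-; Note: zext(a) * K with K in {96, 160, 288} = {3, 5, 9} << 5 becomes slli.uw
-; by 5 plus one shift-add. Without Zba, 160 and 288 are done as a mulhu
-; against a constant placed in the high bits: the high 64 bits of
-; (a << 32) * (5 << 37) equal a * 5 << 5 = a * 160.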
-define i64 @zext_mul96(i32 signext %a) {
-; RV64I-LABEL: zext_mul96:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a1, a0, 27
-; RV64I-NEXT: srli a0, a0, 25
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: zext_mul96:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli.uw a0, a0, 5
-; RV64ZBA-NEXT: sh1add a0, a0, a0
-; RV64ZBA-NEXT: ret
- %b = zext i32 %a to i64
- %c = mul i64 %b, 96
- ret i64 %c
-}
-
-define i64 @zext_mul160(i32 signext %a) {
-; RV64I-LABEL: zext_mul160:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: slli a1, a1, 37
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: mulhu a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: zext_mul160:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli.uw a0, a0, 5
-; RV64ZBA-NEXT: sh2add a0, a0, a0
-; RV64ZBA-NEXT: ret
- %b = zext i32 %a to i64
- %c = mul i64 %b, 160
- ret i64 %c
-}
-
-define i64 @zext_mul288(i32 signext %a) {
-; RV64I-LABEL: zext_mul288:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 9
-; RV64I-NEXT: slli a1, a1, 37
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: mulhu a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: zext_mul288:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli.uw a0, a0, 5
-; RV64ZBA-NEXT: sh3add a0, a0, a0
-; RV64ZBA-NEXT: ret
- %b = zext i32 %a to i64
- %c = mul i64 %b, 288
- ret i64 %c
-}
-
-; We can't use slli.uw because the shift amount is more than 31.
-define i64 @zext_mul12884901888(i32 signext %a) {
-; RV64I-LABEL: zext_mul12884901888:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a0, 32
-; RV64I-NEXT: slli a0, a0, 34
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: zext_mul12884901888:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh1add a0, a0, a0
-; RV64ZBA-NEXT: slli a0, a0, 32
-; RV64ZBA-NEXT: ret
- %b = zext i32 %a to i64
- %c = mul i64 %b, 12884901888
- ret i64 %c
-}
-
-; We can't use slli.uw because the shift amount is more than 31.
-define i64 @zext_mul21474836480(i32 signext %a) {
-; RV64I-LABEL: zext_mul21474836480:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: zext_mul21474836480:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add a0, a0, a0
-; RV64ZBA-NEXT: slli a0, a0, 32
-; RV64ZBA-NEXT: ret
- %b = zext i32 %a to i64
- %c = mul i64 %b, 21474836480
- ret i64 %c
-}
-
-; We can't use slli.uw because the shift amount is more than 31.
-define i64 @zext_mul38654705664(i32 signext %a) {
-; RV64I-LABEL: zext_mul38654705664:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 9
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: zext_mul38654705664:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh3add a0, a0, a0
-; RV64ZBA-NEXT: slli a0, a0, 32
-; RV64ZBA-NEXT: ret
- %b = zext i32 %a to i64
- %c = mul i64 %b, 38654705664
- ret i64 %c
-}
-
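-; shNadd takes a register addend, not an immediate, so shift-plus-constant
-; stays slli + addi for all configurations below.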
-define i64 @sh1add_imm(i64 %0) {
-; CHECK-LABEL: sh1add_imm:
-; CHECK: # %bb.0:
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: addi a0, a0, 5
-; CHECK-NEXT: ret
- %a = shl i64 %0, 1
- %b = add i64 %a, 5
- ret i64 %b
-}
-
-define i64 @sh2add_imm(i64 %0) {
-; CHECK-LABEL: sh2add_imm:
-; CHECK: # %bb.0:
-; CHECK-NEXT: slli a0, a0, 2
-; CHECK-NEXT: addi a0, a0, -6
-; CHECK-NEXT: ret
- %a = shl i64 %0, 2
- %b = add i64 %a, -6
- ret i64 %b
-}
-
-define i64 @sh3add_imm(i64 %0) {
-; CHECK-LABEL: sh3add_imm:
-; CHECK: # %bb.0:
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: addi a0, a0, 7
-; CHECK-NEXT: ret
- %a = shl i64 %0, 3
- %b = add i64 %a, 7
- ret i64 %b
-}
-
-define i64 @sh1adduw_imm(i32 signext %0) {
-; RV64I-LABEL: sh1adduw_imm:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 31
-; RV64I-NEXT: addi a0, a0, 11
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh1adduw_imm:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli.uw a0, a0, 1
-; RV64ZBA-NEXT: addi a0, a0, 11
-; RV64ZBA-NEXT: ret
- %a = zext i32 %0 to i64
- %b = shl i64 %a, 1
- %c = add i64 %b, 11
- ret i64 %c
-}
-
-define i64 @sh2adduw_imm(i32 signext %0) {
-; RV64I-LABEL: sh2adduw_imm:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 30
-; RV64I-NEXT: addi a0, a0, -12
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh2adduw_imm:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli.uw a0, a0, 2
-; RV64ZBA-NEXT: addi a0, a0, -12
-; RV64ZBA-NEXT: ret
- %a = zext i32 %0 to i64
- %b = shl i64 %a, 2
- %c = add i64 %b, -12
- ret i64 %c
-}
-
-define i64 @sh3adduw_imm(i32 signext %0) {
-; RV64I-LABEL: sh3adduw_imm:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 29
-; RV64I-NEXT: addi a0, a0, 13
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh3adduw_imm:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli.uw a0, a0, 3
-; RV64ZBA-NEXT: addi a0, a0, 13
-; RV64ZBA-NEXT: ret
- %a = zext i32 %0 to i64
- %b = shl i64 %a, 3
- %c = add i64 %b, 13
- ret i64 %c
-}
-
-define i64 @adduw_imm(i32 signext %0) nounwind {
-; RV64I-LABEL: adduw_imm:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: addi a0, a0, 5
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: adduw_imm:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: zext.w a0, a0
-; RV64ZBA-NEXT: addi a0, a0, 5
-; RV64ZBA-NEXT: ret
- %a = zext i32 %0 to i64
- %b = add i64 %a, 5
- ret i64 %b
-}
-
-define i64 @mul258(i64 %a) {
-; RV64I-LABEL: mul258:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 258
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul258:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a1, a0, 8
-; RV64ZBA-NEXT: sh1add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 258
- ret i64 %c
-}
-
-define i64 @mul260(i64 %a) {
-; RV64I-LABEL: mul260:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 260
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul260:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a1, a0, 8
-; RV64ZBA-NEXT: sh2add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 260
- ret i64 %c
-}
-
-define i64 @mul264(i64 %a) {
-; RV64I-LABEL: mul264:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 264
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul264:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a1, a0, 8
-; RV64ZBA-NEXT: sh3add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 264
- ret i64 %c
-}
-
-define i64 @imm_zextw() nounwind {
-; RV64I-LABEL: imm_zextw:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a0, 1
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: addi a0, a0, -2
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: imm_zextw:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: li a0, -2
-; RV64ZBA-NEXT: zext.w a0, a0
-; RV64ZBA-NEXT: ret
- ret i64 4294967294 ; -2 in 32 bits.
-}
-
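-; Note: multiplies by small odd constants decompose into chained shift-adds:
-; for C = 2^n*(2^m + 1) + 1 (11, 13, 19, 21, 37, 41, 73) the first shift-add
-; forms (2^m + 1)*a and the second scales it by 2^n and adds a back in; for
-; C = (2^i + 1)*(2^j + 1) (25, 27, 45, 81) the two shift-adds chain directly.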
-define i64 @mul11(i64 %a) {
-; RV64I-LABEL: mul11:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 11
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul11:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add a1, a0, a0
-; RV64ZBA-NEXT: sh1add a0, a1, a0
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 11
- ret i64 %c
-}
-
-define i64 @mul19(i64 %a) {
-; RV64I-LABEL: mul19:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 19
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul19:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh3add a1, a0, a0
-; RV64ZBA-NEXT: sh1add a0, a1, a0
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 19
- ret i64 %c
-}
-
-define i64 @mul13(i64 %a) {
-; RV64I-LABEL: mul13:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 13
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul13:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh1add a1, a0, a0
-; RV64ZBA-NEXT: sh2add a0, a1, a0
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 13
- ret i64 %c
-}
-
-define i64 @mul21(i64 %a) {
-; RV64I-LABEL: mul21:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 21
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul21:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add a1, a0, a0
-; RV64ZBA-NEXT: sh2add a0, a1, a0
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 21
- ret i64 %c
-}
-
-define i64 @mul37(i64 %a) {
-; RV64I-LABEL: mul37:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 37
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul37:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh3add a1, a0, a0
-; RV64ZBA-NEXT: sh2add a0, a1, a0
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 37
- ret i64 %c
-}
-
-define i64 @mul25(i64 %a) {
-; RV64I-LABEL: mul25:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 25
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul25:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add a0, a0, a0
-; RV64ZBA-NEXT: sh2add a0, a0, a0
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 25
- ret i64 %c
-}
-
-define i64 @mul41(i64 %a) {
-; RV64I-LABEL: mul41:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 41
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul41:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add a1, a0, a0
-; RV64ZBA-NEXT: sh3add a0, a1, a0
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 41
- ret i64 %c
-}
-
-define i64 @mul73(i64 %a) {
-; RV64I-LABEL: mul73:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 73
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul73:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh3add a1, a0, a0
-; RV64ZBA-NEXT: sh3add a0, a1, a0
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 73
- ret i64 %c
-}
-
-define i64 @mul27(i64 %a) {
-; RV64I-LABEL: mul27:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 27
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul27:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh1add a0, a0, a0
-; RV64ZBA-NEXT: sh3add a0, a0, a0
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 27
- ret i64 %c
-}
-
-define i64 @mul45(i64 %a) {
-; RV64I-LABEL: mul45:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 45
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul45:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add a0, a0, a0
-; RV64ZBA-NEXT: sh3add a0, a0, a0
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 45
- ret i64 %c
-}
-
-define i64 @mul81(i64 %a) {
-; RV64I-LABEL: mul81:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 81
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul81:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh3add a0, a0, a0
-; RV64ZBA-NEXT: sh3add a0, a0, a0
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 81
- ret i64 %c
-}
-
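-; Note: 4098/4100/4104 = 4096 + 2/4/8, so a single slli by 12 plus a shNadd
-; replaces the multiply when Zba is available.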
-define i64 @mul4098(i64 %a) {
-; RV64I-LABEL: mul4098:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a0, 1
-; RV64I-NEXT: slli a0, a0, 12
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul4098:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a1, a0, 12
-; RV64ZBA-NEXT: sh1add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 4098
- ret i64 %c
-}
-
-define i64 @mul4100(i64 %a) {
-; RV64I-LABEL: mul4100:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a0, 2
-; RV64I-NEXT: slli a0, a0, 12
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul4100:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a1, a0, 12
-; RV64ZBA-NEXT: sh2add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 4100
- ret i64 %c
-}
-
-define i64 @mul4104(i64 %a) {
-; RV64I-LABEL: mul4104:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a0, 3
-; RV64I-NEXT: slli a0, a0, 12
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: mul4104:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a1, a0, 12
-; RV64ZBA-NEXT: sh3add a0, a0, a1
-; RV64ZBA-NEXT: ret
- %c = mul i64 %a, 4104
- ret i64 %c
-}
-
-define signext i32 @mulw192(i32 signext %a) {
-; CHECK-LABEL: mulw192:
-; CHECK: # %bb.0:
-; CHECK-NEXT: li a1, 192
-; CHECK-NEXT: mulw a0, a0, a1
-; CHECK-NEXT: ret
- %c = mul i32 %a, 192
- ret i32 %c
-}
-
-define signext i32 @mulw320(i32 signext %a) {
-; CHECK-LABEL: mulw320:
-; CHECK: # %bb.0:
-; CHECK-NEXT: li a1, 320
-; CHECK-NEXT: mulw a0, a0, a1
-; CHECK-NEXT: ret
- %c = mul i32 %a, 320
- ret i32 %c
-}
-
-define signext i32 @mulw576(i32 signext %a) {
-; CHECK-LABEL: mulw576:
-; CHECK: # %bb.0:
-; CHECK-NEXT: li a1, 576
-; CHECK-NEXT: mulw a0, a0, a1
-; CHECK-NEXT: ret
- %c = mul i32 %a, 576
- ret i32 %c
-}
-
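-; Note: addends divisible by 2^N can be folded into shNadd by materializing
-; C >> N: 4104 = 1026 << 2 and 8208 = 1026 << 3, so li a1, 1026 feeds
-; sh2add/sh3add.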
-define i64 @add4104(i64 %a) {
-; RV64I-LABEL: add4104:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 1
-; RV64I-NEXT: addiw a1, a1, 8
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: add4104:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: li a1, 1026
-; RV64ZBA-NEXT: sh2add a0, a1, a0
-; RV64ZBA-NEXT: ret
- %c = add i64 %a, 4104
- ret i64 %c
-}
-
-define i64 @add8208(i64 %a) {
-; RV64I-LABEL: add8208:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 2
-; RV64I-NEXT: addiw a1, a1, 16
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: add8208:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: li a1, 1026
-; RV64ZBA-NEXT: sh3add a0, a1, a0
-; RV64ZBA-NEXT: ret
- %c = add i64 %a, 8208
- ret i64 %c
-}
-
-; Make sure we prefer LUI for the 8192 instead of using sh3add.
-define signext i32 @add8192_i32(i32 signext %a) {
-; CHECK-LABEL: add8192_i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, 2
-; CHECK-NEXT: addw a0, a0, a1
-; CHECK-NEXT: ret
- %c = add i32 %a, 8192
- ret i32 %c
-}
-
-; Make sure we prefer LUI for the 8192 instead of using sh3add.
-define i64 @add8192(i64 %a) {
-; CHECK-LABEL: add8192:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, 2
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: ret
- %c = add i64 %a, 8192
- ret i64 %c
-}
-
-define signext i32 @addshl32_5_6(i32 signext %a, i32 signext %b) {
-; RV64I-LABEL: addshl32_5_6:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 5
-; RV64I-NEXT: slli a1, a1, 6
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addshl32_5_6:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh1add a0, a1, a0
-; RV64ZBA-NEXT: slliw a0, a0, 5
-; RV64ZBA-NEXT: ret
- %c = shl i32 %a, 5
- %d = shl i32 %b, 6
- %e = add i32 %c, %d
- ret i32 %e
-}
-
-define i64 @addshl64_5_6(i64 %a, i64 %b) {
-; RV64I-LABEL: addshl64_5_6:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 5
-; RV64I-NEXT: slli a1, a1, 6
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addshl64_5_6:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh1add a0, a1, a0
-; RV64ZBA-NEXT: slli a0, a0, 5
-; RV64ZBA-NEXT: ret
- %c = shl i64 %a, 5
- %d = shl i64 %b, 6
- %e = add i64 %c, %d
- ret i64 %e
-}
-
-define signext i32 @addshl32_5_7(i32 signext %a, i32 signext %b) {
-; RV64I-LABEL: addshl32_5_7:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 5
-; RV64I-NEXT: slli a1, a1, 7
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addshl32_5_7:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add a0, a1, a0
-; RV64ZBA-NEXT: slliw a0, a0, 5
-; RV64ZBA-NEXT: ret
- %c = shl i32 %a, 5
- %d = shl i32 %b, 7
- %e = add i32 %c, %d
- ret i32 %e
-}
-
-define i64 @addshl64_5_7(i64 %a, i64 %b) {
-; RV64I-LABEL: addshl64_5_7:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 5
-; RV64I-NEXT: slli a1, a1, 7
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addshl64_5_7:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh2add a0, a1, a0
-; RV64ZBA-NEXT: slli a0, a0, 5
-; RV64ZBA-NEXT: ret
- %c = shl i64 %a, 5
- %d = shl i64 %b, 7
- %e = add i64 %c, %d
- ret i64 %e
-}
-
-define signext i32 @addshl32_5_8(i32 signext %a, i32 signext %b) {
-; RV64I-LABEL: addshl32_5_8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 5
-; RV64I-NEXT: slli a1, a1, 8
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addshl32_5_8:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh3add a0, a1, a0
-; RV64ZBA-NEXT: slliw a0, a0, 5
-; RV64ZBA-NEXT: ret
- %c = shl i32 %a, 5
- %d = shl i32 %b, 8
- %e = add i32 %c, %d
- ret i32 %e
-}
-
-define i64 @addshl64_5_8(i64 %a, i64 %b) {
-; RV64I-LABEL: addshl64_5_8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 5
-; RV64I-NEXT: slli a1, a1, 8
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: addshl64_5_8:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: sh3add a0, a1, a0
-; RV64ZBA-NEXT: slli a0, a0, 5
-; RV64ZBA-NEXT: ret
- %c = shl i64 %a, 5
- %d = shl i64 %b, 8
- %e = add i64 %c, %d
- ret i64 %e
-}
-
-; Make sure we use sext.b+sraiw for Zba+Zbb.
-; FIXME: The RV64I and Zba-only cases can be done with only 3 shifts.
-define zeroext i32 @sext_ashr_zext_i8(i8 %a) nounwind {
-; RV64I-LABEL: sext_ashr_zext_i8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: sraiw a0, a0, 31
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: ret
-;
-; RV64ZBANOZBB-LABEL: sext_ashr_zext_i8:
-; RV64ZBANOZBB: # %bb.0:
-; RV64ZBANOZBB-NEXT: slli a0, a0, 24
-; RV64ZBANOZBB-NEXT: sraiw a0, a0, 31
-; RV64ZBANOZBB-NEXT: zext.w a0, a0
-; RV64ZBANOZBB-NEXT: ret
-;
-; RV64ZBAZBB-LABEL: sext_ashr_zext_i8:
-; RV64ZBAZBB: # %bb.0:
-; RV64ZBAZBB-NEXT: sext.b a0, a0
-; RV64ZBAZBB-NEXT: sraiw a0, a0, 9
-; RV64ZBAZBB-NEXT: zext.w a0, a0
-; RV64ZBAZBB-NEXT: ret
- %ext = sext i8 %a to i32
- %1 = ashr i32 %ext, 9
- ret i32 %1
-}
-
-; Make sure we use slli+srai for Zba+Zbb.
-; FIXME: The RV64I and Zba-only cases can be done with only 3 shifts.
-define zeroext i32 @sext_ashr_zext_i16(i16 %a) nounwind {
-; RV64I-LABEL: sext_ashr_zext_i16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 16
-; RV64I-NEXT: sraiw a0, a0, 25
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: ret
-;
-; RV64ZBANOZBB-LABEL: sext_ashr_zext_i16:
-; RV64ZBANOZBB: # %bb.0:
-; RV64ZBANOZBB-NEXT: slli a0, a0, 16
-; RV64ZBANOZBB-NEXT: sraiw a0, a0, 25
-; RV64ZBANOZBB-NEXT: zext.w a0, a0
-; RV64ZBANOZBB-NEXT: ret
-;
-; RV64ZBAZBB-LABEL: sext_ashr_zext_i16:
-; RV64ZBAZBB: # %bb.0:
-; RV64ZBAZBB-NEXT: slli a0, a0, 48
-; RV64ZBAZBB-NEXT: srai a0, a0, 57
-; RV64ZBAZBB-NEXT: zext.w a0, a0
-; RV64ZBAZBB-NEXT: ret
- %ext = sext i16 %a to i32
- %1 = ashr i32 %ext, 9
- ret i32 %1
-}
-
-; This is the IR you get from InstCombine if you take the difference of 2
-; pointers and cast it to unsigned before using it as an index. The and mask
-; 0x1fffffffe is (zext32 of the pre-shifted diff) << 1, which is why srli
-; plus sh1add.uw can recover it.
-define signext i16 @sh1adduw_ptrdiff(i64 %diff, ptr %baseptr) {
-; RV64I-LABEL: sh1adduw_ptrdiff:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: slli a2, a2, 33
-; RV64I-NEXT: addi a2, a2, -2
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lh a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh1adduw_ptrdiff:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srli a0, a0, 1
-; RV64ZBA-NEXT: sh1add.uw a0, a0, a1
-; RV64ZBA-NEXT: lh a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %ptrdiff = lshr exact i64 %diff, 1
- %cast = and i64 %ptrdiff, 4294967295
- %ptr = getelementptr inbounds i16, ptr %baseptr, i64 %cast
- %res = load i16, ptr %ptr
- ret i16 %res
-}
-
-define signext i32 @sh2adduw_ptrdiff(i64 %diff, ptr %baseptr) {
-; RV64I-LABEL: sh2adduw_ptrdiff:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: slli a2, a2, 34
-; RV64I-NEXT: addi a2, a2, -4
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh2adduw_ptrdiff:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srli a0, a0, 2
-; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
-; RV64ZBA-NEXT: lw a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %ptrdiff = lshr exact i64 %diff, 2
- %cast = and i64 %ptrdiff, 4294967295
- %ptr = getelementptr inbounds i32, ptr %baseptr, i64 %cast
- %res = load i32, ptr %ptr
- ret i32 %res
-}
-
-define i64 @sh3adduw_ptrdiff(i64 %diff, ptr %baseptr) {
-; RV64I-LABEL: sh3adduw_ptrdiff:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: slli a2, a2, 35
-; RV64I-NEXT: addi a2, a2, -8
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: ld a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: sh3adduw_ptrdiff:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srli a0, a0, 3
-; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
-; RV64ZBA-NEXT: ld a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %ptrdiff = lshr exact i64 %diff, 3
- %cast = and i64 %ptrdiff, 4294967295
- %ptr = getelementptr inbounds i64, ptr %baseptr, i64 %cast
- %res = load i64, ptr %ptr
- ret i64 %res
-}
-
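-; Note: srliw leaves bit 31 clear for any shift >= 1, so the i32 value is
-; already zero-extended and srliw can feed shNadd directly under Zba. The
-; RV64I sequences model the zext+shl with an slli 32/srli (32-N) pair.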
-define signext i16 @srliw_1_sh1add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: srliw_1_sh1add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a1, 1
-; RV64I-NEXT: slli a1, a1, 1
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lh a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: srliw_1_sh1add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srliw a1, a1, 1
-; RV64ZBA-NEXT: sh1add a0, a1, a0
-; RV64ZBA-NEXT: lh a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = lshr i32 %1, 1
- %4 = zext i32 %3 to i64
- %5 = getelementptr inbounds i16, ptr %0, i64 %4
- %6 = load i16, ptr %5, align 2
- ret i16 %6
-}
-
-define i128 @slliuw_ptrdiff(i64 %diff, ptr %baseptr) {
-; RV64I-LABEL: slliuw_ptrdiff:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: slli a2, a2, 36
-; RV64I-NEXT: addi a2, a2, -16
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: add a1, a1, a0
-; RV64I-NEXT: ld a0, 0(a1)
-; RV64I-NEXT: ld a1, 8(a1)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: slliuw_ptrdiff:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srli a0, a0, 4
-; RV64ZBA-NEXT: slli.uw a0, a0, 4
-; RV64ZBA-NEXT: add a1, a1, a0
-; RV64ZBA-NEXT: ld a0, 0(a1)
-; RV64ZBA-NEXT: ld a1, 8(a1)
-; RV64ZBA-NEXT: ret
- %ptrdiff = lshr exact i64 %diff, 4
- %cast = and i64 %ptrdiff, 4294967295
- %ptr = getelementptr inbounds i128, ptr %baseptr, i64 %cast
- %res = load i128, ptr %ptr
- ret i128 %res
-}
-
-define signext i32 @srliw_2_sh2add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: srliw_2_sh2add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a1, 2
-; RV64I-NEXT: slli a1, a1, 2
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: srliw_2_sh2add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srliw a1, a1, 2
-; RV64ZBA-NEXT: sh2add a0, a1, a0
-; RV64ZBA-NEXT: lw a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = lshr i32 %1, 2
- %4 = zext i32 %3 to i64
- %5 = getelementptr inbounds i32, ptr %0, i64 %4
- %6 = load i32, ptr %5, align 4
- ret i32 %6
-}
-
-define i64 @srliw_3_sh3add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: srliw_3_sh3add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a1, 3
-; RV64I-NEXT: slli a1, a1, 3
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ld a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: srliw_3_sh3add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srliw a1, a1, 3
-; RV64ZBA-NEXT: sh3add a0, a1, a0
-; RV64ZBA-NEXT: ld a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = lshr i32 %1, 3
- %4 = zext i32 %3 to i64
- %5 = getelementptr inbounds i64, ptr %0, i64 %4
- %6 = load i64, ptr %5, align 8
- ret i64 %6
-}
-
-define signext i32 @srliw_1_sh2add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: srliw_1_sh2add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a1, 1
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: srli a1, a1, 30
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: srliw_1_sh2add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srliw a1, a1, 1
-; RV64ZBA-NEXT: sh2add a0, a1, a0
-; RV64ZBA-NEXT: lw a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = lshr i32 %1, 1
- %4 = zext i32 %3 to i64
- %5 = getelementptr inbounds i32, ptr %0, i64 %4
- %6 = load i32, ptr %5, align 4
- ret i32 %6
-}
-
-define i64 @srliw_1_sh3add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: srliw_1_sh3add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a1, 1
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: srli a1, a1, 29
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ld a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: srliw_1_sh3add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srliw a1, a1, 1
-; RV64ZBA-NEXT: sh3add a0, a1, a0
-; RV64ZBA-NEXT: ld a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = lshr i32 %1, 1
- %4 = zext i32 %3 to i64
- %5 = getelementptr inbounds i64, ptr %0, i64 %4
- %6 = load i64, ptr %5, align 8
- ret i64 %6
-}
-
-define i64 @srliw_2_sh3add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: srliw_2_sh3add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a1, 2
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: srli a1, a1, 29
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ld a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: srliw_2_sh3add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srliw a1, a1, 2
-; RV64ZBA-NEXT: sh3add a0, a1, a0
-; RV64ZBA-NEXT: ld a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = lshr i32 %1, 2
- %4 = zext i32 %3 to i64
- %5 = getelementptr inbounds i64, ptr %0, i64 %4
- %6 = load i64, ptr %5, align 8
- ret i64 %6
-}
-
-define signext i16 @srliw_2_sh1add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: srliw_2_sh1add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a1, 2
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: srli a1, a1, 31
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lh a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: srliw_2_sh1add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srliw a1, a1, 2
-; RV64ZBA-NEXT: sh1add a0, a1, a0
-; RV64ZBA-NEXT: lh a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = lshr i32 %1, 2
- %4 = zext i32 %3 to i64
- %5 = getelementptr inbounds i16, ptr %0, i64 %4
- %6 = load i16, ptr %5, align 2
- ret i16 %6
-}
-
-define signext i32 @srliw_3_sh2add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: srliw_3_sh2add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a1, 3
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: srli a1, a1, 30
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: srliw_3_sh2add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srliw a1, a1, 3
-; RV64ZBA-NEXT: sh2add a0, a1, a0
-; RV64ZBA-NEXT: lw a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = lshr i32 %1, 3
- %4 = zext i32 %3 to i64
- %5 = getelementptr inbounds i32, ptr %0, i64 %4
- %6 = load i32, ptr %5, align 4
- ret i32 %6
-}
-
-define i64 @srliw_4_sh3add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: srliw_4_sh3add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a1, 4
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: srli a1, a1, 29
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ld a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: srliw_4_sh3add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srliw a1, a1, 4
-; RV64ZBA-NEXT: sh3add a0, a1, a0
-; RV64ZBA-NEXT: ld a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = lshr i32 %1, 4
- %4 = zext i32 %3 to i64
- %5 = getelementptr inbounds i64, ptr %0, i64 %4
- %6 = load i64, ptr %5, align 8
- ret i64 %6
-}
-
-define signext i32 @srli_1_sh2add(ptr %0, i64 %1) {
-; RV64I-LABEL: srli_1_sh2add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a1, 1
-; RV64I-NEXT: andi a1, a1, -4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: srli_1_sh2add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srli a1, a1, 1
-; RV64ZBA-NEXT: sh2add a0, a1, a0
-; RV64ZBA-NEXT: lw a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = lshr i64 %1, 1
- %4 = getelementptr inbounds i32, ptr %0, i64 %3
- %5 = load i32, ptr %4, align 4
- ret i32 %5
-}
-
-define i64 @srli_2_sh3add(ptr %0, i64 %1) {
-; RV64I-LABEL: srli_2_sh3add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a1, 1
-; RV64I-NEXT: andi a1, a1, -8
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ld a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: srli_2_sh3add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srli a1, a1, 2
-; RV64ZBA-NEXT: sh3add a0, a1, a0
-; RV64ZBA-NEXT: ld a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = lshr i64 %1, 2
- %4 = getelementptr inbounds i64, ptr %0, i64 %3
- %5 = load i64, ptr %4, align 8
- ret i64 %5
-}
-
-define signext i16 @srli_2_sh1add(ptr %0, i64 %1) {
-; RV64I-LABEL: srli_2_sh1add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srli a1, a1, 1
-; RV64I-NEXT: andi a1, a1, -2
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lh a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: srli_2_sh1add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srli a1, a1, 2
-; RV64ZBA-NEXT: sh1add a0, a1, a0
-; RV64ZBA-NEXT: lh a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = lshr i64 %1, 2
- %4 = getelementptr inbounds i16, ptr %0, i64 %3
- %5 = load i16, ptr %4, align 2
- ret i16 %5
-}
-
-define signext i32 @srli_3_sh2add(ptr %0, i64 %1) {
-; RV64I-LABEL: srli_3_sh2add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srli a1, a1, 1
-; RV64I-NEXT: andi a1, a1, -4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: srli_3_sh2add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srli a1, a1, 3
-; RV64ZBA-NEXT: sh2add a0, a1, a0
-; RV64ZBA-NEXT: lw a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = lshr i64 %1, 3
- %4 = getelementptr inbounds i32, ptr %0, i64 %3
- %5 = load i32, ptr %4, align 4
- ret i32 %5
-}
-
-define i64 @srli_4_sh3add(ptr %0, i64 %1) {
-; RV64I-LABEL: srli_4_sh3add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srli a1, a1, 1
-; RV64I-NEXT: andi a1, a1, -8
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ld a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: srli_4_sh3add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: srli a1, a1, 4
-; RV64ZBA-NEXT: sh3add a0, a1, a0
-; RV64ZBA-NEXT: ld a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = lshr i64 %1, 4
- %4 = getelementptr inbounds i64, ptr %0, i64 %3
- %5 = load i64, ptr %4, align 8
- ret i64 %5
-}
-
-define signext i16 @shl_2_sh1add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: shl_2_sh1add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a1, 2
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: srli a1, a1, 31
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lh a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: shl_2_sh1add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a1, a1, 2
-; RV64ZBA-NEXT: zext.w a1, a1
-; RV64ZBA-NEXT: sh1add a0, a1, a0
-; RV64ZBA-NEXT: lh a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = shl i32 %1, 2
- %4 = zext i32 %3 to i64
- %5 = getelementptr inbounds i16, ptr %0, i64 %4
- %6 = load i16, ptr %5, align 2
- ret i16 %6
-}
-
-define signext i32 @shl_16_sh2add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: shl_16_sh2add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a1, 16
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: srli a1, a1, 30
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: shl_16_sh2add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a1, a1, 16
-; RV64ZBA-NEXT: zext.w a1, a1
-; RV64ZBA-NEXT: sh2add a0, a1, a0
-; RV64ZBA-NEXT: lw a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = shl i32 %1, 16
- %4 = zext i32 %3 to i64
- %5 = getelementptr inbounds i32, ptr %0, i64 %4
- %6 = load i32, ptr %5, align 4
- ret i32 %6
-}
-
-define i64 @shl_31_sh3add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: shl_31_sh3add:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a1, 31
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: srli a1, a1, 29
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ld a0, 0(a0)
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: shl_31_sh3add:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a1, a1, 31
-; RV64ZBA-NEXT: zext.w a1, a1
-; RV64ZBA-NEXT: sh3add a0, a1, a0
-; RV64ZBA-NEXT: ld a0, 0(a0)
-; RV64ZBA-NEXT: ret
- %3 = shl i32 %1, 31
- %4 = zext i32 %3 to i64
- %5 = getelementptr inbounds i64, ptr %0, i64 %4
- %6 = load i64, ptr %5, align 8
- ret i64 %6
-}
-
-define i64 @pack_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: pack_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: pack_i64:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a1, a1, 32
-; RV64ZBA-NEXT: add.uw a0, a0, a1
-; RV64ZBA-NEXT: ret
- %shl = and i64 %a, 4294967295
- %shl1 = shl i64 %b, 32
- %or = or i64 %shl1, %shl
- ret i64 %or
-}
-
-define i64 @pack_i64_2(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: pack_i64_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: pack_i64_2:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a1, a1, 32
-; RV64ZBA-NEXT: add.uw a0, a0, a1
-; RV64ZBA-NEXT: ret
- %zexta = zext i32 %a to i64
- %zextb = zext i32 %b to i64
- %shl1 = shl i64 %zextb, 32
- %or = or i64 %shl1, %zexta
- ret i64 %or
-}
-
-define i64 @pack_i64_3(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: pack_i64_3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, 1
-; RV64I-NEXT: addi a1, a1, 1
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: pack_i64_3:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: addi a0, a0, 1
-; RV64ZBA-NEXT: addi a1, a1, 1
-; RV64ZBA-NEXT: slli a1, a1, 32
-; RV64ZBA-NEXT: add.uw a0, a0, a1
-; RV64ZBA-NEXT: ret
- %adda = add i32 %a, 1
- %addb = add i32 %b, 1
- %zexta = zext i32 %adda to i64
- %zextb = zext i32 %addb to i64
- %shl1 = shl i64 %zextb, 32
- %or = or i64 %shl1, %zexta
- ret i64 %or
-}
-
-define i64 @pack_i64_disjoint(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: pack_i64_disjoint:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: pack_i64_disjoint:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: add.uw a0, a0, a1
-; RV64ZBA-NEXT: ret
- %shl = and i64 %a, 4294967295
- %or = or disjoint i64 %b, %shl
- ret i64 %or
-}
-
-define i64 @pack_i64_disjoint_2(i32 signext %a, i64 %b) nounwind {
-; RV64I-LABEL: pack_i64_disjoint_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBA-LABEL: pack_i64_disjoint_2:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: add.uw a0, a0, a1
-; RV64ZBA-NEXT: ret
- %zexta = zext i32 %a to i64
- %or = or disjoint i64 %b, %zexta
- ret i64 %or
-}
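
For reference, the add.uw packing idiom exercised by the pack_i64 tests above does not depend on the removed flag. A minimal standalone sketch (assuming an upstream llc with -mattr=+zba; pack_example is an illustrative name, not from this commit) that still selects slli + add.uw:

; RUN: llc -mtriple=riscv64 -mattr=+zba < %s
define i64 @pack_example(i64 %a, i64 %b) {
  %lo = and i64 %a, 4294967295 ; keep the low 32 bits of %a
  %hi = shl i64 %b, 32         ; move %b into the upper 32 bits
  %or = or i64 %hi, %lo        ; expected to lower to slli + add.uw
  ret i64 %or
}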
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb-intrinsic.ll
deleted file mode 100644
index 1ab3749..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb-intrinsic.ll
+++ /dev/null
@@ -1,77 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64ZBB
-
-declare i32 @llvm.riscv.orc.b.i32(i32)
-
-define signext i32 @orcb32(i32 signext %a) nounwind {
-; RV64ZBB-LABEL: orcb32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: orc.b a0, a0
-; RV64ZBB-NEXT: sext.w a0, a0
-; RV64ZBB-NEXT: ret
- %tmp = call i32 @llvm.riscv.orc.b.i32(i32 %a)
- ret i32 %tmp
-}
-
-define zeroext i32 @orcb32_zext(i32 zeroext %a) nounwind {
-; RV64ZBB-LABEL: orcb32_zext:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: orc.b a0, a0
-; RV64ZBB-NEXT: ret
- %tmp = call i32 @llvm.riscv.orc.b.i32(i32 %a)
- ret i32 %tmp
-}
-
-; Second and+or is redundant with the first; make sure we remove them.
-define signext i32 @orcb32_knownbits(i32 signext %a) nounwind {
-; RV64ZBB-LABEL: orcb32_knownbits:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: lui a1, 1044480
-; RV64ZBB-NEXT: and a0, a0, a1
-; RV64ZBB-NEXT: lui a1, 2048
-; RV64ZBB-NEXT: addi a1, a1, 1
-; RV64ZBB-NEXT: or a0, a0, a1
-; RV64ZBB-NEXT: orc.b a0, a0
-; RV64ZBB-NEXT: sext.w a0, a0
-; RV64ZBB-NEXT: ret
- %tmp = and i32 %a, 4278190080 ; 0xFF000000
- %tmp2 = or i32 %tmp, 8388609 ; 0x800001
- %tmp3 = call i32 @llvm.riscv.orc.b.i32(i32 %tmp2)
- %tmp4 = and i32 %tmp3, 4278190080 ; 0xFF000000
- %tmp5 = or i32 %tmp4, 16711935 ; 0xFF00FF
- ret i32 %tmp5
-}
-
-declare i64 @llvm.riscv.orc.b.i64(i64)
-
-define i64 @orcb64(i64 %a) nounwind {
-; RV64ZBB-LABEL: orcb64:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: orc.b a0, a0
-; RV64ZBB-NEXT: ret
- %tmp = call i64 @llvm.riscv.orc.b.i64(i64 %a)
- ret i64 %tmp
-}
-
-; Second and+or is redundant with the first; make sure we remove them.
-define i64 @orcb64_knownbits(i64 %a) nounwind {
-; RV64ZBB-LABEL: orcb64_knownbits:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: lui a1, 65535
-; RV64ZBB-NEXT: slli a1, a1, 12
-; RV64ZBB-NEXT: and a0, a0, a1
-; RV64ZBB-NEXT: lui a1, 256
-; RV64ZBB-NEXT: addiw a1, a1, 8
-; RV64ZBB-NEXT: slli a2, a1, 42
-; RV64ZBB-NEXT: add a1, a1, a2
-; RV64ZBB-NEXT: or a0, a0, a1
-; RV64ZBB-NEXT: orc.b a0, a0
-; RV64ZBB-NEXT: ret
- %tmp = and i64 %a, 1099494850560 ; 0x000000ffff000000
- %tmp2 = or i64 %tmp, 4611721202800525320 ; 0x4000200000100008
- %tmp3 = call i64 @llvm.riscv.orc.b.i64(i64 %tmp2)
- %tmp4 = and i64 %tmp3, 1099494850560 ; 0x000000ffff000000
- %tmp5 = or i64 %tmp4, 18374966855153418495 ; 0xff00ff0000ff00ff
- ret i64 %tmp5
-}
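
The orc.b intrinsic tests above carry over unchanged to a flag-free module. A minimal sketch (assuming an upstream llc with -mattr=+zbb; orcb_example is an illustrative name):

; RUN: llc -mtriple=riscv64 -mattr=+zbb < %s
declare i32 @llvm.riscv.orc.b.i32(i32)

define signext i32 @orcb_example(i32 signext %a) {
  ; expected to select orc.b followed by sext.w, as in orcb32 above
  %r = call i32 @llvm.riscv.orc.b.i32(i32 %a)
  ret i32 %r
}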
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb-zbkb.ll
deleted file mode 100644
index c98ad45..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb-zbkb.ll
+++ /dev/null
@@ -1,575 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefixes=CHECK,RV64I
-; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefixes=CHECK,RV64ZBB-ZBKB,RV64ZBB
-; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefixes=CHECK,RV64ZBB-ZBKB,RV64ZBKB
-
-define signext i32 @andn_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: andn_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: not a1, a1
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: andn_i32:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: andn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT: ret
- %neg = xor i32 %b, -1
- %and = and i32 %neg, %a
- ret i32 %and
-}
-
-define i64 @andn_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: andn_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: not a1, a1
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: andn_i64:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: andn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT: ret
- %neg = xor i64 %b, -1
- %and = and i64 %neg, %a
- ret i64 %and
-}
-
-define signext i32 @orn_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: orn_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: not a1, a1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: orn_i32:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: orn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT: ret
- %neg = xor i32 %b, -1
- %or = or i32 %neg, %a
- ret i32 %or
-}
-
-define i64 @orn_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: orn_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: not a1, a1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: orn_i64:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: orn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT: ret
- %neg = xor i64 %b, -1
- %or = or i64 %neg, %a
- ret i64 %or
-}
-
-define signext i32 @xnor_i32(i32 signext %a, i32 signext %b) nounwind {
-; CHECK-LABEL: xnor_i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xor a0, a0, a1
-; CHECK-NEXT: not a0, a0
-; CHECK-NEXT: ret
- %neg = xor i32 %a, -1
- %xor = xor i32 %neg, %b
- ret i32 %xor
-}
-
-define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: xnor_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: xnor_i64:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: xnor a0, a0, a1
-; RV64ZBB-ZBKB-NEXT: ret
- %neg = xor i64 %a, -1
- %xor = xor i64 %neg, %b
- ret i64 %xor
-}
-
-declare i32 @llvm.fshl.i32(i32, i32, i32)
-
-define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: rol_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sllw a2, a0, a1
-; RV64I-NEXT: negw a1, a1
-; RV64I-NEXT: srlw a0, a0, a1
-; RV64I-NEXT: or a0, a2, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: rol_i32:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: rolw a0, a0, a1
-; RV64ZBB-ZBKB-NEXT: ret
- %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
- ret i32 %1
-}
-
-; Similar to rol_i32, but doesn't sign extend the result.
-define void @rol_i32_nosext(i32 signext %a, i32 signext %b, ptr %x) nounwind {
-; RV64I-LABEL: rol_i32_nosext:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sllw a3, a0, a1
-; RV64I-NEXT: negw a1, a1
-; RV64I-NEXT: srlw a0, a0, a1
-; RV64I-NEXT: or a0, a3, a0
-; RV64I-NEXT: sw a0, 0(a2)
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: rol_i32_nosext:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: rolw a0, a0, a1
-; RV64ZBB-ZBKB-NEXT: sw a0, 0(a2)
-; RV64ZBB-ZBKB-NEXT: ret
- %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
- store i32 %1, ptr %x
- ret void
-}
-
-define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind {
-; RV64I-LABEL: rol_i32_neg_constant_rhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, -2
-; RV64I-NEXT: sllw a2, a1, a0
-; RV64I-NEXT: negw a0, a0
-; RV64I-NEXT: srlw a0, a1, a0
-; RV64I-NEXT: or a0, a2, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: rol_i32_neg_constant_rhs:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: li a1, -2
-; RV64ZBB-ZBKB-NEXT: rolw a0, a1, a0
-; RV64ZBB-ZBKB-NEXT: ret
- %1 = tail call i32 @llvm.fshl.i32(i32 -2, i32 -2, i32 %a)
- ret i32 %1
-}
-
-declare i64 @llvm.fshl.i64(i64, i64, i64)
-
-define i64 @rol_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: rol_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sll a2, a0, a1
-; RV64I-NEXT: negw a1, a1
-; RV64I-NEXT: srl a0, a0, a1
-; RV64I-NEXT: or a0, a2, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: rol_i64:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: rol a0, a0, a1
-; RV64ZBB-ZBKB-NEXT: ret
- %or = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %b)
- ret i64 %or
-}
-
-declare i32 @llvm.fshr.i32(i32, i32, i32)
-
-define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: ror_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srlw a2, a0, a1
-; RV64I-NEXT: negw a1, a1
-; RV64I-NEXT: sllw a0, a0, a1
-; RV64I-NEXT: or a0, a2, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: ror_i32:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: rorw a0, a0, a1
-; RV64ZBB-ZBKB-NEXT: ret
- %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
- ret i32 %1
-}
-
-; Similar to ror_i32, but doesn't sign extend the result.
-define void @ror_i32_nosext(i32 signext %a, i32 signext %b, ptr %x) nounwind {
-; RV64I-LABEL: ror_i32_nosext:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srlw a3, a0, a1
-; RV64I-NEXT: negw a1, a1
-; RV64I-NEXT: sllw a0, a0, a1
-; RV64I-NEXT: or a0, a3, a0
-; RV64I-NEXT: sw a0, 0(a2)
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: ror_i32_nosext:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: rorw a0, a0, a1
-; RV64ZBB-ZBKB-NEXT: sw a0, 0(a2)
-; RV64ZBB-ZBKB-NEXT: ret
- %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
- store i32 %1, ptr %x
- ret void
-}
-
-define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind {
-; RV64I-LABEL: ror_i32_neg_constant_rhs:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, -2
-; RV64I-NEXT: srlw a2, a1, a0
-; RV64I-NEXT: negw a0, a0
-; RV64I-NEXT: sllw a0, a1, a0
-; RV64I-NEXT: or a0, a2, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: ror_i32_neg_constant_rhs:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: li a1, -2
-; RV64ZBB-ZBKB-NEXT: rorw a0, a1, a0
-; RV64ZBB-ZBKB-NEXT: ret
- %1 = tail call i32 @llvm.fshr.i32(i32 -2, i32 -2, i32 %a)
- ret i32 %1
-}
-
-declare i64 @llvm.fshr.i64(i64, i64, i64)
-
-define i64 @ror_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: ror_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srl a2, a0, a1
-; RV64I-NEXT: negw a1, a1
-; RV64I-NEXT: sll a0, a0, a1
-; RV64I-NEXT: or a0, a2, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: ror_i64:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: ror a0, a0, a1
-; RV64ZBB-ZBKB-NEXT: ret
- %or = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %b)
- ret i64 %or
-}
-
-define signext i32 @rori_i32_fshl(i32 signext %a) nounwind {
-; RV64I-LABEL: rori_i32_fshl:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: slliw a0, a0, 31
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: rori_i32_fshl:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: roriw a0, a0, 1
-; RV64ZBB-ZBKB-NEXT: ret
- %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31)
- ret i32 %1
-}
-
-; Similar to rori_i32_fshl, but doesn't sign extend the result.
-define void @rori_i32_fshl_nosext(i32 signext %a, ptr %x) nounwind {
-; RV64I-LABEL: rori_i32_fshl_nosext:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a2, a0, 1
-; RV64I-NEXT: slli a0, a0, 31
-; RV64I-NEXT: or a0, a0, a2
-; RV64I-NEXT: sw a0, 0(a1)
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: rori_i32_fshl_nosext:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: roriw a0, a0, 1
-; RV64ZBB-ZBKB-NEXT: sw a0, 0(a1)
-; RV64ZBB-ZBKB-NEXT: ret
- %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31)
- store i32 %1, ptr %x
- ret void
-}
-
-define signext i32 @rori_i32_fshr(i32 signext %a) nounwind {
-; RV64I-LABEL: rori_i32_fshr:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slliw a1, a0, 1
-; RV64I-NEXT: srliw a0, a0, 31
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: rori_i32_fshr:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: roriw a0, a0, 31
-; RV64ZBB-ZBKB-NEXT: ret
- %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31)
- ret i32 %1
-}
-
-; Similar to rori_i32_fshr, but doesn't sign extend the result.
-define void @rori_i32_fshr_nosext(i32 signext %a, ptr %x) nounwind {
-; RV64I-LABEL: rori_i32_fshr_nosext:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a2, a0, 1
-; RV64I-NEXT: srliw a0, a0, 31
-; RV64I-NEXT: or a0, a0, a2
-; RV64I-NEXT: sw a0, 0(a1)
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: rori_i32_fshr_nosext:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: roriw a0, a0, 31
-; RV64ZBB-ZBKB-NEXT: sw a0, 0(a1)
-; RV64ZBB-ZBKB-NEXT: ret
- %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31)
- store i32 %1, ptr %x
- ret void
-}
-
-; This test is similar to the type-legalized version of the fshl/fshr tests, but
-; instead of having the same input to both shifts, it has different inputs. Make
-; sure we don't match it as a roriw.
-define signext i32 @not_rori_i32(i32 signext %x, i32 signext %y) nounwind {
-; CHECK-LABEL: not_rori_i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: slliw a0, a0, 31
-; CHECK-NEXT: srliw a1, a1, 1
-; CHECK-NEXT: or a0, a0, a1
-; CHECK-NEXT: ret
- %a = shl i32 %x, 31
- %b = lshr i32 %y, 1
- %c = or i32 %a, %b
- ret i32 %c
-}
-
-; This is similar to the type-legalized roriw pattern, but the and mask is wider
-; than 32 bits, so the lshr doesn't shift zeroes into the lower 32 bits. Make
-; sure we don't match it to roriw.
-define i64 @roriw_bug(i64 %x) nounwind {
-; CHECK-LABEL: roriw_bug:
-; CHECK: # %bb.0:
-; CHECK-NEXT: slli a1, a0, 31
-; CHECK-NEXT: andi a2, a0, -2
-; CHECK-NEXT: srli a0, a0, 1
-; CHECK-NEXT: or a0, a1, a0
-; CHECK-NEXT: sext.w a0, a0
-; CHECK-NEXT: xor a0, a2, a0
-; CHECK-NEXT: ret
- %a = shl i64 %x, 31
- %b = and i64 %x, 18446744073709551614
- %c = lshr i64 %b, 1
- %d = or i64 %a, %c
- %e = shl i64 %d, 32
- %f = ashr i64 %e, 32
- %g = xor i64 %b, %f ; to increase the use count on %b to disable SimplifyDemandedBits.
- ret i64 %g
-}
-
-define i64 @rori_i64_fshl(i64 %a) nounwind {
-; RV64I-LABEL: rori_i64_fshl:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: slli a0, a0, 63
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: rori_i64_fshl:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: rori a0, a0, 1
-; RV64ZBB-ZBKB-NEXT: ret
- %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 63)
- ret i64 %1
-}
-
-define i64 @rori_i64_fshr(i64 %a) nounwind {
-; RV64I-LABEL: rori_i64_fshr:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a1, a0, 1
-; RV64I-NEXT: srli a0, a0, 63
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: rori_i64_fshr:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: rori a0, a0, 63
-; RV64ZBB-ZBKB-NEXT: ret
- %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 63)
- ret i64 %1
-}
-
-define signext i32 @not_shl_one_i32(i32 signext %x) {
-; RV64I-LABEL: not_shl_one_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 1
-; RV64I-NEXT: sllw a0, a1, a0
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: not_shl_one_i32:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: li a1, -2
-; RV64ZBB-ZBKB-NEXT: rolw a0, a1, a0
-; RV64ZBB-ZBKB-NEXT: ret
- %1 = shl i32 1, %x
- %2 = xor i32 %1, -1
- ret i32 %2
-}
-
-define i64 @not_shl_one_i64(i64 %x) {
-; RV64I-LABEL: not_shl_one_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 1
-; RV64I-NEXT: sll a0, a1, a0
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: not_shl_one_i64:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: li a1, -2
-; RV64ZBB-ZBKB-NEXT: rol a0, a1, a0
-; RV64ZBB-ZBKB-NEXT: ret
- %1 = shl i64 1, %x
- %2 = xor i64 %1, -1
- ret i64 %2
-}
-
-define i8 @srli_i8(i8 %a) nounwind {
-; CHECK-LABEL: srli_i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srli a0, a0, 62
-; CHECK-NEXT: ret
- %1 = lshr i8 %a, 6
- ret i8 %1
-}
-
-; We could use sext.b+srai, but slli+srai offers more opportunities for
-; compressed instructions.
-define i8 @srai_i8(i8 %a) nounwind {
-; RV64I-LABEL: srai_i8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: sraiw a0, a0, 29
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: srai_i8:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: slli a0, a0, 56
-; RV64ZBB-NEXT: srai a0, a0, 61
-; RV64ZBB-NEXT: ret
-;
-; RV64ZBKB-LABEL: srai_i8:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: slli a0, a0, 24
-; RV64ZBKB-NEXT: sraiw a0, a0, 29
-; RV64ZBKB-NEXT: ret
- %1 = ashr i8 %a, 5
- ret i8 %1
-}
-
-; We could use zext.h+srli, but slli+srli offers more opportunities for
-; compressed instructions.
-define i16 @srli_i16(i16 %a) nounwind {
-; CHECK-LABEL: srli_i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srli a0, a0, 54
-; CHECK-NEXT: ret
- %1 = lshr i16 %a, 6
- ret i16 %1
-}
-
-; We could use sext.h+srai, but slli+srai offers more opportunities for
-; compressed instructions.
-define i16 @srai_i16(i16 %a) nounwind {
-; RV64I-LABEL: srai_i16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 16
-; RV64I-NEXT: sraiw a0, a0, 25
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: srai_i16:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: slli a0, a0, 48
-; RV64ZBB-NEXT: srai a0, a0, 57
-; RV64ZBB-NEXT: ret
-;
-; RV64ZBKB-LABEL: srai_i16:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: slli a0, a0, 16
-; RV64ZBKB-NEXT: sraiw a0, a0, 25
-; RV64ZBKB-NEXT: ret
- %1 = ashr i16 %a, 9
- ret i16 %1
-}
-
-define i1 @andn_seqz_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: andn_seqz_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: seqz a0, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: andn_seqz_i32:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: andn a0, a1, a0
-; RV64ZBB-ZBKB-NEXT: seqz a0, a0
-; RV64ZBB-ZBKB-NEXT: ret
- %and = and i32 %a, %b
- %cmpeq = icmp eq i32 %and, %b
- ret i1 %cmpeq
-}
-
-define i1 @andn_seqz_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: andn_seqz_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: seqz a0, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: andn_seqz_i64:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: andn a0, a1, a0
-; RV64ZBB-ZBKB-NEXT: seqz a0, a0
-; RV64ZBB-ZBKB-NEXT: ret
- %and = and i64 %a, %b
- %cmpeq = icmp eq i64 %and, %b
- ret i1 %cmpeq
-}
-
-define i1 @andn_snez_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: andn_snez_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: snez a0, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: andn_snez_i32:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: andn a0, a1, a0
-; RV64ZBB-ZBKB-NEXT: snez a0, a0
-; RV64ZBB-ZBKB-NEXT: ret
- %and = and i32 %a, %b
- %cmpeq = icmp ne i32 %and, %b
- ret i1 %cmpeq
-}
-
-define i1 @andn_snez_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: andn_snez_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: snez a0, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-ZBKB-LABEL: andn_snez_i64:
-; RV64ZBB-ZBKB: # %bb.0:
-; RV64ZBB-ZBKB-NEXT: andn a0, a1, a0
-; RV64ZBB-ZBKB-NEXT: snez a0, a0
-; RV64ZBB-ZBKB-NEXT: ret
- %and = and i64 %a, %b
- %cmpeq = icmp ne i64 %and, %b
- ret i1 %cmpeq
-}
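
The rotate tests above hinge on funnel-shift intrinsics whose two data operands are the same value. A minimal sketch of that pattern (assuming an upstream llc with -mattr=+zbb; rol_example is an illustrative name):

; RUN: llc -mtriple=riscv64 -mattr=+zbb < %s
declare i32 @llvm.fshl.i32(i32, i32, i32)

define signext i32 @rol_example(i32 signext %a, i32 signext %b) {
  ; fshl with both data operands equal is a rotate left; expected to
  ; select rolw, as in rol_i32 above
  %r = call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
  ret i32 %r
}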
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
deleted file mode 100644
index b0e447b..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
+++ /dev/null
@@ -1,1051 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64I
-; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64ZBB
-
-declare i32 @llvm.ctlz.i32(i32, i1)
-
-define signext i32 @ctlz_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: ctlz_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB0_2
-; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addi a2, a2, 1365
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addi a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srliw a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB0_2:
-; RV64I-NEXT: li a0, 32
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: ctlz_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: clzw a0, a0
-; RV64ZBB-NEXT: ret
- %1 = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
- ret i32 %1
-}
-
-define signext i32 @log2_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: log2_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB1_2
-; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addi a2, a2, 1365
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addi a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srliw a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: j .LBB1_3
-; RV64I-NEXT: .LBB1_2:
-; RV64I-NEXT: li a0, 32
-; RV64I-NEXT: .LBB1_3: # %cond.end
-; RV64I-NEXT: li a1, 31
-; RV64I-NEXT: subw a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: log2_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: clzw a0, a0
-; RV64ZBB-NEXT: li a1, 31
-; RV64ZBB-NEXT: subw a0, a1, a0
-; RV64ZBB-NEXT: ret
- %1 = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
- %2 = sub i32 31, %1
- ret i32 %2
-}
-
-define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: log2_ceil_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: addiw a0, a0, -1
-; RV64I-NEXT: li s0, 32
-; RV64I-NEXT: li a1, 32
-; RV64I-NEXT: beqz a0, .LBB2_2
-; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addi a2, a2, 1365
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addi a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srliw a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a1, a0, 24
-; RV64I-NEXT: .LBB2_2: # %cond.end
-; RV64I-NEXT: subw a0, s0, a1
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: log2_ceil_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: addi a0, a0, -1
-; RV64ZBB-NEXT: clzw a0, a0
-; RV64ZBB-NEXT: li a1, 32
-; RV64ZBB-NEXT: subw a0, a1, a0
-; RV64ZBB-NEXT: ret
- %1 = sub i32 %a, 1
- %2 = call i32 @llvm.ctlz.i32(i32 %1, i1 false)
- %3 = sub i32 32, %2
- ret i32 %3
-}
-
-define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: findLastSet_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: srliw a0, a0, 1
-; RV64I-NEXT: or a0, s0, a0
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addi a2, a2, 1365
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addi a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srliw a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: xori a0, a0, 31
-; RV64I-NEXT: snez a1, s0
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: findLastSet_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: clzw a1, a0
-; RV64ZBB-NEXT: xori a1, a1, 31
-; RV64ZBB-NEXT: snez a0, a0
-; RV64ZBB-NEXT: addiw a0, a0, -1
-; RV64ZBB-NEXT: or a0, a0, a1
-; RV64ZBB-NEXT: ret
- %1 = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
- %2 = xor i32 31, %1
- %3 = icmp eq i32 %a, 0
- %4 = select i1 %3, i32 -1, i32 %2
- ret i32 %4
-}
-
-define i32 @ctlz_lshr_i32(i32 signext %a) {
-; RV64I-LABEL: ctlz_lshr_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a0, a0, 1
-; RV64I-NEXT: beqz a0, .LBB4_2
-; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: .cfi_def_cfa_offset 16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: .cfi_offset ra, -8
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addi a2, a2, 1365
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addi a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srliw a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB4_2:
-; RV64I-NEXT: li a0, 32
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: ctlz_lshr_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: srliw a0, a0, 1
-; RV64ZBB-NEXT: clzw a0, a0
-; RV64ZBB-NEXT: ret
- %1 = lshr i32 %a, 1
- %2 = call i32 @llvm.ctlz.i32(i32 %1, i1 false)
- ret i32 %2
-}
-
-declare i64 @llvm.ctlz.i64(i64, i1)
-
-define i64 @ctlz_i64(i64 %a) nounwind {
-; RV64I-LABEL: ctlz_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB5_2
-; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 32
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
-; RV64I-NEXT: slli a3, a2, 32
-; RV64I-NEXT: add a2, a2, a3
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: slli a1, a0, 8
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: slli a1, a0, 16
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: slli a1, a0, 32
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB5_2:
-; RV64I-NEXT: li a0, 64
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: ctlz_i64:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: clz a0, a0
-; RV64ZBB-NEXT: ret
- %1 = call i64 @llvm.ctlz.i64(i64 %a, i1 false)
- ret i64 %1
-}
-
-declare i32 @llvm.cttz.i32(i32, i1)
-
-define signext i32 @cttz_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: cttz_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB6_2
-; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: negw a1, a0
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 30667
-; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 27
-; RV64I-NEXT: lui a1, %hi(.LCPI6_0)
-; RV64I-NEXT: addi a1, a1, %lo(.LCPI6_0)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lbu a0, 0(a0)
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB6_2:
-; RV64I-NEXT: li a0, 32
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: cttz_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: ctzw a0, a0
-; RV64ZBB-NEXT: ret
- %1 = call i32 @llvm.cttz.i32(i32 %a, i1 false)
- ret i32 %1
-}
-
-define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: cttz_zero_undef_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: negw a1, a0
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 30667
-; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 27
-; RV64I-NEXT: lui a1, %hi(.LCPI7_0)
-; RV64I-NEXT: addi a1, a1, %lo(.LCPI7_0)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lbu a0, 0(a0)
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: cttz_zero_undef_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: ctzw a0, a0
-; RV64ZBB-NEXT: ret
- %1 = call i32 @llvm.cttz.i32(i32 %a, i1 true)
- ret i32 %1
-}
-
-define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: findFirstSet_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: negw a0, a0
-; RV64I-NEXT: and a0, s0, a0
-; RV64I-NEXT: lui a1, 30667
-; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 27
-; RV64I-NEXT: lui a1, %hi(.LCPI8_0)
-; RV64I-NEXT: addi a1, a1, %lo(.LCPI8_0)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lbu a0, 0(a0)
-; RV64I-NEXT: snez a1, s0
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: findFirstSet_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: ctzw a1, a0
-; RV64ZBB-NEXT: snez a0, a0
-; RV64ZBB-NEXT: addiw a0, a0, -1
-; RV64ZBB-NEXT: or a0, a0, a1
-; RV64ZBB-NEXT: ret
- %1 = call i32 @llvm.cttz.i32(i32 %a, i1 true)
- %2 = icmp eq i32 %a, 0
- %3 = select i1 %2, i32 -1, i32 %1
- ret i32 %3
-}
-
-define signext i32 @ffs_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: ffs_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: negw a0, a0
-; RV64I-NEXT: and a0, s0, a0
-; RV64I-NEXT: lui a1, 30667
-; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 27
-; RV64I-NEXT: lui a1, %hi(.LCPI9_0)
-; RV64I-NEXT: addi a1, a1, %lo(.LCPI9_0)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lbu a0, 0(a0)
-; RV64I-NEXT: addiw a0, a0, 1
-; RV64I-NEXT: seqz a1, s0
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: ffs_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: ctzw a1, a0
-; RV64ZBB-NEXT: addiw a1, a1, 1
-; RV64ZBB-NEXT: seqz a0, a0
-; RV64ZBB-NEXT: addiw a0, a0, -1
-; RV64ZBB-NEXT: and a0, a0, a1
-; RV64ZBB-NEXT: ret
- %1 = call i32 @llvm.cttz.i32(i32 %a, i1 true)
- %2 = add i32 %1, 1
- %3 = icmp eq i32 %a, 0
- %4 = select i1 %3, i32 0, i32 %2
- ret i32 %4
-}
-
-declare i64 @llvm.cttz.i64(i64, i1)
-
-define i64 @cttz_i64(i64 %a) nounwind {
-; RV64I-LABEL: cttz_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB10_2
-; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: neg a1, a0
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, %hi(.LCPI10_0)
-; RV64I-NEXT: ld a1, %lo(.LCPI10_0)(a1)
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srli a0, a0, 58
-; RV64I-NEXT: lui a1, %hi(.LCPI10_1)
-; RV64I-NEXT: addi a1, a1, %lo(.LCPI10_1)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lbu a0, 0(a0)
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB10_2:
-; RV64I-NEXT: li a0, 64
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: cttz_i64:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: ctz a0, a0
-; RV64ZBB-NEXT: ret
- %1 = call i64 @llvm.cttz.i64(i64 %a, i1 false)
- ret i64 %1
-}
-
-declare i32 @llvm.ctpop.i32(i32)
-
-define signext i32 @ctpop_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: ctpop_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addi a2, a2, 1365
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addi a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srliw a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: ctpop_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: cpopw a0, a0
-; RV64ZBB-NEXT: ret
- %1 = call i32 @llvm.ctpop.i32(i32 %a)
- ret i32 %1
-}
-
-define signext i32 @ctpop_i32_load(ptr %p) nounwind {
-; RV64I-LABEL: ctpop_i32_load:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addi a2, a2, 1365
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addi a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srliw a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: ctpop_i32_load:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: lw a0, 0(a0)
-; RV64ZBB-NEXT: cpopw a0, a0
-; RV64ZBB-NEXT: ret
- %a = load i32, ptr %p
- %1 = call i32 @llvm.ctpop.i32(i32 %a)
- ret i32 %1
-}
-
-declare i64 @llvm.ctpop.i64(i64)
-
-define i64 @ctpop_i64(i64 %a) nounwind {
-; RV64I-LABEL: ctpop_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
-; RV64I-NEXT: slli a3, a2, 32
-; RV64I-NEXT: add a2, a2, a3
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: slli a1, a0, 8
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: slli a1, a0, 16
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: slli a1, a0, 32
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: ctpop_i64:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: cpop a0, a0
-; RV64ZBB-NEXT: ret
- %1 = call i64 @llvm.ctpop.i64(i64 %a)
- ret i64 %1
-}
-
-define signext i32 @sextb_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: sextb_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 56
-; RV64I-NEXT: srai a0, a0, 56
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: sextb_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: sext.b a0, a0
-; RV64ZBB-NEXT: ret
- %shl = shl i32 %a, 24
- %shr = ashr exact i32 %shl, 24
- ret i32 %shr
-}
-
-define i64 @sextb_i64(i64 %a) nounwind {
-; RV64I-LABEL: sextb_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 56
-; RV64I-NEXT: srai a0, a0, 56
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: sextb_i64:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: sext.b a0, a0
-; RV64ZBB-NEXT: ret
- %shl = shl i64 %a, 56
- %shr = ashr exact i64 %shl, 56
- ret i64 %shr
-}
-
-define signext i32 @sexth_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: sexth_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srai a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: sexth_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: sext.h a0, a0
-; RV64ZBB-NEXT: ret
- %shl = shl i32 %a, 16
- %shr = ashr exact i32 %shl, 16
- ret i32 %shr
-}
-
-define i64 @sexth_i64(i64 %a) nounwind {
-; RV64I-LABEL: sexth_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srai a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: sexth_i64:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: sext.h a0, a0
-; RV64ZBB-NEXT: ret
- %shl = shl i64 %a, 48
- %shr = ashr exact i64 %shl, 48
- ret i64 %shr
-}
-
-define signext i32 @min_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: min_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: blt a0, a1, .LBB18_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB18_2:
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: min_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: min a0, a0, a1
-; RV64ZBB-NEXT: ret
- %cmp = icmp slt i32 %a, %b
- %cond = select i1 %cmp, i32 %a, i32 %b
- ret i32 %cond
-}
-
-define i64 @min_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: min_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: blt a0, a1, .LBB19_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB19_2:
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: min_i64:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: min a0, a0, a1
-; RV64ZBB-NEXT: ret
- %cmp = icmp slt i64 %a, %b
- %cond = select i1 %cmp, i64 %a, i64 %b
- ret i64 %cond
-}
-
-define signext i32 @max_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: max_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: blt a1, a0, .LBB20_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB20_2:
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: max_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: max a0, a0, a1
-; RV64ZBB-NEXT: ret
- %cmp = icmp sgt i32 %a, %b
- %cond = select i1 %cmp, i32 %a, i32 %b
- ret i32 %cond
-}
-
-define i64 @max_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: max_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: blt a1, a0, .LBB21_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB21_2:
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: max_i64:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: max a0, a0, a1
-; RV64ZBB-NEXT: ret
- %cmp = icmp sgt i64 %a, %b
- %cond = select i1 %cmp, i64 %a, i64 %b
- ret i64 %cond
-}
-
-define signext i32 @minu_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: minu_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bltu a0, a1, .LBB22_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB22_2:
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: minu_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: minu a0, a0, a1
-; RV64ZBB-NEXT: ret
- %cmp = icmp ult i32 %a, %b
- %cond = select i1 %cmp, i32 %a, i32 %b
- ret i32 %cond
-}
-
-define i64 @minu_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: minu_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bltu a0, a1, .LBB23_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB23_2:
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: minu_i64:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: minu a0, a0, a1
-; RV64ZBB-NEXT: ret
- %cmp = icmp ult i64 %a, %b
- %cond = select i1 %cmp, i64 %a, i64 %b
- ret i64 %cond
-}
-
-define signext i32 @maxu_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: maxu_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bltu a1, a0, .LBB24_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB24_2:
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: maxu_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: maxu a0, a0, a1
-; RV64ZBB-NEXT: ret
- %cmp = icmp ugt i32 %a, %b
- %cond = select i1 %cmp, i32 %a, i32 %b
- ret i32 %cond
-}
-
-define i64 @maxu_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: maxu_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: bltu a1, a0, .LBB25_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB25_2:
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: maxu_i64:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: maxu a0, a0, a1
-; RV64ZBB-NEXT: ret
- %cmp = icmp ugt i64 %a, %b
- %cond = select i1 %cmp, i64 %a, i64 %b
- ret i64 %cond
-}
-
-declare i32 @llvm.abs.i32(i32, i1 immarg)
-
-define i32 @abs_i32(i32 %x) {
-; RV64I-LABEL: abs_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sraiw a1, a0, 31
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: abs_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: sraiw a1, a0, 31
-; RV64ZBB-NEXT: xor a0, a0, a1
-; RV64ZBB-NEXT: subw a0, a0, a1
-; RV64ZBB-NEXT: ret
- %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
- ret i32 %abs
-}
-
-define signext i32 @abs_i32_sext(i32 signext %x) {
-; RV64I-LABEL: abs_i32_sext:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sraiw a1, a0, 31
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: abs_i32_sext:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: sraiw a1, a0, 31
-; RV64ZBB-NEXT: xor a0, a0, a1
-; RV64ZBB-NEXT: subw a0, a0, a1
-; RV64ZBB-NEXT: ret
- %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
- ret i32 %abs
-}
-
-declare i64 @llvm.abs.i64(i64, i1 immarg)
-
-define i64 @abs_i64(i64 %x) {
-; RV64I-LABEL: abs_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srai a1, a0, 63
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: abs_i64:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: neg a1, a0
-; RV64ZBB-NEXT: max a0, a0, a1
-; RV64ZBB-NEXT: ret
- %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
- ret i64 %abs
-}
-
-define i32 @zexth_i32(i32 %a) nounwind {
-; RV64I-LABEL: zexth_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: zexth_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: zext.h a0, a0
-; RV64ZBB-NEXT: ret
- %and = and i32 %a, 65535
- ret i32 %and
-}
-
-define i64 @zexth_i64(i64 %a) nounwind {
-; RV64I-LABEL: zexth_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: zexth_i64:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: zext.h a0, a0
-; RV64ZBB-NEXT: ret
- %and = and i64 %a, 65535
- ret i64 %and
-}
-
-declare i32 @llvm.bswap.i32(i32)
-
-define signext i32 @bswap_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: bswap_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: lui a2, 16
-; RV64I-NEXT: addiw a2, a2, -256
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: srliw a3, a0, 24
-; RV64I-NEXT: or a1, a1, a3
-; RV64I-NEXT: and a2, a0, a2
-; RV64I-NEXT: slliw a2, a2, 8
-; RV64I-NEXT: slliw a0, a0, 24
-; RV64I-NEXT: or a0, a0, a2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: bswap_i32:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: rev8 a0, a0
-; RV64ZBB-NEXT: srai a0, a0, 32
-; RV64ZBB-NEXT: ret
- %1 = tail call i32 @llvm.bswap.i32(i32 %a)
- ret i32 %1
-}
-
-; Similar to bswap_i32, but the result is not sign extended.
-define void @bswap_i32_nosext(i32 signext %a, ptr %x) nounwind {
-; RV64I-LABEL: bswap_i32_nosext:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a2, a0, 8
-; RV64I-NEXT: lui a3, 16
-; RV64I-NEXT: addi a3, a3, -256
-; RV64I-NEXT: and a2, a2, a3
-; RV64I-NEXT: srliw a4, a0, 24
-; RV64I-NEXT: or a2, a2, a4
-; RV64I-NEXT: and a3, a0, a3
-; RV64I-NEXT: slli a3, a3, 8
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: or a0, a0, a3
-; RV64I-NEXT: or a0, a0, a2
-; RV64I-NEXT: sw a0, 0(a1)
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: bswap_i32_nosext:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: rev8 a0, a0
-; RV64ZBB-NEXT: srli a0, a0, 32
-; RV64ZBB-NEXT: sw a0, 0(a1)
-; RV64ZBB-NEXT: ret
- %1 = tail call i32 @llvm.bswap.i32(i32 %a)
- store i32 %1, ptr %x
- ret void
-}
-
-declare i64 @llvm.bswap.i64(i64)
-
-define i64 @bswap_i64(i64 %a) {
-; RV64I-LABEL: bswap_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srli a1, a0, 40
-; RV64I-NEXT: lui a2, 16
-; RV64I-NEXT: addiw a2, a2, -256
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: srli a3, a0, 56
-; RV64I-NEXT: or a1, a1, a3
-; RV64I-NEXT: srli a3, a0, 24
-; RV64I-NEXT: lui a4, 4080
-; RV64I-NEXT: and a3, a3, a4
-; RV64I-NEXT: srli a5, a0, 8
-; RV64I-NEXT: srliw a5, a5, 24
-; RV64I-NEXT: slli a5, a5, 24
-; RV64I-NEXT: or a3, a5, a3
-; RV64I-NEXT: or a1, a3, a1
-; RV64I-NEXT: and a4, a0, a4
-; RV64I-NEXT: slli a4, a4, 24
-; RV64I-NEXT: srliw a3, a0, 24
-; RV64I-NEXT: slli a3, a3, 32
-; RV64I-NEXT: or a3, a4, a3
-; RV64I-NEXT: and a2, a0, a2
-; RV64I-NEXT: slli a2, a2, 40
-; RV64I-NEXT: slli a0, a0, 56
-; RV64I-NEXT: or a0, a0, a2
-; RV64I-NEXT: or a0, a0, a3
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBB-LABEL: bswap_i64:
-; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: rev8 a0, a0
-; RV64ZBB-NEXT: ret
- %1 = call i64 @llvm.bswap.i64(i64 %a)
- ret i64 %1
-}
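
The bit-count tests above likewise need no special flag. A minimal sketch (assuming an upstream llc with -mattr=+zbb; ctpop_example is an illustrative name):

; RUN: llc -mtriple=riscv64 -mattr=+zbb < %s
declare i32 @llvm.ctpop.i32(i32)

define signext i32 @ctpop_example(i32 signext %a) {
  ; expected to select cpopw, as in ctpop_i32 above
  %r = call i32 @llvm.ctpop.i32(i32 %a)
  ret i32 %r
}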
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbc-intrinsic.ll
deleted file mode 100644
index 9b37e87..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbc-intrinsic.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+zbc -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64ZBC
-
-declare i64 @llvm.riscv.clmulr.i64(i64 %a, i64 %b)
-
-define i64 @clmul64r(i64 %a, i64 %b) nounwind {
-; RV64ZBC-LABEL: clmul64r:
-; RV64ZBC: # %bb.0:
-; RV64ZBC-NEXT: clmulr a0, a0, a1
-; RV64ZBC-NEXT: ret
- %tmp = call i64 @llvm.riscv.clmulr.i64(i64 %a, i64 %b)
- ret i64 %tmp
-}
-
-declare i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b)
-
-define signext i32 @clmul32r(i32 signext %a, i32 signext %b) nounwind {
-; RV64ZBC-LABEL: clmul32r:
-; RV64ZBC: # %bb.0:
-; RV64ZBC-NEXT: slli a1, a1, 32
-; RV64ZBC-NEXT: slli a0, a0, 32
-; RV64ZBC-NEXT: clmulr a0, a0, a1
-; RV64ZBC-NEXT: srai a0, a0, 32
-; RV64ZBC-NEXT: ret
- %tmp = call i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b)
- ret i32 %tmp
-}
-
-; FIXME: We could avoid the slli instructions by using clmul+srli+sext.w since
-; the inputs are zero extended.
-define signext i32 @clmul32r_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
-; RV64ZBC-LABEL: clmul32r_zext:
-; RV64ZBC: # %bb.0:
-; RV64ZBC-NEXT: slli a1, a1, 32
-; RV64ZBC-NEXT: slli a0, a0, 32
-; RV64ZBC-NEXT: clmulr a0, a0, a1
-; RV64ZBC-NEXT: srai a0, a0, 32
-; RV64ZBC-NEXT: ret
- %tmp = call i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b)
- ret i32 %tmp
-}
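A minimal sketch of the shorter sequence the FIXME above suggests, as an illustration rather than the actual lowering: with zero-extended inputs the 64-bit carry-less product fits in 63 bits, and bits [62:31] are exactly the 32-bit clmulr result.

    clmul  a0, a0, a1   # full 64-bit carry-less product of the zero-extended inputs
    srli   a0, a0, 31   # move bits [62:31] down to bits [31:0]
    sext.w a0, a0       # sign-extend for the signext return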
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbc-zbkc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbc-zbkc-intrinsic.ll
deleted file mode 100644
index e0c9740..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbc-zbkc-intrinsic.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+zbc -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64ZBC-ZBKC
-; RUN: llc -mtriple=riscv64 -mattr=+zbkc -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64ZBC-ZBKC
-
-declare i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
-
-define i64 @clmul64(i64 %a, i64 %b) nounwind {
-; RV64ZBC-ZBKC-LABEL: clmul64:
-; RV64ZBC-ZBKC: # %bb.0:
-; RV64ZBC-ZBKC-NEXT: clmul a0, a0, a1
-; RV64ZBC-ZBKC-NEXT: ret
- %tmp = call i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
- ret i64 %tmp
-}
-
-declare i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b)
-
-define i64 @clmul64h(i64 %a, i64 %b) nounwind {
-; RV64ZBC-ZBKC-LABEL: clmul64h:
-; RV64ZBC-ZBKC: # %bb.0:
-; RV64ZBC-ZBKC-NEXT: clmulh a0, a0, a1
-; RV64ZBC-ZBKC-NEXT: ret
- %tmp = call i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b)
- ret i64 %tmp
-}
-
-declare i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
-
-define signext i32 @clmul32(i32 signext %a, i32 signext %b) nounwind {
-; RV64ZBC-ZBKC-LABEL: clmul32:
-; RV64ZBC-ZBKC: # %bb.0:
-; RV64ZBC-ZBKC-NEXT: clmul a0, a0, a1
-; RV64ZBC-ZBKC-NEXT: sext.w a0, a0
-; RV64ZBC-ZBKC-NEXT: ret
- %tmp = call i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
- ret i32 %tmp
-}
-
-declare i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
-
-define signext i32 @clmul32h(i32 signext %a, i32 signext %b) nounwind {
-; RV64ZBC-ZBKC-LABEL: clmul32h:
-; RV64ZBC-ZBKC: # %bb.0:
-; RV64ZBC-ZBKC-NEXT: slli a1, a1, 32
-; RV64ZBC-ZBKC-NEXT: slli a0, a0, 32
-; RV64ZBC-ZBKC-NEXT: clmulh a0, a0, a1
-; RV64ZBC-ZBKC-NEXT: srai a0, a0, 32
-; RV64ZBC-ZBKC-NEXT: ret
- %tmp = call i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
- ret i32 %tmp
-}
-
-; FIXME: We could avoid the slli instructions by using clmul+srai since the
-; inputs are zero extended.
-define signext i32 @clmul32h_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
-; RV64ZBC-ZBKC-LABEL: clmul32h_zext:
-; RV64ZBC-ZBKC: # %bb.0:
-; RV64ZBC-ZBKC-NEXT: slli a1, a1, 32
-; RV64ZBC-ZBKC-NEXT: slli a0, a0, 32
-; RV64ZBC-ZBKC-NEXT: clmulh a0, a0, a1
-; RV64ZBC-ZBKC-NEXT: srai a0, a0, 32
-; RV64ZBC-ZBKC-NEXT: ret
- %tmp = call i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
- ret i32 %tmp
-}
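Likewise, a sketch of the clmul+srai alternative named in the FIXME above, again assuming zero-extended inputs so the carry-less product fits in 63 bits and its high word is non-negative:

    clmul a0, a0, a1    # low 64 bits of the carry-less product
    srai  a0, a0, 32    # bits [63:32] are the 32-bit clmulh result, already sign-extended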
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbkb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbkb-intrinsic.ll
deleted file mode 100644
index 3169f65..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbkb-intrinsic.ll
+++ /dev/null
@@ -1,73 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
-; RUN: | FileCheck %s -check-prefix=RV64ZBKB
-
-declare i64 @llvm.riscv.brev8.i64(i64)
-
-define i64 @brev8(i64 %a) nounwind {
-; RV64ZBKB-LABEL: brev8:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: brev8 a0, a0
-; RV64ZBKB-NEXT: ret
- %val = call i64 @llvm.riscv.brev8.i64(i64 %a)
- ret i64 %val
-}
-
-; Test that brev8 is recognized as preserving zero extension.
-define zeroext i16 @brev8_knownbits(i16 zeroext %a) nounwind {
-; RV64ZBKB-LABEL: brev8_knownbits:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: brev8 a0, a0
-; RV64ZBKB-NEXT: ret
- %zext = zext i16 %a to i64
- %val = call i64 @llvm.riscv.brev8.i64(i64 %zext)
- %trunc = trunc i64 %val to i16
- ret i16 %trunc
-}
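brev8 reverses the bits within each byte independently, so all-zero bytes stay all-zero and a value zero-extended from i16 stays zero-extended with no re-masking. A worked value:

    # a0 = 0x0000000000001234 (zero-extended i16)
    brev8 a0, a0       # a0 = 0x000000000000482c; the upper bytes remain zero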
-
-declare i64 @llvm.bswap.i64(i64)
-
-define i64 @rev8_i64(i64 %a) {
-; RV64ZBKB-LABEL: rev8_i64:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: rev8 a0, a0
-; RV64ZBKB-NEXT: ret
- %1 = call i64 @llvm.bswap.i64(i64 %a)
- ret i64 %1
-}
-
-declare i32 @llvm.riscv.brev8.i32(i32)
-
-define signext i32 @brev8_i32(i32 signext %a) nounwind {
-; RV64ZBKB-LABEL: brev8_i32:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: brev8 a0, a0
-; RV64ZBKB-NEXT: sext.w a0, a0
-; RV64ZBKB-NEXT: ret
- %val = call i32 @llvm.riscv.brev8.i32(i32 %a)
- ret i32 %val
-}
-
-; Test that brev8 is recognized as preserving zero extension.
-define zeroext i16 @brev8_i32_knownbits(i16 zeroext %a) nounwind {
-; RV64ZBKB-LABEL: brev8_i32_knownbits:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: brev8 a0, a0
-; RV64ZBKB-NEXT: ret
- %zext = zext i16 %a to i32
- %val = call i32 @llvm.riscv.brev8.i32(i32 %zext)
- %trunc = trunc i32 %val to i16
- ret i16 %trunc
-}
-
-declare i32 @llvm.bswap.i32(i32)
-
-define signext i32 @rev8_i32(i32 signext %a) {
-; RV64ZBKB-LABEL: rev8_i32:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: rev8 a0, a0
-; RV64ZBKB-NEXT: srai a0, a0, 32
-; RV64ZBKB-NEXT: ret
- %1 = call i32 @llvm.bswap.i32(i32 %a)
- ret i32 %1
-}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbkb.ll
deleted file mode 100644
index 8e265983..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbkb.ll
+++ /dev/null
@@ -1,370 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64I
-; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64ZBKB
-
-define signext i32 @pack_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: pack_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: slliw a1, a1, 16
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: pack_i32:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: packw a0, a0, a1
-; RV64ZBKB-NEXT: ret
- %shl = and i32 %a, 65535
- %shl1 = shl i32 %b, 16
- %or = or i32 %shl1, %shl
- ret i32 %or
-}
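For reference, packw concatenates the low 16 bits of its sources (rs1 supplies bits 15:0, rs2 bits 31:16) and sign-extends the 32-bit result, which is why the whole and/shl/or pattern folds into a single instruction:

    packw rd, rs1, rs2   # rd = sext32((rs2[15:0] << 16) | rs1[15:0])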
-
-define signext i32 @pack_i32_2(i16 zeroext %a, i16 zeroext %b) nounwind {
-; RV64I-LABEL: pack_i32_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slliw a1, a1, 16
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: pack_i32_2:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: packw a0, a0, a1
-; RV64ZBKB-NEXT: ret
- %zexta = zext i16 %a to i32
- %zextb = zext i16 %b to i32
- %shl1 = shl i32 %zextb, 16
- %or = or i32 %shl1, %zexta
- ret i32 %or
-}
-
-; Test case where we don't have a sign_extend_inreg after the or.
-define signext i32 @pack_i32_3(i16 zeroext %0, i16 zeroext %1, i32 signext %2) {
-; RV64I-LABEL: pack_i32_3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: addw a0, a0, a2
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: pack_i32_3:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: packw a0, a1, a0
-; RV64ZBKB-NEXT: addw a0, a0, a2
-; RV64ZBKB-NEXT: ret
- %4 = zext i16 %0 to i32
- %5 = shl nuw i32 %4, 16
- %6 = zext i16 %1 to i32
- %7 = or i32 %5, %6
- %8 = add i32 %7, %2
- ret i32 %8
-}
-
-define i64 @pack_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: pack_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: pack_i64:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: pack a0, a0, a1
-; RV64ZBKB-NEXT: ret
- %shl = and i64 %a, 4294967295
- %shl1 = shl i64 %b, 32
- %or = or i64 %shl1, %shl
- ret i64 %or
-}
-
-define i64 @pack_i64_2(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: pack_i64_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: pack_i64_2:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: pack a0, a0, a1
-; RV64ZBKB-NEXT: ret
- %zexta = zext i32 %a to i64
- %zextb = zext i32 %b to i64
- %shl1 = shl i64 %zextb, 32
- %or = or i64 %shl1, %zexta
- ret i64 %or
-}
-
-define i64 @pack_i64_3(ptr %0, ptr %1) {
-; RV64I-LABEL: pack_i64_3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: lwu a1, 0(a1)
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: pack_i64_3:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: lw a0, 0(a0)
-; RV64ZBKB-NEXT: lwu a1, 0(a1)
-; RV64ZBKB-NEXT: pack a0, a1, a0
-; RV64ZBKB-NEXT: ret
- %3 = load i32, ptr %0, align 4
- %4 = zext i32 %3 to i64
- %5 = shl i64 %4, 32
- %6 = load i32, ptr %1, align 4
- %7 = zext i32 %6 to i64
- %8 = or i64 %5, %7
- ret i64 %8
-}
-
-define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: packh_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: slli a1, a1, 56
-; RV64I-NEXT: srli a1, a1, 48
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: packh_i32:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: packh a0, a0, a1
-; RV64ZBKB-NEXT: ret
- %and = and i32 %a, 255
- %and1 = shl i32 %b, 8
- %shl = and i32 %and1, 65280
- %or = or i32 %shl, %and
- ret i32 %or
-}
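packh is the byte-sized analogue and zero-extends to XLEN, so the same instruction covers the i32, i64 and i16 variants below:

    packh rd, rs1, rs2   # rd = (rs2[7:0] << 8) | rs1[7:0], upper bits zero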
-
-define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
-; RV64I-LABEL: packh_i32_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: slliw a1, a1, 8
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: packh_i32_2:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: packh a0, a0, a1
-; RV64ZBKB-NEXT: ret
- %and = and i32 %a, 255
- %and1 = and i32 %b, 255
- %shl = shl i32 %and1, 8
- %or = or i32 %shl, %and
- ret i32 %or
-}
-
-define i64 @packh_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: packh_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: slli a1, a1, 56
-; RV64I-NEXT: srli a1, a1, 48
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: packh_i64:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: packh a0, a0, a1
-; RV64ZBKB-NEXT: ret
- %and = and i64 %a, 255
- %and1 = shl i64 %b, 8
- %shl = and i64 %and1, 65280
- %or = or i64 %shl, %and
- ret i64 %or
-}
-
-define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: packh_i64_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: slli a1, a1, 8
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: packh_i64_2:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: packh a0, a0, a1
-; RV64ZBKB-NEXT: ret
- %and = and i64 %a, 255
- %and1 = and i64 %b, 255
- %shl = shl i64 %and1, 8
- %or = or i64 %shl, %and
- ret i64 %or
-}
-
-define zeroext i16 @packh_i16(i8 zeroext %a, i8 zeroext %b) nounwind {
-; RV64I-LABEL: packh_i16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slliw a1, a1, 8
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: packh_i16:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: packh a0, a0, a1
-; RV64ZBKB-NEXT: ret
- %zext = zext i8 %a to i16
- %zext1 = zext i8 %b to i16
- %shl = shl i16 %zext1, 8
- %or = or i16 %shl, %zext
- ret i16 %or
-}
-
-define zeroext i16 @packh_i16_2(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2) {
-; RV64I-LABEL: packh_i16_2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: slli a0, a0, 8
-; RV64I-NEXT: or a0, a0, a2
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: packh_i16_2:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: add a0, a1, a0
-; RV64ZBKB-NEXT: packh a0, a2, a0
-; RV64ZBKB-NEXT: ret
- %4 = add i8 %1, %0
- %5 = zext i8 %4 to i16
- %6 = shl i16 %5, 8
- %7 = zext i8 %2 to i16
- %8 = or i16 %6, %7
- ret i16 %8
-}
-
-define i64 @pack_i64_allWUsers(i32 signext %0, i32 signext %1, i32 signext %2) {
-; RV64I-LABEL: pack_i64_allWUsers:
-; RV64I: # %bb.0:
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: slli a2, a2, 32
-; RV64I-NEXT: srli a2, a2, 32
-; RV64I-NEXT: or a0, a0, a2
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: pack_i64_allWUsers:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: add a0, a1, a0
-; RV64ZBKB-NEXT: pack a0, a2, a0
-; RV64ZBKB-NEXT: ret
- %4 = add i32 %1, %0
- %5 = zext i32 %4 to i64
- %6 = shl i64 %5, 32
- %7 = zext i32 %2 to i64
- %8 = or i64 %6, %7
- ret i64 %8
-}
-
-define signext i32 @pack_i32_allWUsers(i16 zeroext %0, i16 zeroext %1, i16 zeroext %2) {
-; RV64I-LABEL: pack_i32_allWUsers:
-; RV64I: # %bb.0:
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: slliw a0, a0, 16
-; RV64I-NEXT: or a0, a0, a2
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: pack_i32_allWUsers:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: add a0, a1, a0
-; RV64ZBKB-NEXT: packw a0, a2, a0
-; RV64ZBKB-NEXT: ret
- %4 = add i16 %1, %0
- %5 = zext i16 %4 to i32
- %6 = shl i32 %5, 16
- %7 = zext i16 %2 to i32
- %8 = or i32 %6, %7
- ret i32 %8
-}
-
-define i64 @pack_i64_imm() {
-; RV64I-LABEL: pack_i64_imm:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 65793
-; RV64I-NEXT: addiw a0, a0, 16
-; RV64I-NEXT: slli a1, a0, 32
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: pack_i64_imm:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: lui a0, 65793
-; RV64ZBKB-NEXT: addi a0, a0, 16
-; RV64ZBKB-NEXT: pack a0, a0, a0
-; RV64ZBKB-NEXT: ret
- ret i64 1157442765409226768 ; 0x1010101010101010
-}
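Step by step, the Zbkb sequence materializes the splatted word once and then duplicates it:

    lui  a0, 65793      # 65793 << 12    = 0x10101000
    addi a0, a0, 16     # + 16           = 0x10101010
    pack a0, a0, a0     # low word twice = 0x1010101010101010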
-
-define i32 @zexth_i32(i32 %a) nounwind {
-; RV64I-LABEL: zexth_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: zexth_i32:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: zext.h a0, a0
-; RV64ZBKB-NEXT: ret
- %and = and i32 %a, 65535
- ret i32 %and
-}
-
-define i64 @zexth_i64(i64 %a) nounwind {
-; RV64I-LABEL: zexth_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: zexth_i64:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: zext.h a0, a0
-; RV64ZBKB-NEXT: ret
- %and = and i64 %a, 65535
- ret i64 %and
-}
-
-define i32 @zext_i16_to_i32(i16 %a) nounwind {
-; RV64I-LABEL: zext_i16_to_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: zext_i16_to_i32:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: zext.h a0, a0
-; RV64ZBKB-NEXT: ret
- %1 = zext i16 %a to i32
- ret i32 %1
-}
-
-define i64 @zext_i16_to_i64(i16 %a) nounwind {
-; RV64I-LABEL: zext_i16_to_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 48
-; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: ret
-;
-; RV64ZBKB-LABEL: zext_i16_to_i64:
-; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: zext.h a0, a0
-; RV64ZBKB-NEXT: ret
- %1 = zext i16 %a to i64
- ret i64 %1
-}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll
deleted file mode 100644
index 2db8e2c..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll
+++ /dev/null
@@ -1,1159 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefixes=CHECK,RV64I
-; RUN: llc -mtriple=riscv64 -mattr=+zbs -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefixes=CHECK,RV64ZBS
-
-define signext i32 @bclr_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: bclr_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sllw a1, a2, a1
-; RV64I-NEXT: not a1, a1
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclr_i32:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: andi a1, a1, 31
-; RV64ZBS-NEXT: bclr a0, a0, a1
-; RV64ZBS-NEXT: sext.w a0, a0
-; RV64ZBS-NEXT: ret
- %and = and i32 %b, 31
- %shl = shl nuw i32 1, %and
- %neg = xor i32 %shl, -1
- %and1 = and i32 %neg, %a
- ret i32 %and1
-}
-
-define signext i32 @bclr_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: bclr_i32_no_mask:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sllw a1, a2, a1
-; RV64I-NEXT: not a1, a1
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclr_i32_no_mask:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bclr a0, a0, a1
-; RV64ZBS-NEXT: sext.w a0, a0
-; RV64ZBS-NEXT: ret
- %shl = shl i32 1, %b
- %neg = xor i32 %shl, -1
- %and1 = and i32 %neg, %a
- ret i32 %and1
-}
-
-define signext i32 @bclr_i32_load(ptr %p, i32 signext %b) nounwind {
-; RV64I-LABEL: bclr_i32_load:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sllw a1, a2, a1
-; RV64I-NEXT: not a1, a1
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclr_i32_load:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: lw a0, 0(a0)
-; RV64ZBS-NEXT: bclr a0, a0, a1
-; RV64ZBS-NEXT: sext.w a0, a0
-; RV64ZBS-NEXT: ret
- %a = load i32, ptr %p
- %shl = shl i32 1, %b
- %neg = xor i32 %shl, -1
- %and1 = and i32 %neg, %a
- ret i32 %and1
-}
-
-define i64 @bclr_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: bclr_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sll a1, a2, a1
-; RV64I-NEXT: not a1, a1
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclr_i64:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bclr a0, a0, a1
-; RV64ZBS-NEXT: ret
- %and = and i64 %b, 63
- %shl = shl nuw i64 1, %and
- %neg = xor i64 %shl, -1
- %and1 = and i64 %neg, %a
- ret i64 %and1
-}
-
-define i64 @bclr_i64_no_mask(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: bclr_i64_no_mask:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sll a1, a2, a1
-; RV64I-NEXT: not a1, a1
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclr_i64_no_mask:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bclr a0, a0, a1
-; RV64ZBS-NEXT: ret
- %shl = shl i64 1, %b
- %neg = xor i64 %shl, -1
- %and1 = and i64 %neg, %a
- ret i64 %and1
-}
-
-define signext i32 @bset_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: bset_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sllw a1, a2, a1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bset_i32:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: andi a1, a1, 31
-; RV64ZBS-NEXT: bset a0, a0, a1
-; RV64ZBS-NEXT: sext.w a0, a0
-; RV64ZBS-NEXT: ret
- %and = and i32 %b, 31
- %shl = shl nuw i32 1, %and
- %or = or i32 %shl, %a
- ret i32 %or
-}
-
-define signext i32 @bset_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: bset_i32_no_mask:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sllw a1, a2, a1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bset_i32_no_mask:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bset a0, a0, a1
-; RV64ZBS-NEXT: sext.w a0, a0
-; RV64ZBS-NEXT: ret
- %shl = shl i32 1, %b
- %or = or i32 %shl, %a
- ret i32 %or
-}
-
-define signext i32 @bset_i32_load(ptr %p, i32 signext %b) nounwind {
-; RV64I-LABEL: bset_i32_load:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sllw a1, a2, a1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bset_i32_load:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: lw a0, 0(a0)
-; RV64ZBS-NEXT: bset a0, a0, a1
-; RV64ZBS-NEXT: sext.w a0, a0
-; RV64ZBS-NEXT: ret
- %a = load i32, ptr %p
- %shl = shl i32 1, %b
- %or = or i32 %shl, %a
- ret i32 %or
-}
-
-; We can use bset for 1 << x by setting the first source to zero.
-define signext i32 @bset_i32_zero(i32 signext %a) nounwind {
-; RV64I-LABEL: bset_i32_zero:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 1
-; RV64I-NEXT: sllw a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bset_i32_zero:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bset a0, zero, a0
-; RV64ZBS-NEXT: sext.w a0, a0
-; RV64ZBS-NEXT: ret
- %shl = shl i32 1, %a
- ret i32 %shl
-}
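bset computes rs1 | (1 << (rs2 & 63)) on RV64, so pairing it with the hard-wired zero register yields the shifted bit on its own, as the comment above notes:

    bset a0, zero, a0   # a0 = 0 | (1 << a0) = 1 << a0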
-
-define i64 @bset_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: bset_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sll a1, a2, a1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bset_i64:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bset a0, a0, a1
-; RV64ZBS-NEXT: ret
- %conv = and i64 %b, 63
- %shl = shl nuw i64 1, %conv
- %or = or i64 %shl, %a
- ret i64 %or
-}
-
-define i64 @bset_i64_no_mask(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: bset_i64_no_mask:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sll a1, a2, a1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bset_i64_no_mask:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bset a0, a0, a1
-; RV64ZBS-NEXT: ret
- %shl = shl i64 1, %b
- %or = or i64 %shl, %a
- ret i64 %or
-}
-
-; We can use bset for 1 << x by setting the first source to zero.
-define signext i64 @bset_i64_zero(i64 signext %a) nounwind {
-; RV64I-LABEL: bset_i64_zero:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 1
-; RV64I-NEXT: sll a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bset_i64_zero:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bset a0, zero, a0
-; RV64ZBS-NEXT: ret
- %shl = shl i64 1, %a
- ret i64 %shl
-}
-
-define signext i32 @binv_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: binv_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sllw a1, a2, a1
-; RV64I-NEXT: xor a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: binv_i32:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: andi a1, a1, 31
-; RV64ZBS-NEXT: binv a0, a0, a1
-; RV64ZBS-NEXT: sext.w a0, a0
-; RV64ZBS-NEXT: ret
- %and = and i32 %b, 31
- %shl = shl nuw i32 1, %and
- %xor = xor i32 %shl, %a
- ret i32 %xor
-}
-
-define signext i32 @binv_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: binv_i32_no_mask:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sllw a1, a2, a1
-; RV64I-NEXT: xor a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: binv_i32_no_mask:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: binv a0, a0, a1
-; RV64ZBS-NEXT: sext.w a0, a0
-; RV64ZBS-NEXT: ret
- %shl = shl i32 1, %b
- %xor = xor i32 %shl, %a
- ret i32 %xor
-}
-
-define signext i32 @binv_i32_load(ptr %p, i32 signext %b) nounwind {
-; RV64I-LABEL: binv_i32_load:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sllw a1, a2, a1
-; RV64I-NEXT: xor a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: binv_i32_load:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: lw a0, 0(a0)
-; RV64ZBS-NEXT: binv a0, a0, a1
-; RV64ZBS-NEXT: sext.w a0, a0
-; RV64ZBS-NEXT: ret
- %a = load i32, ptr %p
- %shl = shl i32 1, %b
- %xor = xor i32 %shl, %a
- ret i32 %xor
-}
-
-define i64 @binv_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: binv_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sll a1, a2, a1
-; RV64I-NEXT: xor a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: binv_i64:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: binv a0, a0, a1
-; RV64ZBS-NEXT: ret
- %conv = and i64 %b, 63
- %shl = shl nuw i64 1, %conv
- %xor = xor i64 %shl, %a
- ret i64 %xor
-}
-
-define i64 @binv_i64_no_mask(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: binv_i64_no_mask:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sll a1, a2, a1
-; RV64I-NEXT: xor a0, a1, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: binv_i64_no_mask:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: binv a0, a0, a1
-; RV64ZBS-NEXT: ret
- %shl = shl nuw i64 1, %b
- %xor = xor i64 %shl, %a
- ret i64 %xor
-}
-
-define signext i32 @bext_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: bext_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srlw a0, a0, a1
-; RV64I-NEXT: andi a0, a0, 1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bext_i32:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: andi a1, a1, 31
-; RV64ZBS-NEXT: bext a0, a0, a1
-; RV64ZBS-NEXT: ret
- %and = and i32 %b, 31
- %shr = lshr i32 %a, %and
- %and1 = and i32 %shr, 1
- ret i32 %and1
-}
-
-define signext i32 @bext_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: bext_i32_no_mask:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srlw a0, a0, a1
-; RV64I-NEXT: andi a0, a0, 1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bext_i32_no_mask:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bext a0, a0, a1
-; RV64ZBS-NEXT: ret
- %shr = lshr i32 %a, %b
- %and1 = and i32 %shr, 1
- ret i32 %and1
-}
-
-; This previously got converted to (i1 (truncate (srl X, Y))). Make sure we
-; are able to use bext.
-define void @bext_i32_trunc(i32 signext %0, i32 signext %1) {
-; RV64I-LABEL: bext_i32_trunc:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srlw a0, a0, a1
-; RV64I-NEXT: andi a0, a0, 1
-; RV64I-NEXT: beqz a0, .LBB19_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB19_2:
-; RV64I-NEXT: tail bar
-;
-; RV64ZBS-LABEL: bext_i32_trunc:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bext a0, a0, a1
-; RV64ZBS-NEXT: beqz a0, .LBB19_2
-; RV64ZBS-NEXT: # %bb.1:
-; RV64ZBS-NEXT: ret
-; RV64ZBS-NEXT: .LBB19_2:
-; RV64ZBS-NEXT: tail bar
- %3 = shl i32 1, %1
- %4 = and i32 %3, %0
- %5 = icmp eq i32 %4, 0
- br i1 %5, label %6, label %7
-
-6: ; preds = %2
- tail call void @bar()
- br label %7
-
-7: ; preds = %6, %2
- ret void
-}
-
-declare void @bar()
-
-define i64 @bext_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: bext_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srl a0, a0, a1
-; RV64I-NEXT: andi a0, a0, 1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bext_i64:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bext a0, a0, a1
-; RV64ZBS-NEXT: ret
- %conv = and i64 %b, 63
- %shr = lshr i64 %a, %conv
- %and1 = and i64 %shr, 1
- ret i64 %and1
-}
-
-define i64 @bext_i64_no_mask(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: bext_i64_no_mask:
-; RV64I: # %bb.0:
-; RV64I-NEXT: srl a0, a0, a1
-; RV64I-NEXT: andi a0, a0, 1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bext_i64_no_mask:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bext a0, a0, a1
-; RV64ZBS-NEXT: ret
- %shr = lshr i64 %a, %b
- %and1 = and i64 %shr, 1
- ret i64 %and1
-}
-
-define signext i32 @bexti_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: bexti_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 58
-; RV64I-NEXT: srli a0, a0, 63
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bexti_i32:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bexti a0, a0, 5
-; RV64ZBS-NEXT: ret
- %shr = lshr i32 %a, 5
- %and = and i32 %shr, 1
- ret i32 %and
-}
-
-define i64 @bexti_i64(i64 %a) nounwind {
-; RV64I-LABEL: bexti_i64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 58
-; RV64I-NEXT: srli a0, a0, 63
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bexti_i64:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bexti a0, a0, 5
-; RV64ZBS-NEXT: ret
- %shr = lshr i64 %a, 5
- %and = and i64 %shr, 1
- ret i64 %and
-}
-
-define signext i32 @bexti_i32_cmp(i32 signext %a) nounwind {
-; CHECK-LABEL: bexti_i32_cmp:
-; CHECK: # %bb.0:
-; CHECK-NEXT: andi a0, a0, 32
-; CHECK-NEXT: snez a0, a0
-; CHECK-NEXT: ret
- %and = and i32 %a, 32
- %cmp = icmp ne i32 %and, 0
- %zext = zext i1 %cmp to i32
- ret i32 %zext
-}
-
-define i64 @bexti_i64_cmp(i64 %a) nounwind {
-; RV64I-LABEL: bexti_i64_cmp:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 58
-; RV64I-NEXT: srli a0, a0, 63
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bexti_i64_cmp:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bexti a0, a0, 5
-; RV64ZBS-NEXT: ret
- %and = and i64 %a, 32
- %cmp = icmp ne i64 %and, 0
- %zext = zext i1 %cmp to i64
- ret i64 %zext
-}
-
-define signext i32 @bclri_i32_10(i32 signext %a) nounwind {
-; CHECK-LABEL: bclri_i32_10:
-; CHECK: # %bb.0:
-; CHECK-NEXT: andi a0, a0, -1025
-; CHECK-NEXT: ret
- %and = and i32 %a, -1025
- ret i32 %and
-}
-
-define signext i32 @bclri_i32_11(i32 signext %a) nounwind {
-; RV64I-LABEL: bclri_i32_11:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 1048575
-; RV64I-NEXT: addiw a1, a1, 2047
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclri_i32_11:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bclri a0, a0, 11
-; RV64ZBS-NEXT: ret
- %and = and i32 %a, -2049
- ret i32 %and
-}
-
-define signext i32 @bclri_i32_30(i32 signext %a) nounwind {
-; RV64I-LABEL: bclri_i32_30:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 786432
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclri_i32_30:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bclri a0, a0, 30
-; RV64ZBS-NEXT: ret
- %and = and i32 %a, -1073741825
- ret i32 %and
-}
-
-define signext i32 @bclri_i32_31(i32 signext %a) nounwind {
-; RV64I-LABEL: bclri_i32_31:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 33
-; RV64I-NEXT: srli a0, a0, 33
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclri_i32_31:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bclri a0, a0, 31
-; RV64ZBS-NEXT: sext.w a0, a0
-; RV64ZBS-NEXT: ret
- %and = and i32 %a, -2147483649
- ret i32 %and
-}
-
-define i64 @bclri_i64_10(i64 %a) nounwind {
-; CHECK-LABEL: bclri_i64_10:
-; CHECK: # %bb.0:
-; CHECK-NEXT: andi a0, a0, -1025
-; CHECK-NEXT: ret
- %and = and i64 %a, -1025
- ret i64 %and
-}
-
-define i64 @bclri_i64_11(i64 %a) nounwind {
-; RV64I-LABEL: bclri_i64_11:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 1048575
-; RV64I-NEXT: addiw a1, a1, 2047
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclri_i64_11:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bclri a0, a0, 11
-; RV64ZBS-NEXT: ret
- %and = and i64 %a, -2049
- ret i64 %and
-}
-
-define i64 @bclri_i64_30(i64 %a) nounwind {
-; RV64I-LABEL: bclri_i64_30:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 786432
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclri_i64_30:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bclri a0, a0, 30
-; RV64ZBS-NEXT: ret
- %and = and i64 %a, -1073741825
- ret i64 %and
-}
-
-define i64 @bclri_i64_31(i64 %a) nounwind {
-; RV64I-LABEL: bclri_i64_31:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: addi a1, a1, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclri_i64_31:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bclri a0, a0, 31
-; RV64ZBS-NEXT: ret
- %and = and i64 %a, -2147483649
- ret i64 %and
-}
-
-define i64 @bclri_i64_62(i64 %a) nounwind {
-; RV64I-LABEL: bclri_i64_62:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slli a1, a1, 62
-; RV64I-NEXT: addi a1, a1, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclri_i64_62:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bclri a0, a0, 62
-; RV64ZBS-NEXT: ret
- %and = and i64 %a, -4611686018427387905
- ret i64 %and
-}
-
-define i64 @bclri_i64_63(i64 %a) nounwind {
-; RV64I-LABEL: bclri_i64_63:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 1
-; RV64I-NEXT: srli a0, a0, 1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclri_i64_63:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bclri a0, a0, 63
-; RV64ZBS-NEXT: ret
- %and = and i64 %a, -9223372036854775809
- ret i64 %and
-}
-
-define i64 @bclri_i64_large0(i64 %a) nounwind {
-; RV64I-LABEL: bclri_i64_large0:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 1044480
-; RV64I-NEXT: addiw a1, a1, -256
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclri_i64_large0:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: andi a0, a0, -256
-; RV64ZBS-NEXT: bclri a0, a0, 24
-; RV64ZBS-NEXT: ret
- %and = and i64 %a, -16777472
- ret i64 %and
-}
-
-define i64 @bclri_i64_large1(i64 %a) nounwind {
-; RV64I-LABEL: bclri_i64_large1:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 1044464
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bclri_i64_large1:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bclri a0, a0, 16
-; RV64ZBS-NEXT: bclri a0, a0, 24
-; RV64ZBS-NEXT: ret
- %and = and i64 %a, -16842753
- ret i64 %and
-}
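Both constants above decompose into a few individual bit clears, which is the shape the Zbs lowering looks for:

    -16777472 = ~0x010000FF   # clear bits 7:0 (one andi) plus bit 24 (one bclri)
    -16842753 = ~0x01010000   # clear bit 16 and bit 24 (two bclri)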
-
-define signext i32 @bseti_i32_10(i32 signext %a) nounwind {
-; CHECK-LABEL: bseti_i32_10:
-; CHECK: # %bb.0:
-; CHECK-NEXT: ori a0, a0, 1024
-; CHECK-NEXT: ret
- %or = or i32 %a, 1024
- ret i32 %or
-}
-
-define signext i32 @bseti_i32_11(i32 signext %a) nounwind {
-; RV64I-LABEL: bseti_i32_11:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 1
-; RV64I-NEXT: slliw a1, a1, 11
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bseti_i32_11:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bseti a0, a0, 11
-; RV64ZBS-NEXT: ret
- %or = or i32 %a, 2048
- ret i32 %or
-}
-
-define signext i32 @bseti_i32_30(i32 signext %a) nounwind {
-; RV64I-LABEL: bseti_i32_30:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 262144
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bseti_i32_30:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bseti a0, a0, 30
-; RV64ZBS-NEXT: ret
- %or = or i32 %a, 1073741824
- ret i32 %or
-}
-
-define signext i32 @bseti_i32_31(i32 signext %a) nounwind {
-; RV64I-LABEL: bseti_i32_31:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bseti_i32_31:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bseti a0, a0, 31
-; RV64ZBS-NEXT: sext.w a0, a0
-; RV64ZBS-NEXT: ret
- %or = or i32 %a, 2147483648
- ret i32 %or
-}
-
-define i64 @bseti_i64_10(i64 %a) nounwind {
-; CHECK-LABEL: bseti_i64_10:
-; CHECK: # %bb.0:
-; CHECK-NEXT: ori a0, a0, 1024
-; CHECK-NEXT: ret
- %or = or i64 %a, 1024
- ret i64 %or
-}
-
-define i64 @bseti_i64_11(i64 %a) nounwind {
-; RV64I-LABEL: bseti_i64_11:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 1
-; RV64I-NEXT: slli a1, a1, 11
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bseti_i64_11:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bseti a0, a0, 11
-; RV64ZBS-NEXT: ret
- %or = or i64 %a, 2048
- ret i64 %or
-}
-
-define i64 @bseti_i64_30(i64 %a) nounwind {
-; RV64I-LABEL: bseti_i64_30:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 262144
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bseti_i64_30:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bseti a0, a0, 30
-; RV64ZBS-NEXT: ret
- %or = or i64 %a, 1073741824
- ret i64 %or
-}
-
-define i64 @bseti_i64_31(i64 %a) nounwind {
-; RV64I-LABEL: bseti_i64_31:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 1
-; RV64I-NEXT: slli a1, a1, 31
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bseti_i64_31:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bseti a0, a0, 31
-; RV64ZBS-NEXT: ret
- %or = or i64 %a, 2147483648
- ret i64 %or
-}
-
-define i64 @bseti_i64_62(i64 %a) nounwind {
-; RV64I-LABEL: bseti_i64_62:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 1
-; RV64I-NEXT: slli a1, a1, 62
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bseti_i64_62:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bseti a0, a0, 62
-; RV64ZBS-NEXT: ret
- %or = or i64 %a, 4611686018427387904
- ret i64 %or
-}
-
-define i64 @bseti_i64_63(i64 %a) nounwind {
-; RV64I-LABEL: bseti_i64_63:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slli a1, a1, 63
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bseti_i64_63:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bseti a0, a0, 63
-; RV64ZBS-NEXT: ret
- %or = or i64 %a, 9223372036854775808
- ret i64 %or
-}
-
-define signext i32 @binvi_i32_10(i32 signext %a) nounwind {
-; CHECK-LABEL: binvi_i32_10:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xori a0, a0, 1024
-; CHECK-NEXT: ret
- %xor = xor i32 %a, 1024
- ret i32 %xor
-}
-
-define signext i32 @binvi_i32_11(i32 signext %a) nounwind {
-; RV64I-LABEL: binvi_i32_11:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 1
-; RV64I-NEXT: slliw a1, a1, 11
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: binvi_i32_11:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: binvi a0, a0, 11
-; RV64ZBS-NEXT: ret
- %xor = xor i32 %a, 2048
- ret i32 %xor
-}
-
-define signext i32 @binvi_i32_30(i32 signext %a) nounwind {
-; RV64I-LABEL: binvi_i32_30:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 262144
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: binvi_i32_30:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: binvi a0, a0, 30
-; RV64ZBS-NEXT: ret
- %xor = xor i32 %a, 1073741824
- ret i32 %xor
-}
-
-define signext i32 @binvi_i32_31(i32 signext %a) nounwind {
-; RV64I-LABEL: binvi_i32_31:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: binvi_i32_31:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: binvi a0, a0, 31
-; RV64ZBS-NEXT: sext.w a0, a0
-; RV64ZBS-NEXT: ret
- %xor = xor i32 %a, 2147483648
- ret i32 %xor
-}
-
-define i64 @binvi_i64_10(i64 %a) nounwind {
-; CHECK-LABEL: binvi_i64_10:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xori a0, a0, 1024
-; CHECK-NEXT: ret
- %xor = xor i64 %a, 1024
- ret i64 %xor
-}
-
-define i64 @binvi_i64_11(i64 %a) nounwind {
-; RV64I-LABEL: binvi_i64_11:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 1
-; RV64I-NEXT: slli a1, a1, 11
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: binvi_i64_11:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: binvi a0, a0, 11
-; RV64ZBS-NEXT: ret
- %xor = xor i64 %a, 2048
- ret i64 %xor
-}
-
-define i64 @binvi_i64_30(i64 %a) nounwind {
-; RV64I-LABEL: binvi_i64_30:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 262144
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: binvi_i64_30:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: binvi a0, a0, 30
-; RV64ZBS-NEXT: ret
- %xor = xor i64 %a, 1073741824
- ret i64 %xor
-}
-
-define i64 @binvi_i64_31(i64 %a) nounwind {
-; RV64I-LABEL: binvi_i64_31:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 1
-; RV64I-NEXT: slli a1, a1, 31
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: binvi_i64_31:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: binvi a0, a0, 31
-; RV64ZBS-NEXT: ret
- %xor = xor i64 %a, 2147483648
- ret i64 %xor
-}
-
-define i64 @binvi_i64_62(i64 %a) nounwind {
-; RV64I-LABEL: binvi_i64_62:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 1
-; RV64I-NEXT: slli a1, a1, 62
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: binvi_i64_62:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: binvi a0, a0, 62
-; RV64ZBS-NEXT: ret
- %xor = xor i64 %a, 4611686018427387904
- ret i64 %xor
-}
-
-define i64 @binvi_i64_63(i64 %a) nounwind {
-; RV64I-LABEL: binvi_i64_63:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slli a1, a1, 63
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: binvi_i64_63:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: binvi a0, a0, 63
-; RV64ZBS-NEXT: ret
- %xor = xor i64 %a, 9223372036854775808
- ret i64 %xor
-}
-
-define i64 @xor_i64_large(i64 %a) nounwind {
-; RV64I-LABEL: xor_i64_large:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 1
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: addi a1, a1, 1
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: xor_i64_large:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: binvi a0, a0, 0
-; RV64ZBS-NEXT: binvi a0, a0, 32
-; RV64ZBS-NEXT: ret
- %xor = xor i64 %a, 4294967297
- ret i64 %xor
-}
-
-define i64 @xor_i64_4099(i64 %a) nounwind {
-; RV64I-LABEL: xor_i64_4099:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 1
-; RV64I-NEXT: addiw a1, a1, 3
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: xor_i64_4099:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: xori a0, a0, 3
-; RV64ZBS-NEXT: binvi a0, a0, 12
-; RV64ZBS-NEXT: ret
- %xor = xor i64 %a, 4099
- ret i64 %xor
-}
-
-define i64 @xor_i64_96(i64 %a) nounwind {
-; CHECK-LABEL: xor_i64_96:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xori a0, a0, 96
-; CHECK-NEXT: ret
- %xor = xor i64 %a, 96
- ret i64 %xor
-}
-
-define i64 @or_i64_large(i64 %a) nounwind {
-; RV64I-LABEL: or_i64_large:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 1
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: addi a1, a1, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: or_i64_large:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bseti a0, a0, 0
-; RV64ZBS-NEXT: bseti a0, a0, 32
-; RV64ZBS-NEXT: ret
- %or = or i64 %a, 4294967297
- ret i64 %or
-}
-
-define i64 @xor_i64_66901(i64 %a) nounwind {
-; RV64I-LABEL: xor_i64_66901:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 16
-; RV64I-NEXT: addiw a1, a1, 1365
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: xor_i64_66901:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: xori a0, a0, 1365
-; RV64ZBS-NEXT: binvi a0, a0, 16
-; RV64ZBS-NEXT: ret
- %xor = xor i64 %a, 66901
- ret i64 %xor
-}
-
-define i64 @or_i64_4099(i64 %a) nounwind {
-; RV64I-LABEL: or_i64_4099:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 1
-; RV64I-NEXT: addiw a1, a1, 3
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: or_i64_4099:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: ori a0, a0, 3
-; RV64ZBS-NEXT: bseti a0, a0, 12
-; RV64ZBS-NEXT: ret
- %or = or i64 %a, 4099
- ret i64 %or
-}
-
-define i64 @or_i64_96(i64 %a) nounwind {
-; CHECK-LABEL: or_i64_96:
-; CHECK: # %bb.0:
-; CHECK-NEXT: ori a0, a0, 96
-; CHECK-NEXT: ret
- %or = or i64 %a, 96
- ret i64 %or
-}
-
-define i64 @or_i64_66901(i64 %a) nounwind {
-; RV64I-LABEL: or_i64_66901:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 16
-; RV64I-NEXT: addiw a1, a1, 1365
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: or_i64_66901:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: ori a0, a0, 1365
-; RV64ZBS-NEXT: bseti a0, a0, 16
-; RV64ZBS-NEXT: ret
- %or = or i64 %a, 66901
- ret i64 %or
-}
-
-define signext i32 @bset_trailing_ones_i32_mask(i32 signext %a) nounwind {
-; RV64I-LABEL: bset_trailing_ones_i32_mask:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: sllw a0, a1, a0
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bset_trailing_ones_i32_mask:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: andi a0, a0, 31
-; RV64ZBS-NEXT: bset a0, zero, a0
-; RV64ZBS-NEXT: addiw a0, a0, -1
-; RV64ZBS-NEXT: ret
- %and = and i32 %a, 31
- %shift = shl nsw i32 -1, %and
- %not = xor i32 %shift, -1
- ret i32 %not
-}
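The Zbs sequence builds the trailing-ones mask directly: bset against the zero register produces the single bit and the decrement turns it into the mask. For example, with a0 = 5:

    bset  a0, zero, a0   # a0 = 1 << 5       = 0b100000
    addiw a0, a0, -1     # a0 = (1 << 5) - 1 = 0b011111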
-
-define signext i32 @bset_trailing_ones_i32_no_mask(i32 signext %a) nounwind {
-; RV64I-LABEL: bset_trailing_ones_i32_no_mask:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: sllw a0, a1, a0
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bset_trailing_ones_i32_no_mask:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bset a0, zero, a0
-; RV64ZBS-NEXT: addiw a0, a0, -1
-; RV64ZBS-NEXT: ret
- %shift = shl nsw i32 -1, %a
- %not = xor i32 %shift, -1
- ret i32 %not
-}
-
-define signext i64 @bset_trailing_ones_i64_mask(i64 signext %a) nounwind {
-; RV64I-LABEL: bset_trailing_ones_i64_mask:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: sll a0, a1, a0
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bset_trailing_ones_i64_mask:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bset a0, zero, a0
-; RV64ZBS-NEXT: addi a0, a0, -1
-; RV64ZBS-NEXT: ret
- %and = and i64 %a, 63
- %shift = shl nsw i64 -1, %and
- %not = xor i64 %shift, -1
- ret i64 %not
-}
-
-define signext i64 @bset_trailing_ones_i64_no_mask(i64 signext %a) nounwind {
-; RV64I-LABEL: bset_trailing_ones_i64_no_mask:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: sll a0, a1, a0
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: ret
-;
-; RV64ZBS-LABEL: bset_trailing_ones_i64_no_mask:
-; RV64ZBS: # %bb.0:
-; RV64ZBS-NEXT: bset a0, zero, a0
-; RV64ZBS-NEXT: addi a0, a0, -1
-; RV64ZBS-NEXT: ret
- %shift = shl nsw i64 -1, %a
- %not = xor i64 %shift, -1
- ret i64 %not
-}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/sadd_sat.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/sadd_sat.ll
deleted file mode 100644
index 080ac2b..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/sadd_sat.ll
+++ /dev/null
@@ -1,151 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefixes=RV64,RV64I
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefixes=RV64,RV64IZbb
-
-declare i4 @llvm.sadd.sat.i4(i4, i4)
-declare i8 @llvm.sadd.sat.i8(i8, i8)
-declare i16 @llvm.sadd.sat.i16(i16, i16)
-declare i32 @llvm.sadd.sat.i32(i32, i32)
-declare i64 @llvm.sadd.sat.i64(i64, i64)
-
-define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
-; RV64I-LABEL: func:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addw a2, a0, a1
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: beq a0, a2, .LBB0_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: sraiw a0, a0, 31
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: .LBB0_2:
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: add a0, a0, a1
-; RV64IZbb-NEXT: lui a1, 524288
-; RV64IZbb-NEXT: addiw a2, a1, -1
-; RV64IZbb-NEXT: min a0, a0, a2
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i32 @llvm.sadd.sat.i32(i32 %x, i32 %y);
- ret i32 %tmp;
-}
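The branchy RV64I form is a standard overflow check: addw computes the wrapped 32-bit sum and add the full 64-bit sum, and the two differ exactly when the i32 addition overflowed. A worked overflow case with x = y = 0x7fffffff:

    addw: 0x7fffffff + 0x7fffffff -> 0xfffffffffffffffe (wrapped and sign-extended)
    add : 0x7fffffff + 0x7fffffff -> 0x00000000fffffffe (full sum)
    # they differ, so: sraiw(sum, 31) = -1; xor with 0x80000000 -> 0x7fffffff = INT32_MAX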
-
-define i64 @func2(i64 %x, i64 %y) nounwind {
-; RV64-LABEL: func2:
-; RV64: # %bb.0:
-; RV64-NEXT: mv a2, a0
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: slt a2, a0, a2
-; RV64-NEXT: slti a1, a1, 0
-; RV64-NEXT: beq a1, a2, .LBB1_2
-; RV64-NEXT: # %bb.1:
-; RV64-NEXT: srai a0, a0, 63
-; RV64-NEXT: li a1, -1
-; RV64-NEXT: slli a1, a1, 63
-; RV64-NEXT: xor a0, a0, a1
-; RV64-NEXT: .LBB1_2:
-; RV64-NEXT: ret
- %tmp = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %y);
- ret i64 %tmp;
-}
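For i64 there is no wider addition to compare against, so the check uses signs instead: signed overflow occurred iff (y < 0) != (x + y < x). slti computes the first predicate and slt the second; when they disagree the code falls through and builds the saturated value from the sum's sign bit XORed with 0x8000000000000000.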
-
-define signext i16 @func16(i16 signext %x, i16 signext %y) nounwind {
-; RV64I-LABEL: func16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 8
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: bge a0, a1, .LBB2_3
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: lui a1, 1048568
-; RV64I-NEXT: bge a1, a0, .LBB2_4
-; RV64I-NEXT: .LBB2_2:
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB2_3:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: lui a1, 1048568
-; RV64I-NEXT: blt a1, a0, .LBB2_2
-; RV64I-NEXT: .LBB2_4:
-; RV64I-NEXT: lui a0, 1048568
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func16:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: addw a0, a0, a1
-; RV64IZbb-NEXT: lui a1, 8
-; RV64IZbb-NEXT: addiw a1, a1, -1
-; RV64IZbb-NEXT: min a0, a0, a1
-; RV64IZbb-NEXT: lui a1, 1048568
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i16 @llvm.sadd.sat.i16(i16 %x, i16 %y);
- ret i16 %tmp;
-}
-
-define signext i8 @func8(i8 signext %x, i8 signext %y) nounwind {
-; RV64I-LABEL: func8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: li a1, 127
-; RV64I-NEXT: bge a0, a1, .LBB3_3
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: li a1, -127
-; RV64I-NEXT: blt a0, a1, .LBB3_4
-; RV64I-NEXT: .LBB3_2:
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB3_3:
-; RV64I-NEXT: li a0, 127
-; RV64I-NEXT: li a1, -127
-; RV64I-NEXT: bge a0, a1, .LBB3_2
-; RV64I-NEXT: .LBB3_4:
-; RV64I-NEXT: li a0, -128
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func8:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: addw a0, a0, a1
-; RV64IZbb-NEXT: li a1, 127
-; RV64IZbb-NEXT: min a0, a0, a1
-; RV64IZbb-NEXT: li a1, -128
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i8 @llvm.sadd.sat.i8(i8 %x, i8 %y);
- ret i8 %tmp;
-}
-
-define signext i4 @func3(i4 signext %x, i4 signext %y) nounwind {
-; RV64I-LABEL: func3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: li a1, 7
-; RV64I-NEXT: bge a0, a1, .LBB4_3
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: li a1, -7
-; RV64I-NEXT: blt a0, a1, .LBB4_4
-; RV64I-NEXT: .LBB4_2:
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB4_3:
-; RV64I-NEXT: li a0, 7
-; RV64I-NEXT: li a1, -7
-; RV64I-NEXT: bge a0, a1, .LBB4_2
-; RV64I-NEXT: .LBB4_4:
-; RV64I-NEXT: li a0, -8
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func3:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: addw a0, a0, a1
-; RV64IZbb-NEXT: li a1, 7
-; RV64IZbb-NEXT: min a0, a0, a1
-; RV64IZbb-NEXT: li a1, -8
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i4 @llvm.sadd.sat.i4(i4 %x, i4 %y);
- ret i4 %tmp;
-}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/sadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/sadd_sat_plus.ll
deleted file mode 100644
index 70c70d3..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/sadd_sat_plus.ll
+++ /dev/null
@@ -1,185 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefixes=RV64,RV64I
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefixes=RV64,RV64IZbb
-
-declare i4 @llvm.sadd.sat.i4(i4, i4)
-declare i8 @llvm.sadd.sat.i8(i8, i8)
-declare i16 @llvm.sadd.sat.i16(i16, i16)
-declare i32 @llvm.sadd.sat.i32(i32, i32)
-declare i64 @llvm.sadd.sat.i64(i64, i64)
-
-define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
-; RV64I-LABEL: func32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: mulw a1, a1, a2
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: addw a2, a0, a1
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: beq a0, a2, .LBB0_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: sraiw a0, a0, 31
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: .LBB0_2:
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func32:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: mulw a1, a1, a2
-; RV64IZbb-NEXT: sext.w a0, a0
-; RV64IZbb-NEXT: add a0, a0, a1
-; RV64IZbb-NEXT: lui a1, 524288
-; RV64IZbb-NEXT: addiw a2, a1, -1
-; RV64IZbb-NEXT: min a0, a0, a2
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i32 %y, %z
- %tmp = call i32 @llvm.sadd.sat.i32(i32 %x, i32 %a)
- ret i32 %tmp
-}
-
-define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
-; RV64-LABEL: func64:
-; RV64: # %bb.0:
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: add a0, a0, a2
-; RV64-NEXT: slt a1, a0, a1
-; RV64-NEXT: slti a2, a2, 0
-; RV64-NEXT: beq a2, a1, .LBB1_2
-; RV64-NEXT: # %bb.1:
-; RV64-NEXT: srai a0, a0, 63
-; RV64-NEXT: li a1, -1
-; RV64-NEXT: slli a1, a1, 63
-; RV64-NEXT: xor a0, a0, a1
-; RV64-NEXT: .LBB1_2:
-; RV64-NEXT: ret
- %a = mul i64 %y, %z
- %tmp = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %z)
- ret i64 %tmp
-}
-
-define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
-; RV64I-LABEL: func16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 16
-; RV64I-NEXT: sraiw a0, a0, 16
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: slli a1, a1, 16
-; RV64I-NEXT: sraiw a1, a1, 16
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 8
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: bge a0, a1, .LBB2_3
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: lui a1, 1048568
-; RV64I-NEXT: bge a1, a0, .LBB2_4
-; RV64I-NEXT: .LBB2_2:
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB2_3:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: lui a1, 1048568
-; RV64I-NEXT: blt a1, a0, .LBB2_2
-; RV64I-NEXT: .LBB2_4:
-; RV64I-NEXT: lui a0, 1048568
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func16:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: sext.h a0, a0
-; RV64IZbb-NEXT: mul a1, a1, a2
-; RV64IZbb-NEXT: sext.h a1, a1
-; RV64IZbb-NEXT: addw a0, a0, a1
-; RV64IZbb-NEXT: lui a1, 8
-; RV64IZbb-NEXT: addiw a1, a1, -1
-; RV64IZbb-NEXT: min a0, a0, a1
-; RV64IZbb-NEXT: lui a1, 1048568
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i16 %y, %z
- %tmp = call i16 @llvm.sadd.sat.i16(i16 %x, i16 %a)
- ret i16 %tmp
-}
-
-define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
-; RV64I-LABEL: func8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: sraiw a0, a0, 24
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: slli a1, a1, 24
-; RV64I-NEXT: sraiw a1, a1, 24
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: li a1, 127
-; RV64I-NEXT: bge a0, a1, .LBB3_3
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: li a1, -127
-; RV64I-NEXT: blt a0, a1, .LBB3_4
-; RV64I-NEXT: .LBB3_2:
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB3_3:
-; RV64I-NEXT: li a0, 127
-; RV64I-NEXT: li a1, -127
-; RV64I-NEXT: bge a0, a1, .LBB3_2
-; RV64I-NEXT: .LBB3_4:
-; RV64I-NEXT: li a0, -128
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func8:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: sext.b a0, a0
-; RV64IZbb-NEXT: mul a1, a1, a2
-; RV64IZbb-NEXT: sext.b a1, a1
-; RV64IZbb-NEXT: addw a0, a0, a1
-; RV64IZbb-NEXT: li a1, 127
-; RV64IZbb-NEXT: min a0, a0, a1
-; RV64IZbb-NEXT: li a1, -128
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i8 %y, %z
- %tmp = call i8 @llvm.sadd.sat.i8(i8 %x, i8 %a)
- ret i8 %tmp
-}
-
-define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
-; RV64I-LABEL: func4:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 28
-; RV64I-NEXT: sraiw a0, a0, 28
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: slli a1, a1, 28
-; RV64I-NEXT: sraiw a1, a1, 28
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: li a1, 7
-; RV64I-NEXT: bge a0, a1, .LBB4_3
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: li a1, -7
-; RV64I-NEXT: blt a0, a1, .LBB4_4
-; RV64I-NEXT: .LBB4_2:
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB4_3:
-; RV64I-NEXT: li a0, 7
-; RV64I-NEXT: li a1, -7
-; RV64I-NEXT: bge a0, a1, .LBB4_2
-; RV64I-NEXT: .LBB4_4:
-; RV64I-NEXT: li a0, -8
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func4:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: slli a0, a0, 28
-; RV64IZbb-NEXT: sraiw a0, a0, 28
-; RV64IZbb-NEXT: mul a1, a1, a2
-; RV64IZbb-NEXT: slli a1, a1, 28
-; RV64IZbb-NEXT: sraiw a1, a1, 28
-; RV64IZbb-NEXT: addw a0, a0, a1
-; RV64IZbb-NEXT: li a1, 7
-; RV64IZbb-NEXT: min a0, a0, a1
-; RV64IZbb-NEXT: li a1, -8
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i4 %y, %z
- %tmp = call i4 @llvm.sadd.sat.i4(i4 %x, i4 %a)
- ret i4 %tmp
-}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/ssub_sat.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/ssub_sat.ll
deleted file mode 100644
index 0a566ee..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/ssub_sat.ll
+++ /dev/null
@@ -1,151 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefixes=RV64,RV64I
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefixes=RV64,RV64IZbb
-
-declare i4 @llvm.ssub.sat.i4(i4, i4)
-declare i8 @llvm.ssub.sat.i8(i8, i8)
-declare i16 @llvm.ssub.sat.i16(i16, i16)
-declare i32 @llvm.ssub.sat.i32(i32, i32)
-declare i64 @llvm.ssub.sat.i64(i64, i64)
-
-define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
-; RV64I-LABEL: func:
-; RV64I: # %bb.0:
-; RV64I-NEXT: subw a2, a0, a1
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: beq a0, a2, .LBB0_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: sraiw a0, a0, 31
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: .LBB0_2:
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: sub a0, a0, a1
-; RV64IZbb-NEXT: lui a1, 524288
-; RV64IZbb-NEXT: addiw a2, a1, -1
-; RV64IZbb-NEXT: min a0, a0, a2
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i32 @llvm.ssub.sat.i32(i32 %x, i32 %y);
- ret i32 %tmp;
-}
-
-define i64 @func2(i64 %x, i64 %y) nounwind {
-; RV64-LABEL: func2:
-; RV64: # %bb.0:
-; RV64-NEXT: mv a2, a0
-; RV64-NEXT: sgtz a3, a1
-; RV64-NEXT: sub a0, a0, a1
-; RV64-NEXT: slt a1, a0, a2
-; RV64-NEXT: beq a3, a1, .LBB1_2
-; RV64-NEXT: # %bb.1:
-; RV64-NEXT: srai a0, a0, 63
-; RV64-NEXT: li a1, -1
-; RV64-NEXT: slli a1, a1, 63
-; RV64-NEXT: xor a0, a0, a1
-; RV64-NEXT: .LBB1_2:
-; RV64-NEXT: ret
- %tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y);
- ret i64 %tmp;
-}
-
-define signext i16 @func16(i16 signext %x, i16 signext %y) nounwind {
-; RV64I-LABEL: func16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: lui a1, 8
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: bge a0, a1, .LBB2_3
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: lui a1, 1048568
-; RV64I-NEXT: bge a1, a0, .LBB2_4
-; RV64I-NEXT: .LBB2_2:
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB2_3:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: lui a1, 1048568
-; RV64I-NEXT: blt a1, a0, .LBB2_2
-; RV64I-NEXT: .LBB2_4:
-; RV64I-NEXT: lui a0, 1048568
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func16:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: subw a0, a0, a1
-; RV64IZbb-NEXT: lui a1, 8
-; RV64IZbb-NEXT: addiw a1, a1, -1
-; RV64IZbb-NEXT: min a0, a0, a1
-; RV64IZbb-NEXT: lui a1, 1048568
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i16 @llvm.ssub.sat.i16(i16 %x, i16 %y);
- ret i16 %tmp;
-}
-
-define signext i8 @func8(i8 signext %x, i8 signext %y) nounwind {
-; RV64I-LABEL: func8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: li a1, 127
-; RV64I-NEXT: bge a0, a1, .LBB3_3
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: li a1, -127
-; RV64I-NEXT: blt a0, a1, .LBB3_4
-; RV64I-NEXT: .LBB3_2:
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB3_3:
-; RV64I-NEXT: li a0, 127
-; RV64I-NEXT: li a1, -127
-; RV64I-NEXT: bge a0, a1, .LBB3_2
-; RV64I-NEXT: .LBB3_4:
-; RV64I-NEXT: li a0, -128
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func8:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: subw a0, a0, a1
-; RV64IZbb-NEXT: li a1, 127
-; RV64IZbb-NEXT: min a0, a0, a1
-; RV64IZbb-NEXT: li a1, -128
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i8 @llvm.ssub.sat.i8(i8 %x, i8 %y);
- ret i8 %tmp;
-}
-
-define signext i4 @func3(i4 signext %x, i4 signext %y) nounwind {
-; RV64I-LABEL: func3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: li a1, 7
-; RV64I-NEXT: bge a0, a1, .LBB4_3
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: li a1, -7
-; RV64I-NEXT: blt a0, a1, .LBB4_4
-; RV64I-NEXT: .LBB4_2:
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB4_3:
-; RV64I-NEXT: li a0, 7
-; RV64I-NEXT: li a1, -7
-; RV64I-NEXT: bge a0, a1, .LBB4_2
-; RV64I-NEXT: .LBB4_4:
-; RV64I-NEXT: li a0, -8
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func3:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: subw a0, a0, a1
-; RV64IZbb-NEXT: li a1, 7
-; RV64IZbb-NEXT: min a0, a0, a1
-; RV64IZbb-NEXT: li a1, -8
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i4 @llvm.ssub.sat.i4(i4 %x, i4 %y);
- ret i4 %tmp;
-}
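
For orientation only (this note is not part of the deleted file): the RV64IZbb checks above lower llvm.ssub.sat by clamping a wide subtraction between the type's min and max. A minimal C sketch of that semantics, assuming two's-complement i32:

#include <stdint.h>

/* Hedged sketch of ssub.sat.i32: subtract exactly in a wider type, then
 * clamp, mirroring the min/max shape of the Zbb lowering above. */
static int32_t ssub_sat_i32(int32_t x, int32_t y) {
    int64_t r = (int64_t)x - (int64_t)y;  /* exact, cannot wrap */
    if (r > INT32_MAX) return INT32_MAX;  /* positive overflow */
    if (r < INT32_MIN) return INT32_MIN;  /* negative overflow */
    return (int32_t)r;
}
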
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/ssub_sat_plus.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/ssub_sat_plus.ll
deleted file mode 100644
index bc73ba7..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/ssub_sat_plus.ll
+++ /dev/null
@@ -1,185 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefixes=RV64,RV64I
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefixes=RV64,RV64IZbb
-
-declare i4 @llvm.ssub.sat.i4(i4, i4)
-declare i8 @llvm.ssub.sat.i8(i8, i8)
-declare i16 @llvm.ssub.sat.i16(i16, i16)
-declare i32 @llvm.ssub.sat.i32(i32, i32)
-declare i64 @llvm.ssub.sat.i64(i64, i64)
-
-define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
-; RV64I-LABEL: func32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: mulw a1, a1, a2
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: subw a2, a0, a1
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: beq a0, a2, .LBB0_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: sraiw a0, a0, 31
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: .LBB0_2:
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func32:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: mulw a1, a1, a2
-; RV64IZbb-NEXT: sext.w a0, a0
-; RV64IZbb-NEXT: sub a0, a0, a1
-; RV64IZbb-NEXT: lui a1, 524288
-; RV64IZbb-NEXT: addiw a2, a1, -1
-; RV64IZbb-NEXT: min a0, a0, a2
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i32 %y, %z
- %tmp = call i32 @llvm.ssub.sat.i32(i32 %x, i32 %a)
- ret i32 %tmp
-}
-
-define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
-; RV64-LABEL: func64:
-; RV64: # %bb.0:
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: sgtz a3, a2
-; RV64-NEXT: sub a0, a0, a2
-; RV64-NEXT: slt a1, a0, a1
-; RV64-NEXT: beq a3, a1, .LBB1_2
-; RV64-NEXT: # %bb.1:
-; RV64-NEXT: srai a0, a0, 63
-; RV64-NEXT: li a1, -1
-; RV64-NEXT: slli a1, a1, 63
-; RV64-NEXT: xor a0, a0, a1
-; RV64-NEXT: .LBB1_2:
-; RV64-NEXT: ret
- %a = mul i64 %y, %z
- %tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %z)
- ret i64 %tmp
-}
-
-define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
-; RV64I-LABEL: func16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 16
-; RV64I-NEXT: sraiw a0, a0, 16
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: slli a1, a1, 16
-; RV64I-NEXT: sraiw a1, a1, 16
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: lui a1, 8
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: bge a0, a1, .LBB2_3
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: lui a1, 1048568
-; RV64I-NEXT: bge a1, a0, .LBB2_4
-; RV64I-NEXT: .LBB2_2:
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB2_3:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: lui a1, 1048568
-; RV64I-NEXT: blt a1, a0, .LBB2_2
-; RV64I-NEXT: .LBB2_4:
-; RV64I-NEXT: lui a0, 1048568
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func16:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: sext.h a0, a0
-; RV64IZbb-NEXT: mul a1, a1, a2
-; RV64IZbb-NEXT: sext.h a1, a1
-; RV64IZbb-NEXT: subw a0, a0, a1
-; RV64IZbb-NEXT: lui a1, 8
-; RV64IZbb-NEXT: addiw a1, a1, -1
-; RV64IZbb-NEXT: min a0, a0, a1
-; RV64IZbb-NEXT: lui a1, 1048568
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i16 %y, %z
- %tmp = call i16 @llvm.ssub.sat.i16(i16 %x, i16 %a)
- ret i16 %tmp
-}
-
-define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
-; RV64I-LABEL: func8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: sraiw a0, a0, 24
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: slli a1, a1, 24
-; RV64I-NEXT: sraiw a1, a1, 24
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: li a1, 127
-; RV64I-NEXT: bge a0, a1, .LBB3_3
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: li a1, -127
-; RV64I-NEXT: blt a0, a1, .LBB3_4
-; RV64I-NEXT: .LBB3_2:
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB3_3:
-; RV64I-NEXT: li a0, 127
-; RV64I-NEXT: li a1, -127
-; RV64I-NEXT: bge a0, a1, .LBB3_2
-; RV64I-NEXT: .LBB3_4:
-; RV64I-NEXT: li a0, -128
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func8:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: sext.b a0, a0
-; RV64IZbb-NEXT: mul a1, a1, a2
-; RV64IZbb-NEXT: sext.b a1, a1
-; RV64IZbb-NEXT: subw a0, a0, a1
-; RV64IZbb-NEXT: li a1, 127
-; RV64IZbb-NEXT: min a0, a0, a1
-; RV64IZbb-NEXT: li a1, -128
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i8 %y, %z
- %tmp = call i8 @llvm.ssub.sat.i8(i8 %x, i8 %a)
- ret i8 %tmp
-}
-
-define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
-; RV64I-LABEL: func4:
-; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 28
-; RV64I-NEXT: sraiw a0, a0, 28
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: slli a1, a1, 28
-; RV64I-NEXT: sraiw a1, a1, 28
-; RV64I-NEXT: subw a0, a0, a1
-; RV64I-NEXT: li a1, 7
-; RV64I-NEXT: bge a0, a1, .LBB4_3
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: li a1, -7
-; RV64I-NEXT: blt a0, a1, .LBB4_4
-; RV64I-NEXT: .LBB4_2:
-; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB4_3:
-; RV64I-NEXT: li a0, 7
-; RV64I-NEXT: li a1, -7
-; RV64I-NEXT: bge a0, a1, .LBB4_2
-; RV64I-NEXT: .LBB4_4:
-; RV64I-NEXT: li a0, -8
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func4:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: slli a0, a0, 28
-; RV64IZbb-NEXT: sraiw a0, a0, 28
-; RV64IZbb-NEXT: mul a1, a1, a2
-; RV64IZbb-NEXT: slli a1, a1, 28
-; RV64IZbb-NEXT: sraiw a1, a1, 28
-; RV64IZbb-NEXT: subw a0, a0, a1
-; RV64IZbb-NEXT: li a1, 7
-; RV64IZbb-NEXT: min a0, a0, a1
-; RV64IZbb-NEXT: li a1, -8
-; RV64IZbb-NEXT: max a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i4 %y, %z
- %tmp = call i4 @llvm.ssub.sat.i4(i4 %x, i4 %a)
- ret i4 %tmp
-}
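
The plain RV64I checks in the two files above detect i32 overflow without a wider compare: they compute the difference both as subw (32-bit, sign-extended) and sub (64-bit) and saturate when the two disagree. A C sketch of that idea (illustrative only, assuming two's-complement truncation on the narrowing conversion):

#include <stdint.h>

static int32_t ssub_sat_i32_branchy(int32_t x, int32_t y) {
    int64_t wide = (int64_t)x - (int64_t)y;  /* like "sub"  */
    int32_t narrow = (int32_t)wide;          /* like "subw": truncate, sign-extend */
    if ((int64_t)narrow == wide)
        return narrow;                       /* results agree: no overflow */
    /* the sraiw/xor idiom: the sign of the true result picks the bound */
    return wide < 0 ? INT32_MIN : INT32_MAX;
}
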
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat.ll
deleted file mode 100644
index 6b42631..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat.ll
+++ /dev/null
@@ -1,120 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64I
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64IZbb
-
-declare i4 @llvm.uadd.sat.i4(i4, i4)
-declare i8 @llvm.uadd.sat.i8(i8, i8)
-declare i16 @llvm.uadd.sat.i16(i16, i16)
-declare i32 @llvm.uadd.sat.i32(i32, i32)
-declare i64 @llvm.uadd.sat.i64(i64, i64)
-
-define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
-; RV64I-LABEL: func:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addw a1, a0, a1
-; RV64I-NEXT: sltu a0, a1, a0
-; RV64I-NEXT: negw a0, a0
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: not a2, a1
-; RV64IZbb-NEXT: minu a0, a0, a2
-; RV64IZbb-NEXT: addw a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y);
- ret i32 %tmp;
-}
-
-define i64 @func2(i64 %x, i64 %y) nounwind {
-; RV64I-LABEL: func2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: add a1, a0, a1
-; RV64I-NEXT: sltu a0, a1, a0
-; RV64I-NEXT: neg a0, a0
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func2:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: not a2, a1
-; RV64IZbb-NEXT: minu a0, a0, a2
-; RV64IZbb-NEXT: add a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i64 @llvm.uadd.sat.i64(i64 %x, i64 %y);
- ret i64 %tmp;
-}
-
-define zeroext i16 @func16(i16 zeroext %x, i16 zeroext %y) nounwind {
-; RV64I-LABEL: func16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: lui a1, 16
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: bltu a0, a1, .LBB2_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB2_2:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func16:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: addw a0, a0, a1
-; RV64IZbb-NEXT: lui a1, 16
-; RV64IZbb-NEXT: addiw a1, a1, -1
-; RV64IZbb-NEXT: minu a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i16 @llvm.uadd.sat.i16(i16 %x, i16 %y);
- ret i16 %tmp;
-}
-
-define zeroext i8 @func8(i8 zeroext %x, i8 zeroext %y) nounwind {
-; RV64I-LABEL: func8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: li a1, 255
-; RV64I-NEXT: bltu a0, a1, .LBB3_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: li a0, 255
-; RV64I-NEXT: .LBB3_2:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func8:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: addw a0, a0, a1
-; RV64IZbb-NEXT: li a1, 255
-; RV64IZbb-NEXT: minu a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %y);
- ret i8 %tmp;
-}
-
-define zeroext i4 @func3(i4 zeroext %x, i4 zeroext %y) nounwind {
-; RV64I-LABEL: func3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: li a1, 15
-; RV64I-NEXT: bltu a0, a1, .LBB4_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: li a0, 15
-; RV64I-NEXT: .LBB4_2:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func3:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: addw a0, a0, a1
-; RV64IZbb-NEXT: li a1, 15
-; RV64IZbb-NEXT: minu a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %y);
- ret i4 %tmp;
-}
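
Again for orientation: two uadd.sat lowerings are visible above. The base ISA ORs the wrapped sum with an all-ones mask derived from the carry, while Zbb clamps x to UINT32_MAX - y (which is ~y) before adding, so the add can never wrap. A C sketch of both, as assumptions for illustration:

#include <stdint.h>

static uint32_t uadd_sat_u32(uint32_t x, uint32_t y) {
    uint32_t sum = x + y;                 /* may wrap */
    uint32_t mask = -(uint32_t)(sum < x); /* all ones iff it wrapped */
    return sum | mask;                    /* saturate to UINT32_MAX on wrap */
}

static uint32_t uadd_sat_u32_zbb(uint32_t x, uint32_t y) {
    uint32_t cap = ~y;                    /* UINT32_MAX - y */
    return (x < cap ? x : cap) + y;       /* minu, then a wrap-free add */
}
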
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat_plus.ll
deleted file mode 100644
index db8f3e1..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat_plus.ll
+++ /dev/null
@@ -1,141 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64I
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64IZbb
-
-declare i4 @llvm.uadd.sat.i4(i4, i4)
-declare i8 @llvm.uadd.sat.i8(i8, i8)
-declare i16 @llvm.uadd.sat.i16(i16, i16)
-declare i32 @llvm.uadd.sat.i32(i32, i32)
-declare i64 @llvm.uadd.sat.i64(i64, i64)
-
-define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
-; RV64I-LABEL: func32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: addw a1, a0, a1
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: sltu a0, a1, a0
-; RV64I-NEXT: neg a0, a0
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func32:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: mulw a1, a1, a2
-; RV64IZbb-NEXT: not a2, a1
-; RV64IZbb-NEXT: sext.w a0, a0
-; RV64IZbb-NEXT: minu a0, a0, a2
-; RV64IZbb-NEXT: add a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i32 %y, %z
- %tmp = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %a)
- ret i32 %tmp
-}
-
-define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
-; RV64I-LABEL: func64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: add a2, a0, a2
-; RV64I-NEXT: sltu a0, a2, a0
-; RV64I-NEXT: neg a0, a0
-; RV64I-NEXT: or a0, a0, a2
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func64:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: not a1, a2
-; RV64IZbb-NEXT: minu a0, a0, a1
-; RV64IZbb-NEXT: add a0, a0, a2
-; RV64IZbb-NEXT: ret
- %a = mul i64 %y, %z
- %tmp = call i64 @llvm.uadd.sat.i64(i64 %x, i64 %z)
- ret i64 %tmp
-}
-
-define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
-; RV64I-LABEL: func16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a3, 16
-; RV64I-NEXT: addiw a3, a3, -1
-; RV64I-NEXT: and a0, a0, a3
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: and a1, a1, a3
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: bltu a0, a3, .LBB2_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a3
-; RV64I-NEXT: .LBB2_2:
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func16:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: zext.h a0, a0
-; RV64IZbb-NEXT: mul a1, a1, a2
-; RV64IZbb-NEXT: zext.h a1, a1
-; RV64IZbb-NEXT: addw a0, a0, a1
-; RV64IZbb-NEXT: lui a1, 16
-; RV64IZbb-NEXT: addiw a1, a1, -1
-; RV64IZbb-NEXT: minu a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i16 %y, %z
- %tmp = call i16 @llvm.uadd.sat.i16(i16 %x, i16 %a)
- ret i16 %tmp
-}
-
-define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
-; RV64I-LABEL: func8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: li a1, 255
-; RV64I-NEXT: bltu a0, a1, .LBB3_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: li a0, 255
-; RV64I-NEXT: .LBB3_2:
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func8:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: andi a0, a0, 255
-; RV64IZbb-NEXT: mul a1, a1, a2
-; RV64IZbb-NEXT: andi a1, a1, 255
-; RV64IZbb-NEXT: addw a0, a0, a1
-; RV64IZbb-NEXT: li a1, 255
-; RV64IZbb-NEXT: minu a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i8 %y, %z
- %tmp = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %a)
- ret i8 %tmp
-}
-
-define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
-; RV64I-LABEL: func4:
-; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 15
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: andi a1, a1, 15
-; RV64I-NEXT: addw a0, a0, a1
-; RV64I-NEXT: li a1, 15
-; RV64I-NEXT: bltu a0, a1, .LBB4_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: li a0, 15
-; RV64I-NEXT: .LBB4_2:
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func4:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: andi a0, a0, 15
-; RV64IZbb-NEXT: mul a1, a1, a2
-; RV64IZbb-NEXT: andi a1, a1, 15
-; RV64IZbb-NEXT: addw a0, a0, a1
-; RV64IZbb-NEXT: li a1, 15
-; RV64IZbb-NEXT: minu a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i4 %y, %z
- %tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %a)
- ret i4 %tmp
-}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat.ll
deleted file mode 100644
index beca180..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat.ll
+++ /dev/null
@@ -1,113 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64I
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64IZbb
-
-declare i4 @llvm.usub.sat.i4(i4, i4)
-declare i8 @llvm.usub.sat.i8(i8, i8)
-declare i16 @llvm.usub.sat.i16(i16, i16)
-declare i32 @llvm.usub.sat.i32(i32, i32)
-declare i64 @llvm.usub.sat.i64(i64, i64)
-
-define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
-; RV64I-LABEL: func:
-; RV64I: # %bb.0:
-; RV64I-NEXT: subw a1, a0, a1
-; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: addiw a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: maxu a0, a0, a1
-; RV64IZbb-NEXT: subw a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %y);
- ret i32 %tmp;
-}
-
-define i64 @func2(i64 %x, i64 %y) nounwind {
-; RV64I-LABEL: func2:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sub a1, a0, a1
-; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func2:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: maxu a0, a0, a1
-; RV64IZbb-NEXT: sub a0, a0, a1
-; RV64IZbb-NEXT: ret
- %tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %y);
- ret i64 %tmp;
-}
-
-define zeroext i16 @func16(i16 zeroext %x, i16 zeroext %y) nounwind {
-; RV64I-LABEL: func16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: subw a1, a0, a1
-; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func16:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: maxu a0, a0, a1
-; RV64IZbb-NEXT: subw a0, a0, a1
-; RV64IZbb-NEXT: slli a0, a0, 32
-; RV64IZbb-NEXT: srli a0, a0, 32
-; RV64IZbb-NEXT: ret
- %tmp = call i16 @llvm.usub.sat.i16(i16 %x, i16 %y);
- ret i16 %tmp;
-}
-
-define zeroext i8 @func8(i8 zeroext %x, i8 zeroext %y) nounwind {
-; RV64I-LABEL: func8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: subw a1, a0, a1
-; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func8:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: maxu a0, a0, a1
-; RV64IZbb-NEXT: subw a0, a0, a1
-; RV64IZbb-NEXT: slli a0, a0, 32
-; RV64IZbb-NEXT: srli a0, a0, 32
-; RV64IZbb-NEXT: ret
- %tmp = call i8 @llvm.usub.sat.i8(i8 %x, i8 %y);
- ret i8 %tmp;
-}
-
-define zeroext i4 @func3(i4 zeroext %x, i4 zeroext %y) nounwind {
-; RV64I-LABEL: func3:
-; RV64I: # %bb.0:
-; RV64I-NEXT: subw a1, a0, a1
-; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func3:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: maxu a0, a0, a1
-; RV64IZbb-NEXT: subw a0, a0, a1
-; RV64IZbb-NEXT: slli a0, a0, 32
-; RV64IZbb-NEXT: srli a0, a0, 32
-; RV64IZbb-NEXT: ret
- %tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %y);
- ret i4 %tmp;
-}
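
usub.sat mirrors this: the base-ISA checks above AND the difference with a mask that is zero exactly when the subtract wrapped, while Zbb raises x to at least y first (maxu) so the subtract cannot go below zero. A C sketch (illustrative only):

#include <stdint.h>

static uint32_t usub_sat_u32(uint32_t x, uint32_t y) {
    uint32_t diff = x - y;                      /* may wrap */
    uint32_t mask = (uint32_t)(x < diff) - 1u;  /* 0 on wrap, all ones otherwise */
    return diff & mask;
}

static uint32_t usub_sat_u32_zbb(uint32_t x, uint32_t y) {
    return (x > y ? x : y) - y;                 /* maxu, then sub */
}
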
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat_plus.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat_plus.ll
deleted file mode 100644
index b12bd50..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat_plus.ll
+++ /dev/null
@@ -1,131 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64I
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64IZbb
-
-declare i4 @llvm.usub.sat.i4(i4, i4)
-declare i8 @llvm.usub.sat.i8(i8, i8)
-declare i16 @llvm.usub.sat.i16(i16, i16)
-declare i32 @llvm.usub.sat.i32(i32, i32)
-declare i64 @llvm.usub.sat.i64(i64, i64)
-
-define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
-; RV64I-LABEL: func32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: subw a1, a0, a1
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func32:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: mulw a1, a1, a2
-; RV64IZbb-NEXT: sext.w a0, a0
-; RV64IZbb-NEXT: maxu a0, a0, a1
-; RV64IZbb-NEXT: sub a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i32 %y, %z
- %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %a)
- ret i32 %tmp
-}
-
-define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
-; RV64I-LABEL: func64:
-; RV64I: # %bb.0:
-; RV64I-NEXT: sub a1, a0, a2
-; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func64:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: maxu a0, a0, a2
-; RV64IZbb-NEXT: sub a0, a0, a2
-; RV64IZbb-NEXT: ret
- %a = mul i64 %y, %z
- %tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %z)
- ret i64 %tmp
-}
-
-define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
-; RV64I-LABEL: func16:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lui a3, 16
-; RV64I-NEXT: addi a3, a3, -1
-; RV64I-NEXT: and a0, a0, a3
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: and a1, a1, a3
-; RV64I-NEXT: subw a1, a0, a1
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func16:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: zext.h a0, a0
-; RV64IZbb-NEXT: mul a1, a1, a2
-; RV64IZbb-NEXT: zext.h a1, a1
-; RV64IZbb-NEXT: maxu a0, a0, a1
-; RV64IZbb-NEXT: sub a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i16 %y, %z
- %tmp = call i16 @llvm.usub.sat.i16(i16 %x, i16 %a)
- ret i16 %tmp
-}
-
-define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
-; RV64I-LABEL: func8:
-; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: subw a1, a0, a1
-; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func8:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: andi a0, a0, 255
-; RV64IZbb-NEXT: mul a1, a1, a2
-; RV64IZbb-NEXT: andi a1, a1, 255
-; RV64IZbb-NEXT: maxu a0, a0, a1
-; RV64IZbb-NEXT: sub a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i8 %y, %z
- %tmp = call i8 @llvm.usub.sat.i8(i8 %x, i8 %a)
- ret i8 %tmp
-}
-
-define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
-; RV64I-LABEL: func4:
-; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 15
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: andi a1, a1, 15
-; RV64I-NEXT: subw a1, a0, a1
-; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: ret
-;
-; RV64IZbb-LABEL: func4:
-; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: andi a0, a0, 15
-; RV64IZbb-NEXT: mul a1, a1, a2
-; RV64IZbb-NEXT: andi a1, a1, 15
-; RV64IZbb-NEXT: maxu a0, a0, a1
-; RV64IZbb-NEXT: sub a0, a0, a1
-; RV64IZbb-NEXT: ret
- %a = mul i4 %y, %z
- %tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %a)
- ret i4 %tmp
-}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/vararg.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/vararg.ll
deleted file mode 100644
index 7fe67a0..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/vararg.ll
+++ /dev/null
@@ -1,1391 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -verify-machineinstrs \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d -target-abi lp64f \
-; RUN: -verify-machineinstrs -riscv-experimental-rv64-legal-i32 \
-; RUN: | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d -target-abi lp64d \
-; RUN: -verify-machineinstrs -riscv-experimental-rv64-legal-i32 \
-; RUN: | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -verify-machineinstrs -frame-pointer=all \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck -check-prefix=LP64-LP64F-LP64D-WITHFP %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -target-abi lp64e \
-; RUN: -verify-machineinstrs -riscv-experimental-rv64-legal-i32 \
-; RUN: | FileCheck -check-prefix=LP64E-FPELIM %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -target-abi lp64e -frame-pointer=all \
-; RUN: -verify-machineinstrs -riscv-experimental-rv64-legal-i32 \
-; RUN: | FileCheck -check-prefix=LP64E-WITHFP %s
-
-; The same vararg calling convention is used for ilp32/ilp32f/ilp32d and for
-; lp64/lp64f/lp64d. Different CHECK lines are required for RV32D due to slight
-; codegen differences in the way the f64 load operations are lowered.
-; The nounwind attribute is omitted for some of the tests, to check that CFI
-; directives are correctly generated.
-
-declare void @llvm.va_start(ptr)
-declare void @llvm.va_end(ptr)
-
-declare void @notdead(ptr)
-
-; Although frontends are recommended not to generate va_arg due to the lack of
-; support for aggregate types, we test simple cases here to ensure they are
-; lowered correctly.
-
-define i32 @va1(ptr %fmt, ...) {
-; LP64-LP64F-LP64D-FPELIM-LABEL: va1:
-; LP64-LP64F-LP64D-FPELIM: # %bb.0:
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80
-; LP64-LP64F-LP64D-FPELIM-NEXT: .cfi_def_cfa_offset 80
-; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, a1
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 28
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80
-; LP64-LP64F-LP64D-FPELIM-NEXT: ret
-;
-; LP64-LP64F-LP64D-WITHFP-LABEL: va1:
-; LP64-LP64F-LP64D-WITHFP: # %bb.0:
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, -96
-; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_def_cfa_offset 96
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_offset ra, -72
-; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_offset s0, -80
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 32
-; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_def_cfa s0, 64
-; LP64-LP64F-LP64D-WITHFP-NEXT: mv a0, a1
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a7, 56(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a6, 48(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a5, 40(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 16(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, s0, 12
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
-; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-;
-; LP64E-FPELIM-LABEL: va1:
-; LP64E-FPELIM: # %bb.0:
-; LP64E-FPELIM-NEXT: addi sp, sp, -56
-; LP64E-FPELIM-NEXT: .cfi_def_cfa_offset 56
-; LP64E-FPELIM-NEXT: mv a0, a1
-; LP64E-FPELIM-NEXT: sd a5, 48(sp)
-; LP64E-FPELIM-NEXT: sd a4, 40(sp)
-; LP64E-FPELIM-NEXT: sd a3, 32(sp)
-; LP64E-FPELIM-NEXT: sd a2, 24(sp)
-; LP64E-FPELIM-NEXT: sd a1, 16(sp)
-; LP64E-FPELIM-NEXT: addi a1, sp, 20
-; LP64E-FPELIM-NEXT: sd a1, 0(sp)
-; LP64E-FPELIM-NEXT: addi sp, sp, 56
-; LP64E-FPELIM-NEXT: ret
-;
-; LP64E-WITHFP-LABEL: va1:
-; LP64E-WITHFP: # %bb.0:
-; LP64E-WITHFP-NEXT: addi sp, sp, -72
-; LP64E-WITHFP-NEXT: .cfi_def_cfa_offset 72
-; LP64E-WITHFP-NEXT: sd ra, 16(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: .cfi_offset ra, -56
-; LP64E-WITHFP-NEXT: .cfi_offset s0, -64
-; LP64E-WITHFP-NEXT: addi s0, sp, 24
-; LP64E-WITHFP-NEXT: .cfi_def_cfa s0, 48
-; LP64E-WITHFP-NEXT: mv a0, a1
-; LP64E-WITHFP-NEXT: sd a5, 40(s0)
-; LP64E-WITHFP-NEXT: sd a4, 32(s0)
-; LP64E-WITHFP-NEXT: sd a3, 24(s0)
-; LP64E-WITHFP-NEXT: sd a2, 16(s0)
-; LP64E-WITHFP-NEXT: sd a1, 8(s0)
-; LP64E-WITHFP-NEXT: addi a1, s0, 12
-; LP64E-WITHFP-NEXT: sd a1, -24(s0)
-; LP64E-WITHFP-NEXT: ld ra, 16(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: addi sp, sp, 72
-; LP64E-WITHFP-NEXT: ret
- %va = alloca ptr
- call void @llvm.va_start(ptr %va)
- %argp.cur = load ptr, ptr %va, align 4
- %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
- store ptr %argp.next, ptr %va, align 4
- %1 = load i32, ptr %argp.cur, align 4
- call void @llvm.va_end(ptr %va)
- ret i32 %1
-}
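
The IR in va1 hand-expands the va_arg sequence rather than using the va_arg instruction. A plausible C source for it (an assumption for illustration; the test itself is hand-written IR):

#include <stdarg.h>

int va1(const char *fmt, ...) {
    va_list va;
    va_start(va, fmt);
    int r = va_arg(va, int);  /* read the first variadic argument */
    va_end(va);
    return r;
}
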
-
-define i32 @va1_va_arg(ptr %fmt, ...) nounwind {
-; LP64-LP64F-LP64D-FPELIM-LABEL: va1_va_arg:
-; LP64-LP64F-LP64D-FPELIM: # %bb.0:
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80
-; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, a1
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 32
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80
-; LP64-LP64F-LP64D-FPELIM-NEXT: ret
-;
-; LP64-LP64F-LP64D-WITHFP-LABEL: va1_va_arg:
-; LP64-LP64F-LP64D-WITHFP: # %bb.0:
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, -96
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 32
-; LP64-LP64F-LP64D-WITHFP-NEXT: mv a0, a1
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a7, 56(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a6, 48(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a5, 40(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 16(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, s0, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
-; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-;
-; LP64E-FPELIM-LABEL: va1_va_arg:
-; LP64E-FPELIM: # %bb.0:
-; LP64E-FPELIM-NEXT: addi sp, sp, -56
-; LP64E-FPELIM-NEXT: mv a0, a1
-; LP64E-FPELIM-NEXT: sd a5, 48(sp)
-; LP64E-FPELIM-NEXT: sd a4, 40(sp)
-; LP64E-FPELIM-NEXT: sd a3, 32(sp)
-; LP64E-FPELIM-NEXT: sd a2, 24(sp)
-; LP64E-FPELIM-NEXT: sd a1, 16(sp)
-; LP64E-FPELIM-NEXT: addi a1, sp, 24
-; LP64E-FPELIM-NEXT: sd a1, 0(sp)
-; LP64E-FPELIM-NEXT: addi sp, sp, 56
-; LP64E-FPELIM-NEXT: ret
-;
-; LP64E-WITHFP-LABEL: va1_va_arg:
-; LP64E-WITHFP: # %bb.0:
-; LP64E-WITHFP-NEXT: addi sp, sp, -72
-; LP64E-WITHFP-NEXT: sd ra, 16(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: addi s0, sp, 24
-; LP64E-WITHFP-NEXT: mv a0, a1
-; LP64E-WITHFP-NEXT: sd a5, 40(s0)
-; LP64E-WITHFP-NEXT: sd a4, 32(s0)
-; LP64E-WITHFP-NEXT: sd a3, 24(s0)
-; LP64E-WITHFP-NEXT: sd a2, 16(s0)
-; LP64E-WITHFP-NEXT: sd a1, 8(s0)
-; LP64E-WITHFP-NEXT: addi a1, s0, 16
-; LP64E-WITHFP-NEXT: sd a1, -24(s0)
-; LP64E-WITHFP-NEXT: ld ra, 16(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: addi sp, sp, 72
-; LP64E-WITHFP-NEXT: ret
- %va = alloca ptr
- call void @llvm.va_start(ptr %va)
- %1 = va_arg ptr %va, i32
- call void @llvm.va_end(ptr %va)
- ret i32 %1
-}
-
-; Ensure the adjustment when restoring the stack pointer using the frame
-; pointer is correct
-define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
-; LP64-LP64F-LP64D-FPELIM-LABEL: va1_va_arg_alloca:
-; LP64-LP64F-LP64D-FPELIM: # %bb.0:
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -96
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi s0, sp, 32
-; LP64-LP64F-LP64D-FPELIM-NEXT: mv s1, a1
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 56(s0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 48(s0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 40(s0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 32(s0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 24(s0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 16(s0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(s0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, s0, 16
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, -32(s0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a1, 32
-; LP64-LP64F-LP64D-FPELIM-NEXT: srli a0, a0, 32
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 15
-; LP64-LP64F-LP64D-FPELIM-NEXT: andi a0, a0, -16
-; LP64-LP64F-LP64D-FPELIM-NEXT: sub a0, sp, a0
-; LP64-LP64F-LP64D-FPELIM-NEXT: mv sp, a0
-; LP64-LP64F-LP64D-FPELIM-NEXT: call notdead
-; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, s1
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, s0, -32
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 96
-; LP64-LP64F-LP64D-FPELIM-NEXT: ret
-;
-; LP64-LP64F-LP64D-WITHFP-LABEL: va1_va_arg_alloca:
-; LP64-LP64F-LP64D-WITHFP: # %bb.0:
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, -96
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 32
-; LP64-LP64F-LP64D-WITHFP-NEXT: mv s1, a1
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a7, 56(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a6, 48(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a5, 40(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 16(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, s0, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, -32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a0, a1, 32
-; LP64-LP64F-LP64D-WITHFP-NEXT: srli a0, a0, 32
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 15
-; LP64-LP64F-LP64D-WITHFP-NEXT: andi a0, a0, -16
-; LP64-LP64F-LP64D-WITHFP-NEXT: sub a0, sp, a0
-; LP64-LP64F-LP64D-WITHFP-NEXT: mv sp, a0
-; LP64-LP64F-LP64D-WITHFP-NEXT: call notdead
-; LP64-LP64F-LP64D-WITHFP-NEXT: mv a0, s1
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, s0, -32
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
-; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-;
-; LP64E-FPELIM-LABEL: va1_va_arg_alloca:
-; LP64E-FPELIM: # %bb.0:
-; LP64E-FPELIM-NEXT: addi sp, sp, -80
-; LP64E-FPELIM-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; LP64E-FPELIM-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; LP64E-FPELIM-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; LP64E-FPELIM-NEXT: addi s0, sp, 32
-; LP64E-FPELIM-NEXT: mv s1, a1
-; LP64E-FPELIM-NEXT: sd a5, 40(s0)
-; LP64E-FPELIM-NEXT: sd a4, 32(s0)
-; LP64E-FPELIM-NEXT: sd a3, 24(s0)
-; LP64E-FPELIM-NEXT: sd a2, 16(s0)
-; LP64E-FPELIM-NEXT: sd a1, 8(s0)
-; LP64E-FPELIM-NEXT: addi a0, s0, 16
-; LP64E-FPELIM-NEXT: sd a0, -32(s0)
-; LP64E-FPELIM-NEXT: slli a0, a1, 32
-; LP64E-FPELIM-NEXT: srli a0, a0, 32
-; LP64E-FPELIM-NEXT: addi a0, a0, 7
-; LP64E-FPELIM-NEXT: andi a0, a0, -8
-; LP64E-FPELIM-NEXT: sub a0, sp, a0
-; LP64E-FPELIM-NEXT: mv sp, a0
-; LP64E-FPELIM-NEXT: call notdead
-; LP64E-FPELIM-NEXT: mv a0, s1
-; LP64E-FPELIM-NEXT: addi sp, s0, -32
-; LP64E-FPELIM-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; LP64E-FPELIM-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; LP64E-FPELIM-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
-; LP64E-FPELIM-NEXT: addi sp, sp, 80
-; LP64E-FPELIM-NEXT: ret
-;
-; LP64E-WITHFP-LABEL: va1_va_arg_alloca:
-; LP64E-WITHFP: # %bb.0:
-; LP64E-WITHFP-NEXT: addi sp, sp, -80
-; LP64E-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: addi s0, sp, 32
-; LP64E-WITHFP-NEXT: mv s1, a1
-; LP64E-WITHFP-NEXT: sd a5, 40(s0)
-; LP64E-WITHFP-NEXT: sd a4, 32(s0)
-; LP64E-WITHFP-NEXT: sd a3, 24(s0)
-; LP64E-WITHFP-NEXT: sd a2, 16(s0)
-; LP64E-WITHFP-NEXT: sd a1, 8(s0)
-; LP64E-WITHFP-NEXT: addi a0, s0, 16
-; LP64E-WITHFP-NEXT: sd a0, -32(s0)
-; LP64E-WITHFP-NEXT: slli a0, a1, 32
-; LP64E-WITHFP-NEXT: srli a0, a0, 32
-; LP64E-WITHFP-NEXT: addi a0, a0, 7
-; LP64E-WITHFP-NEXT: andi a0, a0, -8
-; LP64E-WITHFP-NEXT: sub a0, sp, a0
-; LP64E-WITHFP-NEXT: mv sp, a0
-; LP64E-WITHFP-NEXT: call notdead
-; LP64E-WITHFP-NEXT: mv a0, s1
-; LP64E-WITHFP-NEXT: addi sp, s0, -32
-; LP64E-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: addi sp, sp, 80
-; LP64E-WITHFP-NEXT: ret
- %va = alloca ptr
- call void @llvm.va_start(ptr %va)
- %1 = va_arg ptr %va, i32
- %2 = alloca i8, i32 %1
- call void @notdead(ptr %2)
- call void @llvm.va_end(ptr %va)
- ret i32 %1
-}
-
-define void @va1_caller() nounwind {
-; Pass a double, as a float would be promoted by a C/C++ frontend
-; LP64-LP64F-LP64D-FPELIM-LABEL: va1_caller:
-; LP64-LP64F-LP64D-FPELIM: # %bb.0:
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1023
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 52
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a2, 2
-; LP64-LP64F-LP64D-FPELIM-NEXT: call va1
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16
-; LP64-LP64F-LP64D-FPELIM-NEXT: ret
-;
-; LP64-LP64F-LP64D-WITHFP-LABEL: va1_caller:
-; LP64-LP64F-LP64D-WITHFP: # %bb.0:
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, -16
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 1023
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a1, 52
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a2, 2
-; LP64-LP64F-LP64D-WITHFP-NEXT: call va1
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-;
-; LP64E-FPELIM-LABEL: va1_caller:
-; LP64E-FPELIM: # %bb.0:
-; LP64E-FPELIM-NEXT: addi sp, sp, -8
-; LP64E-FPELIM-NEXT: sd ra, 0(sp) # 8-byte Folded Spill
-; LP64E-FPELIM-NEXT: li a1, 1023
-; LP64E-FPELIM-NEXT: slli a1, a1, 52
-; LP64E-FPELIM-NEXT: li a2, 2
-; LP64E-FPELIM-NEXT: call va1
-; LP64E-FPELIM-NEXT: ld ra, 0(sp) # 8-byte Folded Reload
-; LP64E-FPELIM-NEXT: addi sp, sp, 8
-; LP64E-FPELIM-NEXT: ret
-;
-; LP64E-WITHFP-LABEL: va1_caller:
-; LP64E-WITHFP: # %bb.0:
-; LP64E-WITHFP-NEXT: addi sp, sp, -16
-; LP64E-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: addi s0, sp, 16
-; LP64E-WITHFP-NEXT: li a1, 1023
-; LP64E-WITHFP-NEXT: slli a1, a1, 52
-; LP64E-WITHFP-NEXT: li a2, 2
-; LP64E-WITHFP-NEXT: call va1
-; LP64E-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: addi sp, sp, 16
-; LP64E-WITHFP-NEXT: ret
- %1 = call i32 (ptr, ...) @va1(ptr undef, double 1.0, i32 2)
- ret void
-}
-
-; Ensure that 2x xlen size+alignment varargs are accessed via an "aligned"
-; register pair (where the first register is even-numbered).
-
-define i64 @va2(ptr %fmt, ...) nounwind {
-; LP64-LP64F-LP64D-FPELIM-LABEL: va2:
-; LP64-LP64F-LP64D-FPELIM: # %bb.0:
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80
-; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, a1
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 39
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80
-; LP64-LP64F-LP64D-FPELIM-NEXT: ret
-;
-; LP64-LP64F-LP64D-WITHFP-LABEL: va2:
-; LP64-LP64F-LP64D-WITHFP: # %bb.0:
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, -96
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 32
-; LP64-LP64F-LP64D-WITHFP-NEXT: mv a0, a1
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a7, 56(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a6, 48(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a5, 40(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 16(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, s0, 23
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
-; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-;
-; LP64E-FPELIM-LABEL: va2:
-; LP64E-FPELIM: # %bb.0:
-; LP64E-FPELIM-NEXT: addi sp, sp, -56
-; LP64E-FPELIM-NEXT: mv a0, a1
-; LP64E-FPELIM-NEXT: sd a5, 48(sp)
-; LP64E-FPELIM-NEXT: sd a4, 40(sp)
-; LP64E-FPELIM-NEXT: sd a3, 32(sp)
-; LP64E-FPELIM-NEXT: sd a2, 24(sp)
-; LP64E-FPELIM-NEXT: sd a1, 16(sp)
-; LP64E-FPELIM-NEXT: addi a1, sp, 31
-; LP64E-FPELIM-NEXT: sd a1, 0(sp)
-; LP64E-FPELIM-NEXT: addi sp, sp, 56
-; LP64E-FPELIM-NEXT: ret
-;
-; LP64E-WITHFP-LABEL: va2:
-; LP64E-WITHFP: # %bb.0:
-; LP64E-WITHFP-NEXT: addi sp, sp, -72
-; LP64E-WITHFP-NEXT: sd ra, 16(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: addi s0, sp, 24
-; LP64E-WITHFP-NEXT: mv a0, a1
-; LP64E-WITHFP-NEXT: sd a5, 40(s0)
-; LP64E-WITHFP-NEXT: sd a4, 32(s0)
-; LP64E-WITHFP-NEXT: sd a3, 24(s0)
-; LP64E-WITHFP-NEXT: sd a2, 16(s0)
-; LP64E-WITHFP-NEXT: sd a1, 8(s0)
-; LP64E-WITHFP-NEXT: addi a1, s0, 23
-; LP64E-WITHFP-NEXT: sd a1, -24(s0)
-; LP64E-WITHFP-NEXT: ld ra, 16(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: addi sp, sp, 72
-; LP64E-WITHFP-NEXT: ret
- %va = alloca ptr
- call void @llvm.va_start(ptr %va)
- %argp.cur = load ptr, ptr %va
- %ptrint = ptrtoint ptr %argp.cur to iXLen
- %1 = add iXLen %ptrint, 7
- %2 = and iXLen %1, -8
- %argp.cur.aligned = inttoptr iXLen %1 to ptr
- %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
- store ptr %argp.next, ptr %va
- %3 = inttoptr iXLen %2 to ptr
- %4 = load double, ptr %3, align 8
- %5 = bitcast double %4 to i64
- call void @llvm.va_end(ptr %va)
- ret i64 %5
-}
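
The add-7/and-with--8 sequence in va2's IR realigns the va_list pointer to 8 bytes before loading a double. A C sketch of that step (illustrative; read_aligned_double is a hypothetical helper, not from the test):

#include <stdint.h>

static double read_aligned_double(char **ap) {
    uintptr_t p = ((uintptr_t)*ap + 7) & ~(uintptr_t)7;  /* round up to 8 */
    *ap = (char *)(p + 8);                               /* advance past the slot */
    return *(double *)p;
}
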
-
-define i64 @va2_va_arg(ptr %fmt, ...) nounwind {
-; LP64-LP64F-LP64D-FPELIM-LABEL: va2_va_arg:
-; LP64-LP64F-LP64D-FPELIM: # %bb.0:
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80
-; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, a1
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 32
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80
-; LP64-LP64F-LP64D-FPELIM-NEXT: ret
-;
-; LP64-LP64F-LP64D-WITHFP-LABEL: va2_va_arg:
-; LP64-LP64F-LP64D-WITHFP: # %bb.0:
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, -96
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 32
-; LP64-LP64F-LP64D-WITHFP-NEXT: mv a0, a1
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a7, 56(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a6, 48(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a5, 40(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 16(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, s0, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
-; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-;
-; LP64E-FPELIM-LABEL: va2_va_arg:
-; LP64E-FPELIM: # %bb.0:
-; LP64E-FPELIM-NEXT: addi sp, sp, -56
-; LP64E-FPELIM-NEXT: mv a0, a1
-; LP64E-FPELIM-NEXT: sd a5, 48(sp)
-; LP64E-FPELIM-NEXT: sd a4, 40(sp)
-; LP64E-FPELIM-NEXT: sd a3, 32(sp)
-; LP64E-FPELIM-NEXT: sd a2, 24(sp)
-; LP64E-FPELIM-NEXT: sd a1, 16(sp)
-; LP64E-FPELIM-NEXT: addi a1, sp, 24
-; LP64E-FPELIM-NEXT: sd a1, 0(sp)
-; LP64E-FPELIM-NEXT: addi sp, sp, 56
-; LP64E-FPELIM-NEXT: ret
-;
-; LP64E-WITHFP-LABEL: va2_va_arg:
-; LP64E-WITHFP: # %bb.0:
-; LP64E-WITHFP-NEXT: addi sp, sp, -72
-; LP64E-WITHFP-NEXT: sd ra, 16(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: addi s0, sp, 24
-; LP64E-WITHFP-NEXT: mv a0, a1
-; LP64E-WITHFP-NEXT: sd a5, 40(s0)
-; LP64E-WITHFP-NEXT: sd a4, 32(s0)
-; LP64E-WITHFP-NEXT: sd a3, 24(s0)
-; LP64E-WITHFP-NEXT: sd a2, 16(s0)
-; LP64E-WITHFP-NEXT: sd a1, 8(s0)
-; LP64E-WITHFP-NEXT: addi a1, s0, 16
-; LP64E-WITHFP-NEXT: sd a1, -24(s0)
-; LP64E-WITHFP-NEXT: ld ra, 16(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: addi sp, sp, 72
-; LP64E-WITHFP-NEXT: ret
- %va = alloca ptr
- call void @llvm.va_start(ptr %va)
- %1 = va_arg ptr %va, double
- call void @llvm.va_end(ptr %va)
- %2 = bitcast double %1 to i64
- ret i64 %2
-}
-
-define void @va2_caller() nounwind {
-; LP64-LP64F-LP64D-FPELIM-LABEL: va2_caller:
-; LP64-LP64F-LP64D-FPELIM: # %bb.0:
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1023
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 52
-; LP64-LP64F-LP64D-FPELIM-NEXT: call va2
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16
-; LP64-LP64F-LP64D-FPELIM-NEXT: ret
-;
-; LP64-LP64F-LP64D-WITHFP-LABEL: va2_caller:
-; LP64-LP64F-LP64D-WITHFP: # %bb.0:
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, -16
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 1023
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a1, 52
-; LP64-LP64F-LP64D-WITHFP-NEXT: call va2
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-;
-; LP64E-FPELIM-LABEL: va2_caller:
-; LP64E-FPELIM: # %bb.0:
-; LP64E-FPELIM-NEXT: addi sp, sp, -8
-; LP64E-FPELIM-NEXT: sd ra, 0(sp) # 8-byte Folded Spill
-; LP64E-FPELIM-NEXT: li a1, 1023
-; LP64E-FPELIM-NEXT: slli a1, a1, 52
-; LP64E-FPELIM-NEXT: call va2
-; LP64E-FPELIM-NEXT: ld ra, 0(sp) # 8-byte Folded Reload
-; LP64E-FPELIM-NEXT: addi sp, sp, 8
-; LP64E-FPELIM-NEXT: ret
-;
-; LP64E-WITHFP-LABEL: va2_caller:
-; LP64E-WITHFP: # %bb.0:
-; LP64E-WITHFP-NEXT: addi sp, sp, -16
-; LP64E-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: addi s0, sp, 16
-; LP64E-WITHFP-NEXT: li a1, 1023
-; LP64E-WITHFP-NEXT: slli a1, a1, 52
-; LP64E-WITHFP-NEXT: call va2
-; LP64E-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: addi sp, sp, 16
-; LP64E-WITHFP-NEXT: ret
- %1 = call i64 (ptr, ...) @va2(ptr undef, double 1.000000e+00)
- ret void
-}
-
-; On RV32, ensure a named 2*xlen argument is passed in a1 and a2, while the
-; vararg double is passed in a4 and a5 (rather than a3 and a4)
-
-define i64 @va3(i32 %a, i64 %b, ...) nounwind {
-; LP64-LP64F-LP64D-FPELIM-LABEL: va3:
-; LP64-LP64F-LP64D-FPELIM: # %bb.0:
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -64
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 56(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 48(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 40(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 32(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 16(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, sp, 31
-; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, a1, a2
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 64
-; LP64-LP64F-LP64D-FPELIM-NEXT: ret
-;
-; LP64-LP64F-LP64D-WITHFP-LABEL: va3:
-; LP64-LP64F-LP64D-WITHFP: # %bb.0:
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, -80
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 32
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a7, 40(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a6, 32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a5, 24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 16(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 0(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a3, s0, 15
-; LP64-LP64F-LP64D-WITHFP-NEXT: add a0, a1, a2
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 80
-; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-;
-; LP64E-FPELIM-LABEL: va3:
-; LP64E-FPELIM: # %bb.0:
-; LP64E-FPELIM-NEXT: addi sp, sp, -40
-; LP64E-FPELIM-NEXT: sd a5, 32(sp)
-; LP64E-FPELIM-NEXT: sd a4, 24(sp)
-; LP64E-FPELIM-NEXT: sd a3, 16(sp)
-; LP64E-FPELIM-NEXT: sd a2, 8(sp)
-; LP64E-FPELIM-NEXT: addi a3, sp, 23
-; LP64E-FPELIM-NEXT: add a0, a1, a2
-; LP64E-FPELIM-NEXT: sd a3, 0(sp)
-; LP64E-FPELIM-NEXT: addi sp, sp, 40
-; LP64E-FPELIM-NEXT: ret
-;
-; LP64E-WITHFP-LABEL: va3:
-; LP64E-WITHFP: # %bb.0:
-; LP64E-WITHFP-NEXT: addi sp, sp, -56
-; LP64E-WITHFP-NEXT: sd ra, 16(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: addi s0, sp, 24
-; LP64E-WITHFP-NEXT: sd a5, 24(s0)
-; LP64E-WITHFP-NEXT: sd a4, 16(s0)
-; LP64E-WITHFP-NEXT: sd a3, 8(s0)
-; LP64E-WITHFP-NEXT: sd a2, 0(s0)
-; LP64E-WITHFP-NEXT: addi a3, s0, 15
-; LP64E-WITHFP-NEXT: add a0, a1, a2
-; LP64E-WITHFP-NEXT: sd a3, -24(s0)
-; LP64E-WITHFP-NEXT: ld ra, 16(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: addi sp, sp, 56
-; LP64E-WITHFP-NEXT: ret
- %va = alloca ptr
- call void @llvm.va_start(ptr %va)
- %argp.cur = load ptr, ptr %va
- %ptrint = ptrtoint ptr %argp.cur to iXLen
- %1 = add iXLen %ptrint, 7
- %2 = and iXLen %1, -8
- %argp.cur.aligned = inttoptr iXLen %1 to ptr
- %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
- store ptr %argp.next, ptr %va
- %3 = inttoptr iXLen %2 to ptr
- %4 = load double, ptr %3, align 8
- call void @llvm.va_end(ptr %va)
- %5 = bitcast double %4 to i64
- %6 = add i64 %b, %5
- ret i64 %6
-}
-
-define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
-; LP64-LP64F-LP64D-FPELIM-LABEL: va3_va_arg:
-; LP64-LP64F-LP64D-FPELIM: # %bb.0:
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -64
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 56(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 48(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 40(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 32(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 16(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, sp, 24
-; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, a1, a2
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 64
-; LP64-LP64F-LP64D-FPELIM-NEXT: ret
-;
-; LP64-LP64F-LP64D-WITHFP-LABEL: va3_va_arg:
-; LP64-LP64F-LP64D-WITHFP: # %bb.0:
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, -80
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 32
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a7, 40(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a6, 32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a5, 24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 16(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 0(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a3, s0, 8
-; LP64-LP64F-LP64D-WITHFP-NEXT: add a0, a1, a2
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 80
-; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-;
-; LP64E-FPELIM-LABEL: va3_va_arg:
-; LP64E-FPELIM: # %bb.0:
-; LP64E-FPELIM-NEXT: addi sp, sp, -40
-; LP64E-FPELIM-NEXT: sd a5, 32(sp)
-; LP64E-FPELIM-NEXT: sd a4, 24(sp)
-; LP64E-FPELIM-NEXT: sd a3, 16(sp)
-; LP64E-FPELIM-NEXT: sd a2, 8(sp)
-; LP64E-FPELIM-NEXT: addi a3, sp, 16
-; LP64E-FPELIM-NEXT: add a0, a1, a2
-; LP64E-FPELIM-NEXT: sd a3, 0(sp)
-; LP64E-FPELIM-NEXT: addi sp, sp, 40
-; LP64E-FPELIM-NEXT: ret
-;
-; LP64E-WITHFP-LABEL: va3_va_arg:
-; LP64E-WITHFP: # %bb.0:
-; LP64E-WITHFP-NEXT: addi sp, sp, -56
-; LP64E-WITHFP-NEXT: sd ra, 16(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: addi s0, sp, 24
-; LP64E-WITHFP-NEXT: sd a5, 24(s0)
-; LP64E-WITHFP-NEXT: sd a4, 16(s0)
-; LP64E-WITHFP-NEXT: sd a3, 8(s0)
-; LP64E-WITHFP-NEXT: sd a2, 0(s0)
-; LP64E-WITHFP-NEXT: addi a3, s0, 8
-; LP64E-WITHFP-NEXT: add a0, a1, a2
-; LP64E-WITHFP-NEXT: sd a3, -24(s0)
-; LP64E-WITHFP-NEXT: ld ra, 16(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: addi sp, sp, 56
-; LP64E-WITHFP-NEXT: ret
- %va = alloca ptr
- call void @llvm.va_start(ptr %va)
- %1 = va_arg ptr %va, double
- call void @llvm.va_end(ptr %va)
- %2 = bitcast double %1 to i64
- %3 = add i64 %b, %2
- ret i64 %3
-}
-
-define void @va3_caller() nounwind {
-; LP64-LP64F-LP64D-FPELIM-LABEL: va3_caller:
-; LP64-LP64F-LP64D-FPELIM: # %bb.0:
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a2, 1
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a2, a2, 62
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 2
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1111
-; LP64-LP64F-LP64D-FPELIM-NEXT: call va3
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16
-; LP64-LP64F-LP64D-FPELIM-NEXT: ret
-;
-; LP64-LP64F-LP64D-WITHFP-LABEL: va3_caller:
-; LP64-LP64F-LP64D-WITHFP: # %bb.0:
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, -16
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a2, 1
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a2, a2, 62
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 2
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 1111
-; LP64-LP64F-LP64D-WITHFP-NEXT: call va3
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-;
-; LP64E-FPELIM-LABEL: va3_caller:
-; LP64E-FPELIM: # %bb.0:
-; LP64E-FPELIM-NEXT: addi sp, sp, -8
-; LP64E-FPELIM-NEXT: sd ra, 0(sp) # 8-byte Folded Spill
-; LP64E-FPELIM-NEXT: li a2, 1
-; LP64E-FPELIM-NEXT: slli a2, a2, 62
-; LP64E-FPELIM-NEXT: li a0, 2
-; LP64E-FPELIM-NEXT: li a1, 1111
-; LP64E-FPELIM-NEXT: call va3
-; LP64E-FPELIM-NEXT: ld ra, 0(sp) # 8-byte Folded Reload
-; LP64E-FPELIM-NEXT: addi sp, sp, 8
-; LP64E-FPELIM-NEXT: ret
-;
-; LP64E-WITHFP-LABEL: va3_caller:
-; LP64E-WITHFP: # %bb.0:
-; LP64E-WITHFP-NEXT: addi sp, sp, -16
-; LP64E-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: addi s0, sp, 16
-; LP64E-WITHFP-NEXT: li a2, 1
-; LP64E-WITHFP-NEXT: slli a2, a2, 62
-; LP64E-WITHFP-NEXT: li a0, 2
-; LP64E-WITHFP-NEXT: li a1, 1111
-; LP64E-WITHFP-NEXT: call va3
-; LP64E-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: addi sp, sp, 16
-; LP64E-WITHFP-NEXT: ret
- %1 = call i64 (i32, i64, ...) @va3(i32 2, i64 1111, double 2.000000e+00)
- ret void
-}
-
-declare void @llvm.va_copy(ptr, ptr)
-
-define i32 @va4_va_copy(i32 %argno, ...) nounwind {
-; LP64-LP64F-LP64D-FPELIM-LABEL: va4_va_copy:
-; LP64-LP64F-LP64D-FPELIM: # %bb.0:
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -96
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT: mv s0, a1
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 88(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 80(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 72(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 64(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 56(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 48(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 40(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, sp, 48
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 0(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: call notdead
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld a0, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 3
-; LP64-LP64F-LP64D-FPELIM-NEXT: andi a0, a0, -4
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a0, 8
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lw a1, 0(a0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 11
-; LP64-LP64F-LP64D-FPELIM-NEXT: andi a0, a0, -4
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a2, a0, 8
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lw a2, 0(a0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 11
-; LP64-LP64F-LP64D-FPELIM-NEXT: andi a0, a0, -4
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, a0, 8
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lw a0, 0(a0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, a1, s0
-; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, a1, a2
-; LP64-LP64F-LP64D-FPELIM-NEXT: addw a0, a1, a0
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 96
-; LP64-LP64F-LP64D-FPELIM-NEXT: ret
-;
-; LP64-LP64F-LP64D-WITHFP-LABEL: va4_va_copy:
-; LP64-LP64F-LP64D-WITHFP: # %bb.0:
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, -112
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 48
-; LP64-LP64F-LP64D-WITHFP-NEXT: mv s1, a1
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a7, 56(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a6, 48(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a5, 40(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 16(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, s0, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, -32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, -40(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: call notdead
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld a0, -32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 3
-; LP64-LP64F-LP64D-WITHFP-NEXT: andi a0, a0, -4
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, a0, 8
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, -32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: lw a1, 0(a0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 11
-; LP64-LP64F-LP64D-WITHFP-NEXT: andi a0, a0, -4
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a2, a0, 8
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, -32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: lw a2, 0(a0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 11
-; LP64-LP64F-LP64D-WITHFP-NEXT: andi a0, a0, -4
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a3, a0, 8
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, -32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: lw a0, 0(a0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: add a1, a1, s1
-; LP64-LP64F-LP64D-WITHFP-NEXT: add a1, a1, a2
-; LP64-LP64F-LP64D-WITHFP-NEXT: addw a0, a1, a0
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 112
-; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-;
-; LP64E-FPELIM-LABEL: va4_va_copy:
-; LP64E-FPELIM: # %bb.0:
-; LP64E-FPELIM-NEXT: addi sp, sp, -80
-; LP64E-FPELIM-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; LP64E-FPELIM-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; LP64E-FPELIM-NEXT: mv s0, a1
-; LP64E-FPELIM-NEXT: sd a5, 72(sp)
-; LP64E-FPELIM-NEXT: sd a4, 64(sp)
-; LP64E-FPELIM-NEXT: sd a3, 56(sp)
-; LP64E-FPELIM-NEXT: sd a2, 48(sp)
-; LP64E-FPELIM-NEXT: sd a1, 40(sp)
-; LP64E-FPELIM-NEXT: addi a0, sp, 48
-; LP64E-FPELIM-NEXT: sd a0, 8(sp)
-; LP64E-FPELIM-NEXT: sd a0, 0(sp)
-; LP64E-FPELIM-NEXT: call notdead
-; LP64E-FPELIM-NEXT: ld a0, 8(sp)
-; LP64E-FPELIM-NEXT: addi a0, a0, 3
-; LP64E-FPELIM-NEXT: andi a0, a0, -4
-; LP64E-FPELIM-NEXT: addi a1, a0, 8
-; LP64E-FPELIM-NEXT: sd a1, 8(sp)
-; LP64E-FPELIM-NEXT: lw a1, 0(a0)
-; LP64E-FPELIM-NEXT: addi a0, a0, 11
-; LP64E-FPELIM-NEXT: andi a0, a0, -4
-; LP64E-FPELIM-NEXT: addi a2, a0, 8
-; LP64E-FPELIM-NEXT: sd a2, 8(sp)
-; LP64E-FPELIM-NEXT: lw a2, 0(a0)
-; LP64E-FPELIM-NEXT: addi a0, a0, 11
-; LP64E-FPELIM-NEXT: andi a0, a0, -4
-; LP64E-FPELIM-NEXT: addi a3, a0, 8
-; LP64E-FPELIM-NEXT: sd a3, 8(sp)
-; LP64E-FPELIM-NEXT: lw a0, 0(a0)
-; LP64E-FPELIM-NEXT: add a1, a1, s0
-; LP64E-FPELIM-NEXT: add a1, a1, a2
-; LP64E-FPELIM-NEXT: addw a0, a1, a0
-; LP64E-FPELIM-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; LP64E-FPELIM-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; LP64E-FPELIM-NEXT: addi sp, sp, 80
-; LP64E-FPELIM-NEXT: ret
-;
-; LP64E-WITHFP-LABEL: va4_va_copy:
-; LP64E-WITHFP: # %bb.0:
-; LP64E-WITHFP-NEXT: addi sp, sp, -88
-; LP64E-WITHFP-NEXT: sd ra, 32(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s0, 24(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s1, 16(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: addi s0, sp, 40
-; LP64E-WITHFP-NEXT: mv s1, a1
-; LP64E-WITHFP-NEXT: sd a5, 40(s0)
-; LP64E-WITHFP-NEXT: sd a4, 32(s0)
-; LP64E-WITHFP-NEXT: sd a3, 24(s0)
-; LP64E-WITHFP-NEXT: sd a2, 16(s0)
-; LP64E-WITHFP-NEXT: sd a1, 8(s0)
-; LP64E-WITHFP-NEXT: addi a0, s0, 16
-; LP64E-WITHFP-NEXT: sd a0, -32(s0)
-; LP64E-WITHFP-NEXT: sd a0, -40(s0)
-; LP64E-WITHFP-NEXT: call notdead
-; LP64E-WITHFP-NEXT: ld a0, -32(s0)
-; LP64E-WITHFP-NEXT: addi a0, a0, 3
-; LP64E-WITHFP-NEXT: andi a0, a0, -4
-; LP64E-WITHFP-NEXT: addi a1, a0, 8
-; LP64E-WITHFP-NEXT: sd a1, -32(s0)
-; LP64E-WITHFP-NEXT: lw a1, 0(a0)
-; LP64E-WITHFP-NEXT: addi a0, a0, 11
-; LP64E-WITHFP-NEXT: andi a0, a0, -4
-; LP64E-WITHFP-NEXT: addi a2, a0, 8
-; LP64E-WITHFP-NEXT: sd a2, -32(s0)
-; LP64E-WITHFP-NEXT: lw a2, 0(a0)
-; LP64E-WITHFP-NEXT: addi a0, a0, 11
-; LP64E-WITHFP-NEXT: andi a0, a0, -4
-; LP64E-WITHFP-NEXT: addi a3, a0, 8
-; LP64E-WITHFP-NEXT: sd a3, -32(s0)
-; LP64E-WITHFP-NEXT: lw a0, 0(a0)
-; LP64E-WITHFP-NEXT: add a1, a1, s1
-; LP64E-WITHFP-NEXT: add a1, a1, a2
-; LP64E-WITHFP-NEXT: addw a0, a1, a0
-; LP64E-WITHFP-NEXT: ld ra, 32(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s0, 24(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s1, 16(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: addi sp, sp, 88
-; LP64E-WITHFP-NEXT: ret
- %vargs = alloca ptr
- %wargs = alloca ptr
- call void @llvm.va_start(ptr %vargs)
- %1 = va_arg ptr %vargs, i32
- call void @llvm.va_copy(ptr %wargs, ptr %vargs)
- %2 = load ptr, ptr %wargs, align 4
- call void @notdead(ptr %2)
- %3 = va_arg ptr %vargs, i32
- %4 = va_arg ptr %vargs, i32
- %5 = va_arg ptr %vargs, i32
- call void @llvm.va_end(ptr %vargs)
- call void @llvm.va_end(ptr %wargs)
- %add1 = add i32 %3, %1
- %add2 = add i32 %add1, %4
- %add3 = add i32 %add2, %5
- ret i32 %add3
-}
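Every va_arg of i32 in the checks above is the same three-instruction cursor
bump: reload the saved pointer, round it up to 4-byte alignment (addi +3 /
andi -4), load the value, and store back a cursor advanced by one 8-byte slot.
va_copy itself costs only the second sd of the same cursor before the call to
notdead. A hand-expanded sketch of one step (hypothetical helper mirroring the
sequence above):

define i32 @va_arg_i32_step(ptr %va) {
  %cur = load ptr, ptr %va
  %p0 = ptrtoint ptr %cur to i64
  %p1 = add i64 %p0, 3
  %p2 = and i64 %p1, -4           ; round the cursor up to 4-byte alignment
  %slot = inttoptr i64 %p2 to ptr
  %v = load i32, ptr %slot
  %p3 = add i64 %p2, 8            ; each vararg occupies one 8-byte LP64 slot
  %next = inttoptr i64 %p3 to ptr
  store ptr %next, ptr %va
  ret i32 %v
}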
-
-; Check that 2*xlen values are aligned appropriately when passed on the stack in a vararg call
-
-declare i32 @va5_aligned_stack_callee(i32, ...)
-
-define void @va5_aligned_stack_caller() nounwind {
-; The double should be 8-byte aligned on the stack, but the two-element array
-; should only be 4-byte aligned
-; LP64-LP64F-LP64D-FPELIM-LABEL: va5_aligned_stack_caller:
-; LP64-LP64F-LP64D-FPELIM: # %bb.0:
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -48
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 17
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 16
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 16(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 15
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, %hi(.LCPI11_0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld t0, %lo(.LCPI11_0)(a0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, %hi(.LCPI11_1)
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld a2, %lo(.LCPI11_1)(a0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, %hi(.LCPI11_2)
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld a3, %lo(.LCPI11_2)(a0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 2384
-; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a6, a0, 761
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a6, a6, 11
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 11
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a4, 12
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a5, 13
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a7, 14
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd t0, 0(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: call va5_aligned_stack_callee
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 48
-; LP64-LP64F-LP64D-FPELIM-NEXT: ret
-;
-; LP64-LP64F-LP64D-WITHFP-LABEL: va5_aligned_stack_caller:
-; LP64-LP64F-LP64D-WITHFP: # %bb.0:
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, -48
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 48
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 17
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, 24(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, 16(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 15
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, 8(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT: lui a0, %hi(.LCPI11_0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld t0, %lo(.LCPI11_0)(a0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: lui a0, %hi(.LCPI11_1)
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld a2, %lo(.LCPI11_1)(a0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: lui a0, %hi(.LCPI11_2)
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld a3, %lo(.LCPI11_2)(a0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: lui a0, 2384
-; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a6, a0, 761
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a6, a6, 11
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 1
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 11
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a4, 12
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a5, 13
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a7, 14
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd t0, 0(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT: call va5_aligned_stack_callee
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 48
-; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-;
-; LP64E-FPELIM-LABEL: va5_aligned_stack_caller:
-; LP64E-FPELIM: # %bb.0:
-; LP64E-FPELIM-NEXT: addi sp, sp, -56
-; LP64E-FPELIM-NEXT: sd ra, 48(sp) # 8-byte Folded Spill
-; LP64E-FPELIM-NEXT: li a0, 17
-; LP64E-FPELIM-NEXT: sd a0, 40(sp)
-; LP64E-FPELIM-NEXT: li a0, 16
-; LP64E-FPELIM-NEXT: lui a1, %hi(.LCPI11_0)
-; LP64E-FPELIM-NEXT: ld a1, %lo(.LCPI11_0)(a1)
-; LP64E-FPELIM-NEXT: sd a0, 32(sp)
-; LP64E-FPELIM-NEXT: li a0, 15
-; LP64E-FPELIM-NEXT: sd a0, 24(sp)
-; LP64E-FPELIM-NEXT: sd a1, 16(sp)
-; LP64E-FPELIM-NEXT: li a0, 14
-; LP64E-FPELIM-NEXT: sd a0, 8(sp)
-; LP64E-FPELIM-NEXT: lui a0, 2384
-; LP64E-FPELIM-NEXT: addiw a0, a0, 761
-; LP64E-FPELIM-NEXT: slli a6, a0, 11
-; LP64E-FPELIM-NEXT: lui a0, %hi(.LCPI11_1)
-; LP64E-FPELIM-NEXT: ld a2, %lo(.LCPI11_1)(a0)
-; LP64E-FPELIM-NEXT: lui a0, %hi(.LCPI11_2)
-; LP64E-FPELIM-NEXT: ld a3, %lo(.LCPI11_2)(a0)
-; LP64E-FPELIM-NEXT: li a0, 1
-; LP64E-FPELIM-NEXT: li a1, 11
-; LP64E-FPELIM-NEXT: li a4, 12
-; LP64E-FPELIM-NEXT: li a5, 13
-; LP64E-FPELIM-NEXT: sd a6, 0(sp)
-; LP64E-FPELIM-NEXT: call va5_aligned_stack_callee
-; LP64E-FPELIM-NEXT: ld ra, 48(sp) # 8-byte Folded Reload
-; LP64E-FPELIM-NEXT: addi sp, sp, 56
-; LP64E-FPELIM-NEXT: ret
-;
-; LP64E-WITHFP-LABEL: va5_aligned_stack_caller:
-; LP64E-WITHFP: # %bb.0:
-; LP64E-WITHFP-NEXT: addi sp, sp, -64
-; LP64E-WITHFP-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: addi s0, sp, 64
-; LP64E-WITHFP-NEXT: li a0, 17
-; LP64E-WITHFP-NEXT: sd a0, 40(sp)
-; LP64E-WITHFP-NEXT: li a0, 16
-; LP64E-WITHFP-NEXT: lui a1, %hi(.LCPI11_0)
-; LP64E-WITHFP-NEXT: ld a1, %lo(.LCPI11_0)(a1)
-; LP64E-WITHFP-NEXT: sd a0, 32(sp)
-; LP64E-WITHFP-NEXT: li a0, 15
-; LP64E-WITHFP-NEXT: sd a0, 24(sp)
-; LP64E-WITHFP-NEXT: sd a1, 16(sp)
-; LP64E-WITHFP-NEXT: li a0, 14
-; LP64E-WITHFP-NEXT: sd a0, 8(sp)
-; LP64E-WITHFP-NEXT: lui a0, 2384
-; LP64E-WITHFP-NEXT: addiw a0, a0, 761
-; LP64E-WITHFP-NEXT: slli a6, a0, 11
-; LP64E-WITHFP-NEXT: lui a0, %hi(.LCPI11_1)
-; LP64E-WITHFP-NEXT: ld a2, %lo(.LCPI11_1)(a0)
-; LP64E-WITHFP-NEXT: lui a0, %hi(.LCPI11_2)
-; LP64E-WITHFP-NEXT: ld a3, %lo(.LCPI11_2)(a0)
-; LP64E-WITHFP-NEXT: li a0, 1
-; LP64E-WITHFP-NEXT: li a1, 11
-; LP64E-WITHFP-NEXT: li a4, 12
-; LP64E-WITHFP-NEXT: li a5, 13
-; LP64E-WITHFP-NEXT: sd a6, 0(sp)
-; LP64E-WITHFP-NEXT: call va5_aligned_stack_callee
-; LP64E-WITHFP-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: addi sp, sp, 64
-; LP64E-WITHFP-NEXT: ret
- %1 = call i32 (i32, ...) @va5_aligned_stack_callee(i32 1, i32 11,
- fp128 0xLEB851EB851EB851F400091EB851EB851, i32 12, i32 13, i64 20000000000,
- i32 14, double 2.720000e+00, i32 15, [2 x i32] [i32 16, i32 17])
- ret void
-}
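The fp128 argument is the 2*xlen value under test: it lands in the aligned
even-odd register pair a2/a3 as the psABI requires. The i64 20000000000 needs
the three-instruction lui/addiw/slli sequence because 20000000000 =
9765625 * 2^11, and 9765625 = 2384 * 2^12 + 761 is reachable with lui + addiw.
The arguments that no longer fit in registers overflow onto the outgoing stack
slots shown in the sd sequences.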
-
-; A function with no fixed arguments is not valid C, but can be
-; specified in LLVM IR. We must ensure the vararg save area is
-; still set up correctly.
-
-define i32 @va6_no_fixed_args(...) nounwind {
-; LP64-LP64F-LP64D-FPELIM-LABEL: va6_no_fixed_args:
-; LP64-LP64F-LP64D-FPELIM: # %bb.0:
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 16(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 24
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80
-; LP64-LP64F-LP64D-FPELIM-NEXT: ret
-;
-; LP64-LP64F-LP64D-WITHFP-LABEL: va6_no_fixed_args:
-; LP64-LP64F-LP64D-WITHFP: # %bb.0:
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, -96
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 32
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a7, 56(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a6, 48(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a5, 40(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 16(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, 0(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, s0, 8
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
-; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-;
-; LP64E-FPELIM-LABEL: va6_no_fixed_args:
-; LP64E-FPELIM: # %bb.0:
-; LP64E-FPELIM-NEXT: addi sp, sp, -56
-; LP64E-FPELIM-NEXT: sd a5, 48(sp)
-; LP64E-FPELIM-NEXT: sd a4, 40(sp)
-; LP64E-FPELIM-NEXT: sd a3, 32(sp)
-; LP64E-FPELIM-NEXT: sd a2, 24(sp)
-; LP64E-FPELIM-NEXT: sd a1, 16(sp)
-; LP64E-FPELIM-NEXT: sd a0, 8(sp)
-; LP64E-FPELIM-NEXT: addi a1, sp, 16
-; LP64E-FPELIM-NEXT: sd a1, 0(sp)
-; LP64E-FPELIM-NEXT: addi sp, sp, 56
-; LP64E-FPELIM-NEXT: ret
-;
-; LP64E-WITHFP-LABEL: va6_no_fixed_args:
-; LP64E-WITHFP: # %bb.0:
-; LP64E-WITHFP-NEXT: addi sp, sp, -72
-; LP64E-WITHFP-NEXT: sd ra, 16(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: addi s0, sp, 24
-; LP64E-WITHFP-NEXT: sd a5, 40(s0)
-; LP64E-WITHFP-NEXT: sd a4, 32(s0)
-; LP64E-WITHFP-NEXT: sd a3, 24(s0)
-; LP64E-WITHFP-NEXT: sd a2, 16(s0)
-; LP64E-WITHFP-NEXT: sd a1, 8(s0)
-; LP64E-WITHFP-NEXT: sd a0, 0(s0)
-; LP64E-WITHFP-NEXT: addi a1, s0, 8
-; LP64E-WITHFP-NEXT: sd a1, -24(s0)
-; LP64E-WITHFP-NEXT: ld ra, 16(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: addi sp, sp, 72
-; LP64E-WITHFP-NEXT: ret
- %va = alloca ptr
- call void @llvm.va_start(ptr %va)
- %1 = va_arg ptr %va, i32
- call void @llvm.va_end(ptr %va)
- ret i32 %1
-}
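With no named parameters, every argument register belongs to the vararg save
area: the LP64 prefixes spill all eight of a0-a7 (64 bytes of the 80-byte
frame) and LP64E spills a0-a5 (48 of 56). The va_arg of the first i32 folds
away entirely; only the post-increment cursor (the address of a1's slot) is
stored, and the returned value is the still-live incoming a0.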
-
-; TODO: improve constant materialization of stack addresses
-
-define i32 @va_large_stack(ptr %fmt, ...) {
-; LP64-LP64F-LP64D-FPELIM-LABEL: va_large_stack:
-; LP64-LP64F-LP64D-FPELIM: # %bb.0:
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 24414
-; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a0, a0, 336
-; LP64-LP64F-LP64D-FPELIM-NEXT: sub sp, sp, a0
-; LP64-LP64F-LP64D-FPELIM-NEXT: .cfi_def_cfa_offset 100000080
-; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, a1
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui t0, 24414
-; LP64-LP64F-LP64D-FPELIM-NEXT: add t0, sp, t0
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 328(t0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a7, 24414
-; LP64-LP64F-LP64D-FPELIM-NEXT: add a7, sp, a7
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 320(a7)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a6, 24414
-; LP64-LP64F-LP64D-FPELIM-NEXT: add a6, sp, a6
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 312(a6)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a5, 24414
-; LP64-LP64F-LP64D-FPELIM-NEXT: add a5, sp, a5
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 304(a5)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a4, 24414
-; LP64-LP64F-LP64D-FPELIM-NEXT: add a4, sp, a4
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 296(a4)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a3, 24414
-; LP64-LP64F-LP64D-FPELIM-NEXT: add a3, sp, a3
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 288(a3)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a2, 24414
-; LP64-LP64F-LP64D-FPELIM-NEXT: add a2, sp, a2
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 280(a2)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414
-; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a1, a1, 284
-; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, sp, a1
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414
-; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a1, a1, 336
-; LP64-LP64F-LP64D-FPELIM-NEXT: add sp, sp, a1
-; LP64-LP64F-LP64D-FPELIM-NEXT: ret
-;
-; LP64-LP64F-LP64D-WITHFP-LABEL: va_large_stack:
-; LP64-LP64F-LP64D-WITHFP: # %bb.0:
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, -2032
-; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_def_cfa_offset 2032
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 1960(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 1952(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_offset ra, -72
-; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_offset s0, -80
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 1968
-; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_def_cfa s0, 64
-; LP64-LP64F-LP64D-WITHFP-NEXT: lui a0, 24414
-; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a0, a0, -1680
-; LP64-LP64F-LP64D-WITHFP-NEXT: sub sp, sp, a0
-; LP64-LP64F-LP64D-WITHFP-NEXT: mv a0, a1
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a7, 56(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a6, 48(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a5, 40(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 16(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, s0, 12
-; LP64-LP64F-LP64D-WITHFP-NEXT: lui a2, 24414
-; LP64-LP64F-LP64D-WITHFP-NEXT: sub a2, s0, a2
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, -288(a2)
-; LP64-LP64F-LP64D-WITHFP-NEXT: lui a1, 24414
-; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a1, a1, -1680
-; LP64-LP64F-LP64D-WITHFP-NEXT: add sp, sp, a1
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 1960(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 1952(sp) # 8-byte Folded Reload
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 2032
-; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-;
-; LP64E-FPELIM-LABEL: va_large_stack:
-; LP64E-FPELIM: # %bb.0:
-; LP64E-FPELIM-NEXT: lui a0, 24414
-; LP64E-FPELIM-NEXT: addiw a0, a0, 320
-; LP64E-FPELIM-NEXT: sub sp, sp, a0
-; LP64E-FPELIM-NEXT: .cfi_def_cfa_offset 100000064
-; LP64E-FPELIM-NEXT: mv a0, a1
-; LP64E-FPELIM-NEXT: lui a6, 24414
-; LP64E-FPELIM-NEXT: add a6, sp, a6
-; LP64E-FPELIM-NEXT: sd a5, 312(a6)
-; LP64E-FPELIM-NEXT: lui a5, 24414
-; LP64E-FPELIM-NEXT: add a5, sp, a5
-; LP64E-FPELIM-NEXT: sd a4, 304(a5)
-; LP64E-FPELIM-NEXT: lui a4, 24414
-; LP64E-FPELIM-NEXT: add a4, sp, a4
-; LP64E-FPELIM-NEXT: sd a3, 296(a4)
-; LP64E-FPELIM-NEXT: lui a3, 24414
-; LP64E-FPELIM-NEXT: add a3, sp, a3
-; LP64E-FPELIM-NEXT: sd a2, 288(a3)
-; LP64E-FPELIM-NEXT: lui a2, 24414
-; LP64E-FPELIM-NEXT: add a2, sp, a2
-; LP64E-FPELIM-NEXT: sd a1, 280(a2)
-; LP64E-FPELIM-NEXT: lui a1, 24414
-; LP64E-FPELIM-NEXT: addiw a1, a1, 284
-; LP64E-FPELIM-NEXT: add a1, sp, a1
-; LP64E-FPELIM-NEXT: sd a1, 8(sp)
-; LP64E-FPELIM-NEXT: lui a1, 24414
-; LP64E-FPELIM-NEXT: addiw a1, a1, 320
-; LP64E-FPELIM-NEXT: add sp, sp, a1
-; LP64E-FPELIM-NEXT: ret
-;
-; LP64E-WITHFP-LABEL: va_large_stack:
-; LP64E-WITHFP: # %bb.0:
-; LP64E-WITHFP-NEXT: addi sp, sp, -2040
-; LP64E-WITHFP-NEXT: .cfi_def_cfa_offset 2040
-; LP64E-WITHFP-NEXT: sd ra, 1984(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: sd s0, 1976(sp) # 8-byte Folded Spill
-; LP64E-WITHFP-NEXT: .cfi_offset ra, -56
-; LP64E-WITHFP-NEXT: .cfi_offset s0, -64
-; LP64E-WITHFP-NEXT: addi s0, sp, 1992
-; LP64E-WITHFP-NEXT: .cfi_def_cfa s0, 48
-; LP64E-WITHFP-NEXT: lui a0, 24414
-; LP64E-WITHFP-NEXT: addiw a0, a0, -1704
-; LP64E-WITHFP-NEXT: sub sp, sp, a0
-; LP64E-WITHFP-NEXT: mv a0, a1
-; LP64E-WITHFP-NEXT: sd a5, 40(s0)
-; LP64E-WITHFP-NEXT: sd a4, 32(s0)
-; LP64E-WITHFP-NEXT: sd a3, 24(s0)
-; LP64E-WITHFP-NEXT: sd a2, 16(s0)
-; LP64E-WITHFP-NEXT: sd a1, 8(s0)
-; LP64E-WITHFP-NEXT: addi a1, s0, 12
-; LP64E-WITHFP-NEXT: lui a2, 24414
-; LP64E-WITHFP-NEXT: sub a2, s0, a2
-; LP64E-WITHFP-NEXT: sd a1, -288(a2)
-; LP64E-WITHFP-NEXT: lui a1, 24414
-; LP64E-WITHFP-NEXT: addiw a1, a1, -1704
-; LP64E-WITHFP-NEXT: add sp, sp, a1
-; LP64E-WITHFP-NEXT: ld ra, 1984(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: ld s0, 1976(sp) # 8-byte Folded Reload
-; LP64E-WITHFP-NEXT: addi sp, sp, 2040
-; LP64E-WITHFP-NEXT: ret
- %large = alloca [ 100000000 x i8 ]
- %va = alloca ptr
- call void @llvm.va_start(ptr %va)
- %argp.cur = load ptr, ptr %va, align 4
- %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
- store ptr %argp.next, ptr %va, align 4
- %1 = load i32, ptr %argp.cur, align 4
- call void @llvm.va_end(ptr %va)
- ret i32 %1
-}
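The TODO above is what the FPELIM checks show: the 100000080-byte frame
(24414 * 2^12 + 336 = 99999744 + 336) puts every save-area slot out of reach
of a 12-bit addi offset, so each sd rematerializes lui 24414 and re-adds sp
instead of reusing one hoisted base register.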
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/xaluo.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/xaluo.ll
deleted file mode 100644
index 1c794a1..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/xaluo.ll
+++ /dev/null
@@ -1,2609 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m -verify-machineinstrs \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zba -verify-machineinstrs \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64ZBA
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zicond -verify-machineinstrs \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64ZICOND
-
-;
-; Get the actual value of the overflow bit.
-;
-define zeroext i1 @saddo1.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
-; RV64-LABEL: saddo1.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addw a3, a0, a1
-; RV64-NEXT: add a1, a0, a1
-; RV64-NEXT: xor a3, a1, a3
-; RV64-NEXT: snez a0, a3
-; RV64-NEXT: sw a1, 0(a2)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: saddo1.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addw a3, a0, a1
-; RV64ZBA-NEXT: add a1, a0, a1
-; RV64ZBA-NEXT: xor a3, a1, a3
-; RV64ZBA-NEXT: snez a0, a3
-; RV64ZBA-NEXT: sw a1, 0(a2)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: saddo1.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addw a3, a0, a1
-; RV64ZICOND-NEXT: add a1, a0, a1
-; RV64ZICOND-NEXT: xor a3, a1, a3
-; RV64ZICOND-NEXT: snez a0, a3
-; RV64ZICOND-NEXT: sw a1, 0(a2)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
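The overflow bit falls out of comparing the sign-extended 32-bit sum (addw)
with the full 64-bit sum (add): for sign-extended inputs the two agree exactly
when the 32-bit addition does not overflow. A hand-expanded equivalent
(hypothetical function name; operands assumed to be sign-extended 32-bit
values):

define i1 @saddo_by_hand(i64 %a, i64 %b) {
  %full = add i64 %a, %b          ; the "add"
  %t = trunc i64 %full to i32
  %sext = sext i32 %t to i64      ; the "addw"
  %ov = icmp ne i64 %full, %sext  ; differ iff the i32 add overflowed
  ret i1 %ov
}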
-
-; Test the immediate version.
-define zeroext i1 @saddo2.i32(i32 signext %v1, ptr %res) {
-; RV64-LABEL: saddo2.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addiw a2, a0, 4
-; RV64-NEXT: slt a0, a2, a0
-; RV64-NEXT: sw a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: saddo2.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addiw a2, a0, 4
-; RV64ZBA-NEXT: slt a0, a2, a0
-; RV64ZBA-NEXT: sw a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: saddo2.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addiw a2, a0, 4
-; RV64ZICOND-NEXT: slt a0, a2, a0
-; RV64ZICOND-NEXT: sw a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 4)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
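With a small constant the two adds collapse into a single addiw, and the check
reduces to one comparison: adding a positive constant overflows iff the
wrapped result is (signed) less than the input, hence the lone slt.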
-
-; Test negative immediates.
-define zeroext i1 @saddo3.i32(i32 signext %v1, ptr %res) {
-; RV64-LABEL: saddo3.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addiw a2, a0, -4
-; RV64-NEXT: slt a0, a2, a0
-; RV64-NEXT: xori a0, a0, 1
-; RV64-NEXT: sw a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: saddo3.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addiw a2, a0, -4
-; RV64ZBA-NEXT: slt a0, a2, a0
-; RV64ZBA-NEXT: xori a0, a0, 1
-; RV64ZBA-NEXT: sw a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: saddo3.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addiw a2, a0, -4
-; RV64ZICOND-NEXT: slt a0, a2, a0
-; RV64ZICOND-NEXT: xori a0, a0, 1
-; RV64ZICOND-NEXT: sw a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 -4)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
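For a negative constant the condition flips: x + (-4) overflows iff the result
is not less than the input, which the trailing xori a0, a0, 1 supplies.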
-
-; Test immediates that are too large to be encoded.
-define zeroext i1 @saddo4.i32(i32 signext %v1, ptr %res) {
-; RV64-LABEL: saddo4.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: lui a2, 4096
-; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: addw a2, a0, a2
-; RV64-NEXT: slt a0, a2, a0
-; RV64-NEXT: sw a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: saddo4.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: lui a2, 4096
-; RV64ZBA-NEXT: addi a2, a2, -1
-; RV64ZBA-NEXT: addw a2, a0, a2
-; RV64ZBA-NEXT: slt a0, a2, a0
-; RV64ZBA-NEXT: sw a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: saddo4.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: lui a2, 4096
-; RV64ZICOND-NEXT: addi a2, a2, -1
-; RV64ZICOND-NEXT: addw a2, a0, a2
-; RV64ZICOND-NEXT: slt a0, a2, a0
-; RV64ZICOND-NEXT: sw a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 16777215)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
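16777215 (0xFFFFFF) is outside the [-2048, 2047] range of a 12-bit immediate,
so it is built with lui 4096 (4096 * 2^12 = 16777216) minus one before the
same addw/slt pattern as above.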
-
-define zeroext i1 @saddo1.i64(i64 %v1, i64 %v2, ptr %res) {
-; RV64-LABEL: saddo1.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: add a3, a0, a1
-; RV64-NEXT: slt a0, a3, a0
-; RV64-NEXT: slti a1, a1, 0
-; RV64-NEXT: xor a0, a1, a0
-; RV64-NEXT: sd a3, 0(a2)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: saddo1.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: add a3, a0, a1
-; RV64ZBA-NEXT: slt a0, a3, a0
-; RV64ZBA-NEXT: slti a1, a1, 0
-; RV64ZBA-NEXT: xor a0, a1, a0
-; RV64ZBA-NEXT: sd a3, 0(a2)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: saddo1.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: add a3, a0, a1
-; RV64ZICOND-NEXT: slt a0, a3, a0
-; RV64ZICOND-NEXT: slti a1, a1, 0
-; RV64ZICOND-NEXT: xor a0, a1, a0
-; RV64ZICOND-NEXT: sd a3, 0(a2)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @saddo2.i64(i64 %v1, ptr %res) {
-; RV64-LABEL: saddo2.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addi a2, a0, 4
-; RV64-NEXT: slt a0, a2, a0
-; RV64-NEXT: sd a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: saddo2.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addi a2, a0, 4
-; RV64ZBA-NEXT: slt a0, a2, a0
-; RV64ZBA-NEXT: sd a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: saddo2.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addi a2, a0, 4
-; RV64ZICOND-NEXT: slt a0, a2, a0
-; RV64ZICOND-NEXT: sd a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 4)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @saddo3.i64(i64 %v1, ptr %res) {
-; RV64-LABEL: saddo3.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addi a2, a0, -4
-; RV64-NEXT: slt a0, a2, a0
-; RV64-NEXT: xori a0, a0, 1
-; RV64-NEXT: sd a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: saddo3.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addi a2, a0, -4
-; RV64ZBA-NEXT: slt a0, a2, a0
-; RV64ZBA-NEXT: xori a0, a0, 1
-; RV64ZBA-NEXT: sd a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: saddo3.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addi a2, a0, -4
-; RV64ZICOND-NEXT: slt a0, a2, a0
-; RV64ZICOND-NEXT: xori a0, a0, 1
-; RV64ZICOND-NEXT: sd a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -4)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @uaddo.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
-; RV64-LABEL: uaddo.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addw a1, a0, a1
-; RV64-NEXT: sltu a0, a1, a0
-; RV64-NEXT: sw a1, 0(a2)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addw a1, a0, a1
-; RV64ZBA-NEXT: sltu a0, a1, a0
-; RV64ZBA-NEXT: sw a1, 0(a2)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addw a1, a0, a1
-; RV64ZICOND-NEXT: sltu a0, a1, a0
-; RV64ZICOND-NEXT: sw a1, 0(a2)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
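Unsigned overflow needs no second add: a wrapped sum is (unsigned) smaller
than either operand, so a single sltu against an input recovers the carry. The
same check in IR (hypothetical function name):

define i1 @uaddo_by_hand(i32 %a, i32 %b) {
  %s = add i32 %a, %b
  %ov = icmp ult i32 %s, %a       ; wrapped iff the sum dropped below an operand
  ret i1 %ov
}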
-
-define zeroext i1 @uaddo.i32.constant(i32 signext %v1, ptr %res) {
-; RV64-LABEL: uaddo.i32.constant:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addiw a2, a0, -2
-; RV64-NEXT: sltu a0, a2, a0
-; RV64-NEXT: sw a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.i32.constant:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addiw a2, a0, -2
-; RV64ZBA-NEXT: sltu a0, a2, a0
-; RV64ZBA-NEXT: sw a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.i32.constant:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addiw a2, a0, -2
-; RV64ZICOND-NEXT: sltu a0, a2, a0
-; RV64ZICOND-NEXT: sw a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 -2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @uaddo.i32.constant_one(i32 signext %v1, ptr %res) {
-; RV64-LABEL: uaddo.i32.constant_one:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addiw a2, a0, 1
-; RV64-NEXT: seqz a0, a2
-; RV64-NEXT: sw a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.i32.constant_one:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addiw a2, a0, 1
-; RV64ZBA-NEXT: seqz a0, a2
-; RV64ZBA-NEXT: sw a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.i32.constant_one:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addiw a2, a0, 1
-; RV64ZICOND-NEXT: seqz a0, a2
-; RV64ZICOND-NEXT: sw a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 1)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @uaddo.i64(i64 %v1, i64 %v2, ptr %res) {
-; RV64-LABEL: uaddo.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: add a1, a0, a1
-; RV64-NEXT: sltu a0, a1, a0
-; RV64-NEXT: sd a1, 0(a2)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: add a1, a0, a1
-; RV64ZBA-NEXT: sltu a0, a1, a0
-; RV64ZBA-NEXT: sd a1, 0(a2)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: add a1, a0, a1
-; RV64ZICOND-NEXT: sltu a0, a1, a0
-; RV64ZICOND-NEXT: sd a1, 0(a2)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @uaddo.i64.constant_one(i64 %v1, ptr %res) {
-; RV64-LABEL: uaddo.i64.constant_one:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addi a2, a0, 1
-; RV64-NEXT: seqz a0, a2
-; RV64-NEXT: sd a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.i64.constant_one:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addi a2, a0, 1
-; RV64ZBA-NEXT: seqz a0, a2
-; RV64ZBA-NEXT: sd a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.i64.constant_one:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addi a2, a0, 1
-; RV64ZICOND-NEXT: seqz a0, a2
-; RV64ZICOND-NEXT: sd a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 1)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @ssubo1.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
-; RV64-LABEL: ssubo1.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: subw a3, a0, a1
-; RV64-NEXT: sub a1, a0, a1
-; RV64-NEXT: xor a3, a1, a3
-; RV64-NEXT: snez a0, a3
-; RV64-NEXT: sw a1, 0(a2)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: ssubo1.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: subw a3, a0, a1
-; RV64ZBA-NEXT: sub a1, a0, a1
-; RV64ZBA-NEXT: xor a3, a1, a3
-; RV64ZBA-NEXT: snez a0, a3
-; RV64ZBA-NEXT: sw a1, 0(a2)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: ssubo1.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: subw a3, a0, a1
-; RV64ZICOND-NEXT: sub a1, a0, a1
-; RV64ZICOND-NEXT: xor a3, a1, a3
-; RV64ZICOND-NEXT: snez a0, a3
-; RV64ZICOND-NEXT: sw a1, 0(a2)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @ssubo2.i32(i32 signext %v1, ptr %res) {
-; RV64-LABEL: ssubo2.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addiw a2, a0, 4
-; RV64-NEXT: slt a0, a2, a0
-; RV64-NEXT: sw a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: ssubo2.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addiw a2, a0, 4
-; RV64ZBA-NEXT: slt a0, a2, a0
-; RV64ZBA-NEXT: sw a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: ssubo2.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addiw a2, a0, 4
-; RV64ZICOND-NEXT: slt a0, a2, a0
-; RV64ZICOND-NEXT: sw a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 -4)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, ptr %res) {
-; RV64-LABEL: ssubo.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: sgtz a3, a1
-; RV64-NEXT: sub a1, a0, a1
-; RV64-NEXT: slt a0, a1, a0
-; RV64-NEXT: xor a0, a3, a0
-; RV64-NEXT: sd a1, 0(a2)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: ssubo.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: sgtz a3, a1
-; RV64ZBA-NEXT: sub a1, a0, a1
-; RV64ZBA-NEXT: slt a0, a1, a0
-; RV64ZBA-NEXT: xor a0, a3, a0
-; RV64ZBA-NEXT: sd a1, 0(a2)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: ssubo.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: sgtz a3, a1
-; RV64ZICOND-NEXT: sub a1, a0, a1
-; RV64ZICOND-NEXT: slt a0, a1, a0
-; RV64ZICOND-NEXT: xor a0, a3, a0
-; RV64ZICOND-NEXT: sd a1, 0(a2)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, ptr %res
- ret i1 %obit
-}
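The i64 signed subtract uses the classic identity: a - b overflows iff (b > 0)
disagrees with (a - b < a), computed here with sgtz, slt and an xor.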
-
-define zeroext i1 @usubo.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
-; RV64-LABEL: usubo.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: subw a1, a0, a1
-; RV64-NEXT: sltu a0, a0, a1
-; RV64-NEXT: sw a1, 0(a2)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: usubo.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: subw a1, a0, a1
-; RV64ZBA-NEXT: sltu a0, a0, a1
-; RV64ZBA-NEXT: sw a1, 0(a2)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: usubo.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: subw a1, a0, a1
-; RV64ZICOND-NEXT: sltu a0, a0, a1
-; RV64ZICOND-NEXT: sw a1, 0(a2)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @usubo.i32.constant.rhs(i32 signext %v1, ptr %res) {
-; RV64-LABEL: usubo.i32.constant.rhs:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addiw a2, a0, 2
-; RV64-NEXT: sltu a0, a0, a2
-; RV64-NEXT: sw a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: usubo.i32.constant.rhs:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addiw a2, a0, 2
-; RV64ZBA-NEXT: sltu a0, a0, a2
-; RV64ZBA-NEXT: sw a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: usubo.i32.constant.rhs:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addiw a2, a0, 2
-; RV64ZICOND-NEXT: sltu a0, a0, a2
-; RV64ZICOND-NEXT: sw a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 -2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @usubo.i32.constant.lhs(i32 signext %v1, ptr %res) {
-; RV64-LABEL: usubo.i32.constant.lhs:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: li a2, -2
-; RV64-NEXT: subw a2, a2, a0
-; RV64-NEXT: addi a0, a2, 1
-; RV64-NEXT: seqz a0, a0
-; RV64-NEXT: sw a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: usubo.i32.constant.lhs:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: li a2, -2
-; RV64ZBA-NEXT: subw a2, a2, a0
-; RV64ZBA-NEXT: addi a0, a2, 1
-; RV64ZBA-NEXT: seqz a0, a0
-; RV64ZBA-NEXT: sw a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: usubo.i32.constant.lhs:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: li a2, -2
-; RV64ZICOND-NEXT: subw a2, a2, a0
-; RV64ZICOND-NEXT: addi a0, a2, 1
-; RV64ZICOND-NEXT: seqz a0, a0
-; RV64ZICOND-NEXT: sw a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 -2, i32 %v1)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @usubo.i64(i64 %v1, i64 %v2, ptr %res) {
-; RV64-LABEL: usubo.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: sub a1, a0, a1
-; RV64-NEXT: sltu a0, a0, a1
-; RV64-NEXT: sd a1, 0(a2)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: usubo.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: sub a1, a0, a1
-; RV64ZBA-NEXT: sltu a0, a0, a1
-; RV64ZBA-NEXT: sd a1, 0(a2)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: usubo.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: sub a1, a0, a1
-; RV64ZICOND-NEXT: sltu a0, a0, a1
-; RV64ZICOND-NEXT: sd a1, 0(a2)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @smulo.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
-; RV64-LABEL: smulo.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: mulw a3, a0, a1
-; RV64-NEXT: mul a1, a0, a1
-; RV64-NEXT: xor a3, a1, a3
-; RV64-NEXT: snez a0, a3
-; RV64-NEXT: sw a1, 0(a2)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: smulo.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: mulw a3, a0, a1
-; RV64ZBA-NEXT: mul a1, a0, a1
-; RV64ZBA-NEXT: xor a3, a1, a3
-; RV64ZBA-NEXT: snez a0, a3
-; RV64ZBA-NEXT: sw a1, 0(a2)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: smulo.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: mulw a3, a0, a1
-; RV64ZICOND-NEXT: mul a1, a0, a1
-; RV64ZICOND-NEXT: xor a3, a1, a3
-; RV64ZICOND-NEXT: snez a0, a3
-; RV64ZICOND-NEXT: sw a1, 0(a2)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @smulo2.i32(i32 signext %v1, ptr %res) {
-; RV64-LABEL: smulo2.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: li a2, 13
-; RV64-NEXT: mulw a3, a0, a2
-; RV64-NEXT: mul a2, a0, a2
-; RV64-NEXT: xor a3, a2, a3
-; RV64-NEXT: snez a0, a3
-; RV64-NEXT: sw a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: smulo2.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: sh1add a2, a0, a0
-; RV64ZBA-NEXT: sh2add a2, a2, a0
-; RV64ZBA-NEXT: sext.w a0, a2
-; RV64ZBA-NEXT: xor a0, a2, a0
-; RV64ZBA-NEXT: snez a0, a0
-; RV64ZBA-NEXT: sw a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: smulo2.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: li a2, 13
-; RV64ZICOND-NEXT: mulw a3, a0, a2
-; RV64ZICOND-NEXT: mul a2, a0, a2
-; RV64ZICOND-NEXT: xor a3, a2, a3
-; RV64ZICOND-NEXT: snez a0, a3
-; RV64ZICOND-NEXT: sw a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 13)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
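The RV64ZBA version removes the multiply entirely: sh1add a2, a0, a0 computes
2x + x = 3x, sh2add a2, a2, a0 computes 4*(3x) + x = 13x, and the overflow
test becomes the usual comparison of the 64-bit product with its own sext.w.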
-
-define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, ptr %res) {
-; RV64-LABEL: smulo.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: mulh a3, a0, a1
-; RV64-NEXT: mul a1, a0, a1
-; RV64-NEXT: srai a0, a1, 63
-; RV64-NEXT: xor a0, a3, a0
-; RV64-NEXT: snez a0, a0
-; RV64-NEXT: sd a1, 0(a2)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: smulo.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: mulh a3, a0, a1
-; RV64ZBA-NEXT: mul a1, a0, a1
-; RV64ZBA-NEXT: srai a0, a1, 63
-; RV64ZBA-NEXT: xor a0, a3, a0
-; RV64ZBA-NEXT: snez a0, a0
-; RV64ZBA-NEXT: sd a1, 0(a2)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: smulo.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: mulh a3, a0, a1
-; RV64ZICOND-NEXT: mul a1, a0, a1
-; RV64ZICOND-NEXT: srai a0, a1, 63
-; RV64ZICOND-NEXT: xor a0, a3, a0
-; RV64ZICOND-NEXT: snez a0, a0
-; RV64ZICOND-NEXT: sd a1, 0(a2)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @smulo2.i64(i64 %v1, ptr %res) {
-; RV64-LABEL: smulo2.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: li a2, 13
-; RV64-NEXT: mulh a3, a0, a2
-; RV64-NEXT: mul a2, a0, a2
-; RV64-NEXT: srai a0, a2, 63
-; RV64-NEXT: xor a0, a3, a0
-; RV64-NEXT: snez a0, a0
-; RV64-NEXT: sd a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: smulo2.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: li a2, 13
-; RV64ZBA-NEXT: mulh a2, a0, a2
-; RV64ZBA-NEXT: sh1add a3, a0, a0
-; RV64ZBA-NEXT: sh2add a3, a3, a0
-; RV64ZBA-NEXT: srai a0, a3, 63
-; RV64ZBA-NEXT: xor a0, a2, a0
-; RV64ZBA-NEXT: snez a0, a0
-; RV64ZBA-NEXT: sd a3, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: smulo2.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: li a2, 13
-; RV64ZICOND-NEXT: mulh a3, a0, a2
-; RV64ZICOND-NEXT: mul a2, a0, a2
-; RV64ZICOND-NEXT: srai a0, a2, 63
-; RV64ZICOND-NEXT: xor a0, a3, a0
-; RV64ZICOND-NEXT: snez a0, a0
-; RV64ZICOND-NEXT: sd a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 13)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @umulo.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
-; RV64-LABEL: umulo.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: slli a1, a1, 32
-; RV64-NEXT: slli a0, a0, 32
-; RV64-NEXT: mulhu a1, a0, a1
-; RV64-NEXT: srai a0, a1, 32
-; RV64-NEXT: snez a0, a0
-; RV64-NEXT: sw a1, 0(a2)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: umulo.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: zext.w a1, a1
-; RV64ZBA-NEXT: zext.w a0, a0
-; RV64ZBA-NEXT: mul a1, a0, a1
-; RV64ZBA-NEXT: srai a0, a1, 32
-; RV64ZBA-NEXT: snez a0, a0
-; RV64ZBA-NEXT: sw a1, 0(a2)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: umulo.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: slli a1, a1, 32
-; RV64ZICOND-NEXT: slli a0, a0, 32
-; RV64ZICOND-NEXT: mulhu a1, a0, a1
-; RV64ZICOND-NEXT: srai a0, a1, 32
-; RV64ZICOND-NEXT: snez a0, a0
-; RV64ZICOND-NEXT: sw a1, 0(a2)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
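Pre-shifting both operands by 32 lets one mulhu do double duty: since
(a * 2^32) * (b * 2^32) = a*b * 2^64, the high half that mulhu returns is the
full 64-bit product a*b, from which both the stored low 32 bits and the
overflow test on the upper bits are taken. Zba zero-extends directly with
zext.w and uses a plain mul instead.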
-
-define zeroext i1 @umulo2.i32(i32 signext %v1, ptr %res) {
-; RV64-LABEL: umulo2.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: li a2, 13
-; RV64-NEXT: slli a2, a2, 32
-; RV64-NEXT: slli a0, a0, 32
-; RV64-NEXT: mulhu a2, a0, a2
-; RV64-NEXT: srli a0, a2, 32
-; RV64-NEXT: snez a0, a0
-; RV64-NEXT: sw a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: umulo2.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: zext.w a2, a0
-; RV64ZBA-NEXT: sh1add.uw a0, a0, a2
-; RV64ZBA-NEXT: sh2add a2, a0, a2
-; RV64ZBA-NEXT: srli a0, a2, 32
-; RV64ZBA-NEXT: snez a0, a0
-; RV64ZBA-NEXT: sw a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: umulo2.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: li a2, 13
-; RV64ZICOND-NEXT: slli a2, a2, 32
-; RV64ZICOND-NEXT: slli a0, a0, 32
-; RV64ZICOND-NEXT: mulhu a2, a0, a2
-; RV64ZICOND-NEXT: srli a0, a2, 32
-; RV64ZICOND-NEXT: snez a0, a0
-; RV64ZICOND-NEXT: sw a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 13)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, ptr %res
- ret i1 %obit
-}
-
-; Similar to umulo.i32, but storing the overflow and returning the result.
-define signext i32 @umulo3.i32(i32 signext %0, i32 signext %1, ptr %2) {
-; RV64-LABEL: umulo3.i32:
-; RV64: # %bb.0:
-; RV64-NEXT: slli a1, a1, 32
-; RV64-NEXT: slli a0, a0, 32
-; RV64-NEXT: mulhu a0, a0, a1
-; RV64-NEXT: srai a1, a0, 32
-; RV64-NEXT: snez a1, a1
-; RV64-NEXT: sext.w a0, a0
-; RV64-NEXT: sw a1, 0(a2)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: umulo3.i32:
-; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: zext.w a1, a1
-; RV64ZBA-NEXT: zext.w a0, a0
-; RV64ZBA-NEXT: mul a3, a0, a1
-; RV64ZBA-NEXT: srai a3, a3, 32
-; RV64ZBA-NEXT: snez a3, a3
-; RV64ZBA-NEXT: mulw a0, a0, a1
-; RV64ZBA-NEXT: sw a3, 0(a2)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: umulo3.i32:
-; RV64ZICOND: # %bb.0:
-; RV64ZICOND-NEXT: slli a1, a1, 32
-; RV64ZICOND-NEXT: slli a0, a0, 32
-; RV64ZICOND-NEXT: mulhu a0, a0, a1
-; RV64ZICOND-NEXT: srai a1, a0, 32
-; RV64ZICOND-NEXT: snez a1, a1
-; RV64ZICOND-NEXT: sext.w a0, a0
-; RV64ZICOND-NEXT: sw a1, 0(a2)
-; RV64ZICOND-NEXT: ret
- %4 = tail call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %0, i32 %1)
- %5 = extractvalue { i32, i1 } %4, 1
- %6 = extractvalue { i32, i1 } %4, 0
- %7 = zext i1 %5 to i32
- store i32 %7, ptr %2, align 4
- ret i32 %6
-}
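Same product trick as umulo.i32; the only differences are an explicit sext.w
on the low half to satisfy the signext i32 return and an snez of the high half
that is stored rather than returned.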
-
-define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, ptr %res) {
-; RV64-LABEL: umulo.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: mulhu a3, a0, a1
-; RV64-NEXT: snez a3, a3
-; RV64-NEXT: mul a0, a0, a1
-; RV64-NEXT: sd a0, 0(a2)
-; RV64-NEXT: mv a0, a3
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: umulo.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: mulhu a3, a0, a1
-; RV64ZBA-NEXT: snez a3, a3
-; RV64ZBA-NEXT: mul a0, a0, a1
-; RV64ZBA-NEXT: sd a0, 0(a2)
-; RV64ZBA-NEXT: mv a0, a3
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: umulo.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: mulhu a3, a0, a1
-; RV64ZICOND-NEXT: snez a3, a3
-; RV64ZICOND-NEXT: mul a0, a0, a1
-; RV64ZICOND-NEXT: sd a0, 0(a2)
-; RV64ZICOND-NEXT: mv a0, a3
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @umulo2.i64(i64 %v1, ptr %res) {
-; RV64-LABEL: umulo2.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: li a3, 13
-; RV64-NEXT: mulhu a2, a0, a3
-; RV64-NEXT: snez a2, a2
-; RV64-NEXT: mul a0, a0, a3
-; RV64-NEXT: sd a0, 0(a1)
-; RV64-NEXT: mv a0, a2
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: umulo2.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: li a2, 13
-; RV64ZBA-NEXT: mulhu a2, a0, a2
-; RV64ZBA-NEXT: snez a2, a2
-; RV64ZBA-NEXT: sh1add a3, a0, a0
-; RV64ZBA-NEXT: sh2add a0, a3, a0
-; RV64ZBA-NEXT: sd a0, 0(a1)
-; RV64ZBA-NEXT: mv a0, a2
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: umulo2.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: li a3, 13
-; RV64ZICOND-NEXT: mulhu a2, a0, a3
-; RV64ZICOND-NEXT: snez a2, a2
-; RV64ZICOND-NEXT: mul a0, a0, a3
-; RV64ZICOND-NEXT: sd a0, 0(a1)
-; RV64ZICOND-NEXT: mv a0, a2
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 13)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, ptr %res
- ret i1 %obit
-}
-
-;
-; Check the use of the overflow bit in combination with a select instruction.
-;
-define i32 @saddo.select.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: saddo.select.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addw a2, a0, a1
-; RV64-NEXT: add a3, a0, a1
-; RV64-NEXT: bne a3, a2, .LBB28_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: .LBB28_2: # %entry
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: saddo.select.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addw a2, a0, a1
-; RV64ZBA-NEXT: add a3, a0, a1
-; RV64ZBA-NEXT: bne a3, a2, .LBB28_2
-; RV64ZBA-NEXT: # %bb.1: # %entry
-; RV64ZBA-NEXT: mv a0, a1
-; RV64ZBA-NEXT: .LBB28_2: # %entry
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: saddo.select.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addw a2, a0, a1
-; RV64ZICOND-NEXT: add a3, a0, a1
-; RV64ZICOND-NEXT: xor a2, a3, a2
-; RV64ZICOND-NEXT: czero.nez a1, a1, a2
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a2
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
- %obit = extractvalue {i32, i1} %t, 1
- %ret = select i1 %obit, i32 %v1, i32 %v2
- ret i32 %ret
-}
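Zicond turns the select into straight-line code: czero.nez a1, a1, a2 clears
%v2 when the overflow condition in a2 is nonzero, czero.eqz a0, a0, a2 clears
%v1 when it is zero, and the or keeps whichever survived; the base and Zba
prefixes instead branch over a mv.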
-
-define i1 @saddo.not.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: saddo.not.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addw a2, a0, a1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: xor a0, a0, a2
-; RV64-NEXT: seqz a0, a0
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: saddo.not.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addw a2, a0, a1
-; RV64ZBA-NEXT: add a0, a0, a1
-; RV64ZBA-NEXT: xor a0, a0, a2
-; RV64ZBA-NEXT: seqz a0, a0
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: saddo.not.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addw a2, a0, a1
-; RV64ZICOND-NEXT: add a0, a0, a1
-; RV64ZICOND-NEXT: xor a0, a0, a2
-; RV64ZICOND-NEXT: seqz a0, a0
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
- %obit = extractvalue {i32, i1} %t, 1
- %ret = xor i1 %obit, true
- ret i1 %ret
-}
-
-define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: saddo.select.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: add a2, a0, a1
-; RV64-NEXT: slt a2, a2, a0
-; RV64-NEXT: slti a3, a1, 0
-; RV64-NEXT: bne a3, a2, .LBB30_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: .LBB30_2: # %entry
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: saddo.select.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: add a2, a0, a1
-; RV64ZBA-NEXT: slt a2, a2, a0
-; RV64ZBA-NEXT: slti a3, a1, 0
-; RV64ZBA-NEXT: bne a3, a2, .LBB30_2
-; RV64ZBA-NEXT: # %bb.1: # %entry
-; RV64ZBA-NEXT: mv a0, a1
-; RV64ZBA-NEXT: .LBB30_2: # %entry
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: saddo.select.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: add a2, a0, a1
-; RV64ZICOND-NEXT: slt a2, a2, a0
-; RV64ZICOND-NEXT: slti a3, a1, 0
-; RV64ZICOND-NEXT: xor a2, a3, a2
-; RV64ZICOND-NEXT: czero.nez a1, a1, a2
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a2
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
- %obit = extractvalue {i64, i1} %t, 1
- %ret = select i1 %obit, i64 %v1, i64 %v2
- ret i64 %ret
-}
-
-define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: saddo.not.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: add a2, a0, a1
-; RV64-NEXT: slt a0, a2, a0
-; RV64-NEXT: slti a1, a1, 0
-; RV64-NEXT: xor a0, a1, a0
-; RV64-NEXT: xori a0, a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: saddo.not.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: add a2, a0, a1
-; RV64ZBA-NEXT: slt a0, a2, a0
-; RV64ZBA-NEXT: slti a1, a1, 0
-; RV64ZBA-NEXT: xor a0, a1, a0
-; RV64ZBA-NEXT: xori a0, a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: saddo.not.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: add a2, a0, a1
-; RV64ZICOND-NEXT: slt a0, a2, a0
-; RV64ZICOND-NEXT: slti a1, a1, 0
-; RV64ZICOND-NEXT: xor a0, a1, a0
-; RV64ZICOND-NEXT: xori a0, a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
- %obit = extractvalue {i64, i1} %t, 1
- %ret = xor i1 %obit, true
- ret i1 %ret
-}
-
-define i32 @uaddo.select.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: uaddo.select.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addw a2, a0, a1
-; RV64-NEXT: bltu a2, a0, .LBB32_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: .LBB32_2: # %entry
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.select.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addw a2, a0, a1
-; RV64ZBA-NEXT: bltu a2, a0, .LBB32_2
-; RV64ZBA-NEXT: # %bb.1: # %entry
-; RV64ZBA-NEXT: mv a0, a1
-; RV64ZBA-NEXT: .LBB32_2: # %entry
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.select.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addw a2, a0, a1
-; RV64ZICOND-NEXT: sltu a2, a2, a0
-; RV64ZICOND-NEXT: czero.nez a1, a1, a2
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a2
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
- %obit = extractvalue {i32, i1} %t, 1
- %ret = select i1 %obit, i32 %v1, i32 %v2
- ret i32 %ret
-}
-
-define i1 @uaddo.not.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: uaddo.not.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addw a1, a0, a1
-; RV64-NEXT: sltu a0, a1, a0
-; RV64-NEXT: xori a0, a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.not.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addw a1, a0, a1
-; RV64ZBA-NEXT: sltu a0, a1, a0
-; RV64ZBA-NEXT: xori a0, a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.not.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addw a1, a0, a1
-; RV64ZICOND-NEXT: sltu a0, a1, a0
-; RV64ZICOND-NEXT: xori a0, a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
- %obit = extractvalue {i32, i1} %t, 1
- %ret = xor i1 %obit, true
- ret i1 %ret
-}
-
-define i64 @uaddo.select.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: uaddo.select.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: add a2, a0, a1
-; RV64-NEXT: bltu a2, a0, .LBB34_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: .LBB34_2: # %entry
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.select.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: add a2, a0, a1
-; RV64ZBA-NEXT: bltu a2, a0, .LBB34_2
-; RV64ZBA-NEXT: # %bb.1: # %entry
-; RV64ZBA-NEXT: mv a0, a1
-; RV64ZBA-NEXT: .LBB34_2: # %entry
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.select.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: add a2, a0, a1
-; RV64ZICOND-NEXT: sltu a2, a2, a0
-; RV64ZICOND-NEXT: czero.nez a1, a1, a2
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a2
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
- %obit = extractvalue {i64, i1} %t, 1
- %ret = select i1 %obit, i64 %v1, i64 %v2
- ret i64 %ret
-}
-
-define i1 @uaddo.not.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: uaddo.not.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: add a1, a0, a1
-; RV64-NEXT: sltu a0, a1, a0
-; RV64-NEXT: xori a0, a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.not.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: add a1, a0, a1
-; RV64ZBA-NEXT: sltu a0, a1, a0
-; RV64ZBA-NEXT: xori a0, a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.not.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: add a1, a0, a1
-; RV64ZICOND-NEXT: sltu a0, a1, a0
-; RV64ZICOND-NEXT: xori a0, a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
- %obit = extractvalue {i64, i1} %t, 1
- %ret = xor i1 %obit, true
- ret i1 %ret
-}
-
-define i32 @ssubo.select.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: ssubo.select.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: subw a2, a0, a1
-; RV64-NEXT: sub a3, a0, a1
-; RV64-NEXT: bne a3, a2, .LBB36_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: .LBB36_2: # %entry
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: ssubo.select.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: subw a2, a0, a1
-; RV64ZBA-NEXT: sub a3, a0, a1
-; RV64ZBA-NEXT: bne a3, a2, .LBB36_2
-; RV64ZBA-NEXT: # %bb.1: # %entry
-; RV64ZBA-NEXT: mv a0, a1
-; RV64ZBA-NEXT: .LBB36_2: # %entry
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: ssubo.select.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: subw a2, a0, a1
-; RV64ZICOND-NEXT: sub a3, a0, a1
-; RV64ZICOND-NEXT: xor a2, a3, a2
-; RV64ZICOND-NEXT: czero.nez a1, a1, a2
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a2
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
- %obit = extractvalue {i32, i1} %t, 1
- %ret = select i1 %obit, i32 %v1, i32 %v2
- ret i32 %ret
-}
-
-define i1 @ssubo.not.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: ssubo.not.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: subw a2, a0, a1
-; RV64-NEXT: sub a0, a0, a1
-; RV64-NEXT: xor a0, a0, a2
-; RV64-NEXT: seqz a0, a0
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: ssubo.not.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: subw a2, a0, a1
-; RV64ZBA-NEXT: sub a0, a0, a1
-; RV64ZBA-NEXT: xor a0, a0, a2
-; RV64ZBA-NEXT: seqz a0, a0
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: ssubo.not.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: subw a2, a0, a1
-; RV64ZICOND-NEXT: sub a0, a0, a1
-; RV64ZICOND-NEXT: xor a0, a0, a2
-; RV64ZICOND-NEXT: seqz a0, a0
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
- %obit = extractvalue {i32, i1} %t, 1
- %ret = xor i1 %obit, true
- ret i1 %ret
-}
-
-define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: ssubo.select.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: sgtz a2, a1
-; RV64-NEXT: sub a3, a0, a1
-; RV64-NEXT: slt a3, a3, a0
-; RV64-NEXT: bne a2, a3, .LBB38_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: .LBB38_2: # %entry
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: ssubo.select.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: sgtz a2, a1
-; RV64ZBA-NEXT: sub a3, a0, a1
-; RV64ZBA-NEXT: slt a3, a3, a0
-; RV64ZBA-NEXT: bne a2, a3, .LBB38_2
-; RV64ZBA-NEXT: # %bb.1: # %entry
-; RV64ZBA-NEXT: mv a0, a1
-; RV64ZBA-NEXT: .LBB38_2: # %entry
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: ssubo.select.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: sgtz a2, a1
-; RV64ZICOND-NEXT: sub a3, a0, a1
-; RV64ZICOND-NEXT: slt a3, a3, a0
-; RV64ZICOND-NEXT: xor a2, a2, a3
-; RV64ZICOND-NEXT: czero.nez a1, a1, a2
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a2
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
- %obit = extractvalue {i64, i1} %t, 1
- %ret = select i1 %obit, i64 %v1, i64 %v2
- ret i64 %ret
-}
-
-define i1 @ssubo.not.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: ssubo.not.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: sgtz a2, a1
-; RV64-NEXT: sub a1, a0, a1
-; RV64-NEXT: slt a0, a1, a0
-; RV64-NEXT: xor a0, a2, a0
-; RV64-NEXT: xori a0, a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: ssubo.not.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: sgtz a2, a1
-; RV64ZBA-NEXT: sub a1, a0, a1
-; RV64ZBA-NEXT: slt a0, a1, a0
-; RV64ZBA-NEXT: xor a0, a2, a0
-; RV64ZBA-NEXT: xori a0, a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: ssubo.not.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: sgtz a2, a1
-; RV64ZICOND-NEXT: sub a1, a0, a1
-; RV64ZICOND-NEXT: slt a0, a1, a0
-; RV64ZICOND-NEXT: xor a0, a2, a0
-; RV64ZICOND-NEXT: xori a0, a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
- %obit = extractvalue {i64, i1} %t, 1
- %ret = xor i1 %obit, true
- ret i1 %ret
-}
-
-define i32 @usubo.select.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: usubo.select.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: subw a2, a0, a1
-; RV64-NEXT: bltu a0, a2, .LBB40_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: .LBB40_2: # %entry
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: usubo.select.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: subw a2, a0, a1
-; RV64ZBA-NEXT: bltu a0, a2, .LBB40_2
-; RV64ZBA-NEXT: # %bb.1: # %entry
-; RV64ZBA-NEXT: mv a0, a1
-; RV64ZBA-NEXT: .LBB40_2: # %entry
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: usubo.select.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: subw a2, a0, a1
-; RV64ZICOND-NEXT: sltu a2, a0, a2
-; RV64ZICOND-NEXT: czero.nez a1, a1, a2
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a2
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
- %obit = extractvalue {i32, i1} %t, 1
- %ret = select i1 %obit, i32 %v1, i32 %v2
- ret i32 %ret
-}
-
-define i1 @usubo.not.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: usubo.not.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: subw a1, a0, a1
-; RV64-NEXT: sltu a0, a0, a1
-; RV64-NEXT: xori a0, a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: usubo.not.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: subw a1, a0, a1
-; RV64ZBA-NEXT: sltu a0, a0, a1
-; RV64ZBA-NEXT: xori a0, a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: usubo.not.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: subw a1, a0, a1
-; RV64ZICOND-NEXT: sltu a0, a0, a1
-; RV64ZICOND-NEXT: xori a0, a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
- %obit = extractvalue {i32, i1} %t, 1
- %ret = xor i1 %obit, true
- ret i1 %ret
-}
-
-define i64 @usubo.select.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: usubo.select.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: sub a2, a0, a1
-; RV64-NEXT: bltu a0, a2, .LBB42_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: .LBB42_2: # %entry
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: usubo.select.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: sub a2, a0, a1
-; RV64ZBA-NEXT: bltu a0, a2, .LBB42_2
-; RV64ZBA-NEXT: # %bb.1: # %entry
-; RV64ZBA-NEXT: mv a0, a1
-; RV64ZBA-NEXT: .LBB42_2: # %entry
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: usubo.select.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: sub a2, a0, a1
-; RV64ZICOND-NEXT: sltu a2, a0, a2
-; RV64ZICOND-NEXT: czero.nez a1, a1, a2
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a2
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
- %obit = extractvalue {i64, i1} %t, 1
- %ret = select i1 %obit, i64 %v1, i64 %v2
- ret i64 %ret
-}
-
-define i1 @usubo.not.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: usubo.not.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: sub a1, a0, a1
-; RV64-NEXT: sltu a0, a0, a1
-; RV64-NEXT: xori a0, a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: usubo.not.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: sub a1, a0, a1
-; RV64ZBA-NEXT: sltu a0, a0, a1
-; RV64ZBA-NEXT: xori a0, a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: usubo.not.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: sub a1, a0, a1
-; RV64ZICOND-NEXT: sltu a0, a0, a1
-; RV64ZICOND-NEXT: xori a0, a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
- %obit = extractvalue {i64, i1} %t, 1
- %ret = xor i1 %obit, true
- ret i1 %ret
-}
-
-define i32 @smulo.select.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: smulo.select.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: mulw a2, a0, a1
-; RV64-NEXT: mul a3, a0, a1
-; RV64-NEXT: bne a3, a2, .LBB44_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: .LBB44_2: # %entry
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: smulo.select.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: mulw a2, a0, a1
-; RV64ZBA-NEXT: mul a3, a0, a1
-; RV64ZBA-NEXT: bne a3, a2, .LBB44_2
-; RV64ZBA-NEXT: # %bb.1: # %entry
-; RV64ZBA-NEXT: mv a0, a1
-; RV64ZBA-NEXT: .LBB44_2: # %entry
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: smulo.select.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: mulw a2, a0, a1
-; RV64ZICOND-NEXT: mul a3, a0, a1
-; RV64ZICOND-NEXT: xor a2, a3, a2
-; RV64ZICOND-NEXT: czero.nez a1, a1, a2
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a2
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
- %obit = extractvalue {i32, i1} %t, 1
- %ret = select i1 %obit, i32 %v1, i32 %v2
- ret i32 %ret
-}
-
-define i1 @smulo.not.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: smulo.not.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: mulw a2, a0, a1
-; RV64-NEXT: mul a0, a0, a1
-; RV64-NEXT: xor a0, a0, a2
-; RV64-NEXT: seqz a0, a0
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: smulo.not.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: mulw a2, a0, a1
-; RV64ZBA-NEXT: mul a0, a0, a1
-; RV64ZBA-NEXT: xor a0, a0, a2
-; RV64ZBA-NEXT: seqz a0, a0
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: smulo.not.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: mulw a2, a0, a1
-; RV64ZICOND-NEXT: mul a0, a0, a1
-; RV64ZICOND-NEXT: xor a0, a0, a2
-; RV64ZICOND-NEXT: seqz a0, a0
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
- %obit = extractvalue {i32, i1} %t, 1
- %ret = xor i1 %obit, true
- ret i1 %ret
-}
-
-define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: smulo.select.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: mulh a2, a0, a1
-; RV64-NEXT: mul a3, a0, a1
-; RV64-NEXT: srai a3, a3, 63
-; RV64-NEXT: bne a2, a3, .LBB46_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: .LBB46_2: # %entry
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: smulo.select.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: mulh a2, a0, a1
-; RV64ZBA-NEXT: mul a3, a0, a1
-; RV64ZBA-NEXT: srai a3, a3, 63
-; RV64ZBA-NEXT: bne a2, a3, .LBB46_2
-; RV64ZBA-NEXT: # %bb.1: # %entry
-; RV64ZBA-NEXT: mv a0, a1
-; RV64ZBA-NEXT: .LBB46_2: # %entry
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: smulo.select.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: mulh a2, a0, a1
-; RV64ZICOND-NEXT: mul a3, a0, a1
-; RV64ZICOND-NEXT: srai a3, a3, 63
-; RV64ZICOND-NEXT: xor a2, a2, a3
-; RV64ZICOND-NEXT: czero.nez a1, a1, a2
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a2
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
- %obit = extractvalue {i64, i1} %t, 1
- %ret = select i1 %obit, i64 %v1, i64 %v2
- ret i64 %ret
-}
-
-define i1 @smulo.not.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: smulo.not.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: mulh a2, a0, a1
-; RV64-NEXT: mul a0, a0, a1
-; RV64-NEXT: srai a0, a0, 63
-; RV64-NEXT: xor a0, a2, a0
-; RV64-NEXT: seqz a0, a0
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: smulo.not.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: mulh a2, a0, a1
-; RV64ZBA-NEXT: mul a0, a0, a1
-; RV64ZBA-NEXT: srai a0, a0, 63
-; RV64ZBA-NEXT: xor a0, a2, a0
-; RV64ZBA-NEXT: seqz a0, a0
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: smulo.not.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: mulh a2, a0, a1
-; RV64ZICOND-NEXT: mul a0, a0, a1
-; RV64ZICOND-NEXT: srai a0, a0, 63
-; RV64ZICOND-NEXT: xor a0, a2, a0
-; RV64ZICOND-NEXT: seqz a0, a0
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
- %obit = extractvalue {i64, i1} %t, 1
- %ret = xor i1 %obit, true
- ret i1 %ret
-}
-
-define i32 @umulo.select.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: umulo.select.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: slli a2, a1, 32
-; RV64-NEXT: slli a3, a0, 32
-; RV64-NEXT: mulhu a2, a3, a2
-; RV64-NEXT: srai a2, a2, 32
-; RV64-NEXT: bnez a2, .LBB48_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: .LBB48_2: # %entry
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: umulo.select.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: zext.w a2, a1
-; RV64ZBA-NEXT: zext.w a3, a0
-; RV64ZBA-NEXT: mul a2, a3, a2
-; RV64ZBA-NEXT: srai a2, a2, 32
-; RV64ZBA-NEXT: bnez a2, .LBB48_2
-; RV64ZBA-NEXT: # %bb.1: # %entry
-; RV64ZBA-NEXT: mv a0, a1
-; RV64ZBA-NEXT: .LBB48_2: # %entry
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: umulo.select.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: slli a2, a1, 32
-; RV64ZICOND-NEXT: slli a3, a0, 32
-; RV64ZICOND-NEXT: mulhu a2, a3, a2
-; RV64ZICOND-NEXT: srai a2, a2, 32
-; RV64ZICOND-NEXT: czero.nez a1, a1, a2
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a2
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
- %obit = extractvalue {i32, i1} %t, 1
- %ret = select i1 %obit, i32 %v1, i32 %v2
- ret i32 %ret
-}
-
-define i1 @umulo.not.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: umulo.not.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: slli a1, a1, 32
-; RV64-NEXT: slli a0, a0, 32
-; RV64-NEXT: mulhu a0, a0, a1
-; RV64-NEXT: srai a0, a0, 32
-; RV64-NEXT: seqz a0, a0
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: umulo.not.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: zext.w a1, a1
-; RV64ZBA-NEXT: zext.w a0, a0
-; RV64ZBA-NEXT: mul a0, a0, a1
-; RV64ZBA-NEXT: srai a0, a0, 32
-; RV64ZBA-NEXT: seqz a0, a0
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: umulo.not.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: slli a1, a1, 32
-; RV64ZICOND-NEXT: slli a0, a0, 32
-; RV64ZICOND-NEXT: mulhu a0, a0, a1
-; RV64ZICOND-NEXT: srai a0, a0, 32
-; RV64ZICOND-NEXT: seqz a0, a0
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
- %obit = extractvalue {i32, i1} %t, 1
- %ret = xor i1 %obit, true
- ret i1 %ret
-}
-
-define i64 @umulo.select.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: umulo.select.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: mulhu a2, a0, a1
-; RV64-NEXT: bnez a2, .LBB50_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: .LBB50_2: # %entry
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: umulo.select.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: mulhu a2, a0, a1
-; RV64ZBA-NEXT: bnez a2, .LBB50_2
-; RV64ZBA-NEXT: # %bb.1: # %entry
-; RV64ZBA-NEXT: mv a0, a1
-; RV64ZBA-NEXT: .LBB50_2: # %entry
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: umulo.select.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: mulhu a2, a0, a1
-; RV64ZICOND-NEXT: czero.nez a1, a1, a2
-; RV64ZICOND-NEXT: czero.eqz a0, a0, a2
-; RV64ZICOND-NEXT: or a0, a0, a1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
- %obit = extractvalue {i64, i1} %t, 1
- %ret = select i1 %obit, i64 %v1, i64 %v2
- ret i64 %ret
-}
-
-define i1 @umulo.not.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: umulo.not.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: mulhu a0, a0, a1
-; RV64-NEXT: seqz a0, a0
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: umulo.not.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: mulhu a0, a0, a1
-; RV64ZBA-NEXT: seqz a0, a0
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: umulo.not.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: mulhu a0, a0, a1
-; RV64ZICOND-NEXT: seqz a0, a0
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
- %obit = extractvalue {i64, i1} %t, 1
- %ret = xor i1 %obit, true
- ret i1 %ret
-}
-
-;
-; Check the use of the overflow bit in combination with a branch instruction.
-;
-define zeroext i1 @saddo.br.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: saddo.br.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addw a2, a0, a1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: beq a0, a2, .LBB52_2
-; RV64-NEXT: # %bb.1: # %overflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB52_2: # %continue
-; RV64-NEXT: li a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: saddo.br.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addw a2, a0, a1
-; RV64ZBA-NEXT: add a0, a0, a1
-; RV64ZBA-NEXT: beq a0, a2, .LBB52_2
-; RV64ZBA-NEXT: # %bb.1: # %overflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: ret
-; RV64ZBA-NEXT: .LBB52_2: # %continue
-; RV64ZBA-NEXT: li a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: saddo.br.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addw a2, a0, a1
-; RV64ZICOND-NEXT: add a0, a0, a1
-; RV64ZICOND-NEXT: beq a0, a2, .LBB52_2
-; RV64ZICOND-NEXT: # %bb.1: # %overflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: ret
-; RV64ZICOND-NEXT: .LBB52_2: # %continue
-; RV64ZICOND-NEXT: li a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %overflow, label %continue
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: saddo.br.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: add a2, a0, a1
-; RV64-NEXT: slt a0, a2, a0
-; RV64-NEXT: slti a1, a1, 0
-; RV64-NEXT: beq a1, a0, .LBB53_2
-; RV64-NEXT: # %bb.1: # %overflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB53_2: # %continue
-; RV64-NEXT: li a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: saddo.br.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: add a2, a0, a1
-; RV64ZBA-NEXT: slt a0, a2, a0
-; RV64ZBA-NEXT: slti a1, a1, 0
-; RV64ZBA-NEXT: beq a1, a0, .LBB53_2
-; RV64ZBA-NEXT: # %bb.1: # %overflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: ret
-; RV64ZBA-NEXT: .LBB53_2: # %continue
-; RV64ZBA-NEXT: li a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: saddo.br.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: add a2, a0, a1
-; RV64ZICOND-NEXT: slt a0, a2, a0
-; RV64ZICOND-NEXT: slti a1, a1, 0
-; RV64ZICOND-NEXT: beq a1, a0, .LBB53_2
-; RV64ZICOND-NEXT: # %bb.1: # %overflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: ret
-; RV64ZICOND-NEXT: .LBB53_2: # %continue
-; RV64ZICOND-NEXT: li a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- br i1 %obit, label %overflow, label %continue
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
-; RV64-LABEL: uaddo.br.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addw a1, a0, a1
-; RV64-NEXT: sext.w a0, a0
-; RV64-NEXT: bgeu a1, a0, .LBB54_2
-; RV64-NEXT: # %bb.1: # %overflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB54_2: # %continue
-; RV64-NEXT: li a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.br.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addw a1, a0, a1
-; RV64ZBA-NEXT: sext.w a0, a0
-; RV64ZBA-NEXT: bgeu a1, a0, .LBB54_2
-; RV64ZBA-NEXT: # %bb.1: # %overflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: ret
-; RV64ZBA-NEXT: .LBB54_2: # %continue
-; RV64ZBA-NEXT: li a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.br.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addw a1, a0, a1
-; RV64ZICOND-NEXT: sext.w a0, a0
-; RV64ZICOND-NEXT: bgeu a1, a0, .LBB54_2
-; RV64ZICOND-NEXT: # %bb.1: # %overflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: ret
-; RV64ZICOND-NEXT: .LBB54_2: # %continue
-; RV64ZICOND-NEXT: li a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %overflow, label %continue
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: uaddo.br.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: add a1, a0, a1
-; RV64-NEXT: bgeu a1, a0, .LBB55_2
-; RV64-NEXT: # %bb.1: # %overflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB55_2: # %continue
-; RV64-NEXT: li a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.br.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: add a1, a0, a1
-; RV64ZBA-NEXT: bgeu a1, a0, .LBB55_2
-; RV64ZBA-NEXT: # %bb.1: # %overflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: ret
-; RV64ZBA-NEXT: .LBB55_2: # %continue
-; RV64ZBA-NEXT: li a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.br.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: add a1, a0, a1
-; RV64ZICOND-NEXT: bgeu a1, a0, .LBB55_2
-; RV64ZICOND-NEXT: # %bb.1: # %overflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: ret
-; RV64ZICOND-NEXT: .LBB55_2: # %continue
-; RV64ZICOND-NEXT: li a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- br i1 %obit, label %overflow, label %continue
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @ssubo.br.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: ssubo.br.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: subw a2, a0, a1
-; RV64-NEXT: sub a0, a0, a1
-; RV64-NEXT: beq a0, a2, .LBB56_2
-; RV64-NEXT: # %bb.1: # %overflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB56_2: # %continue
-; RV64-NEXT: li a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: ssubo.br.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: subw a2, a0, a1
-; RV64ZBA-NEXT: sub a0, a0, a1
-; RV64ZBA-NEXT: beq a0, a2, .LBB56_2
-; RV64ZBA-NEXT: # %bb.1: # %overflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: ret
-; RV64ZBA-NEXT: .LBB56_2: # %continue
-; RV64ZBA-NEXT: li a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: ssubo.br.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: subw a2, a0, a1
-; RV64ZICOND-NEXT: sub a0, a0, a1
-; RV64ZICOND-NEXT: beq a0, a2, .LBB56_2
-; RV64ZICOND-NEXT: # %bb.1: # %overflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: ret
-; RV64ZICOND-NEXT: .LBB56_2: # %continue
-; RV64ZICOND-NEXT: li a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %overflow, label %continue
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: ssubo.br.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: sgtz a2, a1
-; RV64-NEXT: sub a1, a0, a1
-; RV64-NEXT: slt a0, a1, a0
-; RV64-NEXT: beq a2, a0, .LBB57_2
-; RV64-NEXT: # %bb.1: # %overflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB57_2: # %continue
-; RV64-NEXT: li a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: ssubo.br.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: sgtz a2, a1
-; RV64ZBA-NEXT: sub a1, a0, a1
-; RV64ZBA-NEXT: slt a0, a1, a0
-; RV64ZBA-NEXT: beq a2, a0, .LBB57_2
-; RV64ZBA-NEXT: # %bb.1: # %overflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: ret
-; RV64ZBA-NEXT: .LBB57_2: # %continue
-; RV64ZBA-NEXT: li a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: ssubo.br.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: sgtz a2, a1
-; RV64ZICOND-NEXT: sub a1, a0, a1
-; RV64ZICOND-NEXT: slt a0, a1, a0
-; RV64ZICOND-NEXT: beq a2, a0, .LBB57_2
-; RV64ZICOND-NEXT: # %bb.1: # %overflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: ret
-; RV64ZICOND-NEXT: .LBB57_2: # %continue
-; RV64ZICOND-NEXT: li a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- br i1 %obit, label %overflow, label %continue
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @usubo.br.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: usubo.br.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: subw a1, a0, a1
-; RV64-NEXT: bgeu a0, a1, .LBB58_2
-; RV64-NEXT: # %bb.1: # %overflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB58_2: # %continue
-; RV64-NEXT: li a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: usubo.br.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: subw a1, a0, a1
-; RV64ZBA-NEXT: bgeu a0, a1, .LBB58_2
-; RV64ZBA-NEXT: # %bb.1: # %overflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: ret
-; RV64ZBA-NEXT: .LBB58_2: # %continue
-; RV64ZBA-NEXT: li a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: usubo.br.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: subw a1, a0, a1
-; RV64ZICOND-NEXT: bgeu a0, a1, .LBB58_2
-; RV64ZICOND-NEXT: # %bb.1: # %overflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: ret
-; RV64ZICOND-NEXT: .LBB58_2: # %continue
-; RV64ZICOND-NEXT: li a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %overflow, label %continue
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: usubo.br.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: sub a1, a0, a1
-; RV64-NEXT: bgeu a0, a1, .LBB59_2
-; RV64-NEXT: # %bb.1: # %overflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB59_2: # %continue
-; RV64-NEXT: li a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: usubo.br.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: sub a1, a0, a1
-; RV64ZBA-NEXT: bgeu a0, a1, .LBB59_2
-; RV64ZBA-NEXT: # %bb.1: # %overflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: ret
-; RV64ZBA-NEXT: .LBB59_2: # %continue
-; RV64ZBA-NEXT: li a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: usubo.br.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: sub a1, a0, a1
-; RV64ZICOND-NEXT: bgeu a0, a1, .LBB59_2
-; RV64ZICOND-NEXT: # %bb.1: # %overflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: ret
-; RV64ZICOND-NEXT: .LBB59_2: # %continue
-; RV64ZICOND-NEXT: li a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- br i1 %obit, label %overflow, label %continue
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @smulo.br.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: smulo.br.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: mulw a2, a0, a1
-; RV64-NEXT: mul a0, a0, a1
-; RV64-NEXT: beq a0, a2, .LBB60_2
-; RV64-NEXT: # %bb.1: # %overflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB60_2: # %continue
-; RV64-NEXT: li a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: smulo.br.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: mulw a2, a0, a1
-; RV64ZBA-NEXT: mul a0, a0, a1
-; RV64ZBA-NEXT: beq a0, a2, .LBB60_2
-; RV64ZBA-NEXT: # %bb.1: # %overflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: ret
-; RV64ZBA-NEXT: .LBB60_2: # %continue
-; RV64ZBA-NEXT: li a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: smulo.br.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: mulw a2, a0, a1
-; RV64ZICOND-NEXT: mul a0, a0, a1
-; RV64ZICOND-NEXT: beq a0, a2, .LBB60_2
-; RV64ZICOND-NEXT: # %bb.1: # %overflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: ret
-; RV64ZICOND-NEXT: .LBB60_2: # %continue
-; RV64ZICOND-NEXT: li a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %overflow, label %continue
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: smulo.br.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: mulh a2, a0, a1
-; RV64-NEXT: mul a0, a0, a1
-; RV64-NEXT: srai a0, a0, 63
-; RV64-NEXT: beq a2, a0, .LBB61_2
-; RV64-NEXT: # %bb.1: # %overflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB61_2: # %continue
-; RV64-NEXT: li a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: smulo.br.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: mulh a2, a0, a1
-; RV64ZBA-NEXT: mul a0, a0, a1
-; RV64ZBA-NEXT: srai a0, a0, 63
-; RV64ZBA-NEXT: beq a2, a0, .LBB61_2
-; RV64ZBA-NEXT: # %bb.1: # %overflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: ret
-; RV64ZBA-NEXT: .LBB61_2: # %continue
-; RV64ZBA-NEXT: li a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: smulo.br.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: mulh a2, a0, a1
-; RV64ZICOND-NEXT: mul a0, a0, a1
-; RV64ZICOND-NEXT: srai a0, a0, 63
-; RV64ZICOND-NEXT: beq a2, a0, .LBB61_2
-; RV64ZICOND-NEXT: # %bb.1: # %overflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: ret
-; RV64ZICOND-NEXT: .LBB61_2: # %continue
-; RV64ZICOND-NEXT: li a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- br i1 %obit, label %overflow, label %continue
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @smulo2.br.i64(i64 %v1) {
-; RV64-LABEL: smulo2.br.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: li a1, -13
-; RV64-NEXT: mulh a2, a0, a1
-; RV64-NEXT: mul a0, a0, a1
-; RV64-NEXT: srai a0, a0, 63
-; RV64-NEXT: beq a2, a0, .LBB62_2
-; RV64-NEXT: # %bb.1: # %overflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB62_2: # %continue
-; RV64-NEXT: li a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: smulo2.br.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: li a1, -13
-; RV64ZBA-NEXT: mulh a2, a0, a1
-; RV64ZBA-NEXT: mul a0, a0, a1
-; RV64ZBA-NEXT: srai a0, a0, 63
-; RV64ZBA-NEXT: beq a2, a0, .LBB62_2
-; RV64ZBA-NEXT: # %bb.1: # %overflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: ret
-; RV64ZBA-NEXT: .LBB62_2: # %continue
-; RV64ZBA-NEXT: li a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: smulo2.br.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: li a1, -13
-; RV64ZICOND-NEXT: mulh a2, a0, a1
-; RV64ZICOND-NEXT: mul a0, a0, a1
-; RV64ZICOND-NEXT: srai a0, a0, 63
-; RV64ZICOND-NEXT: beq a2, a0, .LBB62_2
-; RV64ZICOND-NEXT: # %bb.1: # %overflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: ret
-; RV64ZICOND-NEXT: .LBB62_2: # %continue
-; RV64ZICOND-NEXT: li a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 -13)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- br i1 %obit, label %overflow, label %continue
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @umulo.br.i32(i32 signext %v1, i32 signext %v2) {
-; RV64-LABEL: umulo.br.i32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: slli a1, a1, 32
-; RV64-NEXT: slli a0, a0, 32
-; RV64-NEXT: mulhu a0, a0, a1
-; RV64-NEXT: srai a0, a0, 32
-; RV64-NEXT: beqz a0, .LBB63_2
-; RV64-NEXT: # %bb.1: # %overflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB63_2: # %continue
-; RV64-NEXT: li a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: umulo.br.i32:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: zext.w a1, a1
-; RV64ZBA-NEXT: zext.w a0, a0
-; RV64ZBA-NEXT: mul a0, a0, a1
-; RV64ZBA-NEXT: srai a0, a0, 32
-; RV64ZBA-NEXT: beqz a0, .LBB63_2
-; RV64ZBA-NEXT: # %bb.1: # %overflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: ret
-; RV64ZBA-NEXT: .LBB63_2: # %continue
-; RV64ZBA-NEXT: li a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: umulo.br.i32:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: slli a1, a1, 32
-; RV64ZICOND-NEXT: slli a0, a0, 32
-; RV64ZICOND-NEXT: mulhu a0, a0, a1
-; RV64ZICOND-NEXT: srai a0, a0, 32
-; RV64ZICOND-NEXT: beqz a0, .LBB63_2
-; RV64ZICOND-NEXT: # %bb.1: # %overflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: ret
-; RV64ZICOND-NEXT: .LBB63_2: # %continue
-; RV64ZICOND-NEXT: li a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %overflow, label %continue
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
-; RV64-LABEL: umulo.br.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: mulhu a0, a0, a1
-; RV64-NEXT: beqz a0, .LBB64_2
-; RV64-NEXT: # %bb.1: # %overflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB64_2: # %continue
-; RV64-NEXT: li a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: umulo.br.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: mulhu a0, a0, a1
-; RV64ZBA-NEXT: beqz a0, .LBB64_2
-; RV64ZBA-NEXT: # %bb.1: # %overflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: ret
-; RV64ZBA-NEXT: .LBB64_2: # %continue
-; RV64ZBA-NEXT: li a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: umulo.br.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: mulhu a0, a0, a1
-; RV64ZICOND-NEXT: beqz a0, .LBB64_2
-; RV64ZICOND-NEXT: # %bb.1: # %overflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: ret
-; RV64ZICOND-NEXT: .LBB64_2: # %continue
-; RV64ZICOND-NEXT: li a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- br i1 %obit, label %overflow, label %continue
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @umulo2.br.i64(i64 %v1) {
-; RV64-LABEL: umulo2.br.i64:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: add a1, a0, a0
-; RV64-NEXT: bgeu a1, a0, .LBB65_2
-; RV64-NEXT: # %bb.1: # %overflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB65_2: # %continue
-; RV64-NEXT: li a0, 1
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: umulo2.br.i64:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: add a1, a0, a0
-; RV64ZBA-NEXT: bgeu a1, a0, .LBB65_2
-; RV64ZBA-NEXT: # %bb.1: # %overflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: ret
-; RV64ZBA-NEXT: .LBB65_2: # %continue
-; RV64ZBA-NEXT: li a0, 1
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: umulo2.br.i64:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: add a1, a0, a0
-; RV64ZICOND-NEXT: bgeu a1, a0, .LBB65_2
-; RV64ZICOND-NEXT: # %bb.1: # %overflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: ret
-; RV64ZICOND-NEXT: .LBB65_2: # %continue
-; RV64ZICOND-NEXT: li a0, 1
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- br i1 %obit, label %overflow, label %continue
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @uaddo.i64.constant(i64 %v1, ptr %res) {
-; RV64-LABEL: uaddo.i64.constant:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addi a2, a0, 2
-; RV64-NEXT: sltu a0, a2, a0
-; RV64-NEXT: sd a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.i64.constant:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addi a2, a0, 2
-; RV64ZBA-NEXT: sltu a0, a2, a0
-; RV64ZBA-NEXT: sd a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.i64.constant:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addi a2, a0, 2
-; RV64ZICOND-NEXT: sltu a0, a2, a0
-; RV64ZICOND-NEXT: sd a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @uaddo.i64.constant_2048(i64 %v1, ptr %res) {
-; RV64-LABEL: uaddo.i64.constant_2048:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addi a2, a0, 2047
-; RV64-NEXT: addi a2, a2, 1
-; RV64-NEXT: sltu a0, a2, a0
-; RV64-NEXT: sd a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.i64.constant_2048:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addi a2, a0, 2047
-; RV64ZBA-NEXT: addi a2, a2, 1
-; RV64ZBA-NEXT: sltu a0, a2, a0
-; RV64ZBA-NEXT: sd a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.i64.constant_2048:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addi a2, a0, 2047
-; RV64ZICOND-NEXT: addi a2, a2, 1
-; RV64ZICOND-NEXT: sltu a0, a2, a0
-; RV64ZICOND-NEXT: sd a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2048)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, ptr %res
- ret i1 %obit
-}
-
-define zeroext i1 @uaddo.i64.constant_2049(i64 %v1, ptr %res) {
-; RV64-LABEL: uaddo.i64.constant_2049:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addi a2, a0, 2047
-; RV64-NEXT: addi a2, a2, 2
-; RV64-NEXT: sltu a0, a2, a0
-; RV64-NEXT: sd a2, 0(a1)
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.i64.constant_2049:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: addi a2, a0, 2047
-; RV64ZBA-NEXT: addi a2, a2, 2
-; RV64ZBA-NEXT: sltu a0, a2, a0
-; RV64ZBA-NEXT: sd a2, 0(a1)
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.i64.constant_2049:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: addi a2, a0, 2047
-; RV64ZICOND-NEXT: addi a2, a2, 2
-; RV64ZICOND-NEXT: sltu a0, a2, a0
-; RV64ZICOND-NEXT: sd a2, 0(a1)
-; RV64ZICOND-NEXT: ret
-entry:
- %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2049)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, ptr %res
- ret i1 %obit
-}
-
-define i64 @uaddo.i64.constant_setcc_on_overflow_flag(ptr %p) {
-; RV64-LABEL: uaddo.i64.constant_setcc_on_overflow_flag:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: ld a1, 0(a0)
-; RV64-NEXT: addi a0, a1, 2
-; RV64-NEXT: bltu a0, a1, .LBB69_2
-; RV64-NEXT: # %bb.1: # %IfOverflow
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: .LBB69_2: # %IfNoOverflow
-; RV64-NEXT: ret
-;
-; RV64ZBA-LABEL: uaddo.i64.constant_setcc_on_overflow_flag:
-; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: ld a1, 0(a0)
-; RV64ZBA-NEXT: addi a0, a1, 2
-; RV64ZBA-NEXT: bltu a0, a1, .LBB69_2
-; RV64ZBA-NEXT: # %bb.1: # %IfOverflow
-; RV64ZBA-NEXT: li a0, 0
-; RV64ZBA-NEXT: .LBB69_2: # %IfNoOverflow
-; RV64ZBA-NEXT: ret
-;
-; RV64ZICOND-LABEL: uaddo.i64.constant_setcc_on_overflow_flag:
-; RV64ZICOND: # %bb.0: # %entry
-; RV64ZICOND-NEXT: ld a1, 0(a0)
-; RV64ZICOND-NEXT: addi a0, a1, 2
-; RV64ZICOND-NEXT: bltu a0, a1, .LBB69_2
-; RV64ZICOND-NEXT: # %bb.1: # %IfOverflow
-; RV64ZICOND-NEXT: li a0, 0
-; RV64ZICOND-NEXT: .LBB69_2: # %IfNoOverflow
-; RV64ZICOND-NEXT: ret
-entry:
- %v1 = load i64, ptr %p
- %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- br i1 %obit, label %IfNoOverflow, label %IfOverflow
-IfOverflow:
- ret i64 0
-IfNoOverflow:
- ret i64 %val
-}
-
-declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
-declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
-declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
-declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
-declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
-declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
-declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
-declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
-declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
-declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
-declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
-declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
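The deleted xaluo.ll checks above all exercise one lowering idiom: for i32 operations on RV64 the overflow bit is recovered by comparing the 32-bit result (addw/subw/mulw) against the 64-bit result (add/sub/mul), while for i64 signed add/sub it compares the sign of the second operand (slti/sgtz) against the direction the result moved relative to the first operand (slt), combining the two with xor. A minimal standalone sketch of the intrinsic pattern these tests compile; the function and file names here are illustrative only and are not part of the deleted tests:

; llc -mtriple=riscv64 < overflow-sketch.ll   (hypothetical file name)
declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)

define i1 @saddo.sketch.i32(i32 signext %a, i32 signext %b) {
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  %obit = extractvalue {i32, i1} %t, 1   ; i1 overflow flag
  ret i1 %obit                           ; expect roughly addw + add, then xor/snez of the two sums
}

Compiled for riscv64 this should produce the same addw/add comparison seen in saddo.br.i32 above, with the branch replaced by a setcc on the compared sums.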
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmac.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmac.ll
deleted file mode 100644
index 63d1833..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmac.ll
+++ /dev/null
@@ -1,123 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+xtheadmac -mattr=+m -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64XTHEADMAC
-
-define i32 @mula_i32(i32 %a, i32 %b, i32 %c) {
-; RV64XTHEADMAC-LABEL: mula_i32:
-; RV64XTHEADMAC: # %bb.0:
-; RV64XTHEADMAC-NEXT: th.mulaw a0, a1, a2
-; RV64XTHEADMAC-NEXT: ret
- %d = mul i32 %b, %c
- %e = add i32 %a, %d
- ret i32 %e
-}
-
-define i32 @muls_i32(i32 %a, i32 %b, i32 %c) {
-; RV64XTHEADMAC-LABEL: muls_i32:
-; RV64XTHEADMAC: # %bb.0:
-; RV64XTHEADMAC-NEXT: th.mulsw a0, a1, a2
-; RV64XTHEADMAC-NEXT: ret
- %d = mul i32 %b, %c
- %e = sub i32 %a, %d
- ret i32 %e
-}
-
-define i64 @mula_i64(i64 %a, i64 %b, i64 %c) {
-; RV64XTHEADMAC-LABEL: mula_i64:
-; RV64XTHEADMAC: # %bb.0:
-; RV64XTHEADMAC-NEXT: th.mula a0, a1, a2
-; RV64XTHEADMAC-NEXT: ret
- %d = mul i64 %b, %c
- %f = add i64 %a, %d
- ret i64 %f
-}
-
-define i64 @mulaw_i64(i32 %a, i32 %b, i32 %c) {
-; RV64XTHEADMAC-LABEL: mulaw_i64:
-; RV64XTHEADMAC: # %bb.0:
-; RV64XTHEADMAC-NEXT: th.mulaw a0, a1, a2
-; RV64XTHEADMAC-NEXT: ret
- %d = mul i32 %b, %c
- %e = add i32 %a, %d
- %f = sext i32 %e to i64
- ret i64 %f
-}
-
-define i64 @mulah_i64(i32 %a, i16 %b, i16 %c) {
-; RV64XTHEADMAC-LABEL: mulah_i64:
-; RV64XTHEADMAC: # %bb.0:
-; RV64XTHEADMAC-NEXT: th.mulah a0, a1, a2
-; RV64XTHEADMAC-NEXT: ret
- %d = sext i16 %b to i32
- %e = sext i16 %c to i32
- %f = mul i32 %d, %e
- %g = add i32 %a, %f
- %h = sext i32 %g to i64
- ret i64 %h
-}
-
-define i64 @muls_i64(i64 %a, i64 %b, i64 %c) {
-; RV64XTHEADMAC-LABEL: muls_i64:
-; RV64XTHEADMAC: # %bb.0:
-; RV64XTHEADMAC-NEXT: th.muls a0, a1, a2
-; RV64XTHEADMAC-NEXT: ret
- %d = mul i64 %b, %c
- %f = sub i64 %a, %d
- ret i64 %f
-}
-
-define i64 @mulsw_i64(i32 %a, i32 %b, i32 %c) {
-; RV64XTHEADMAC-LABEL: mulsw_i64:
-; RV64XTHEADMAC: # %bb.0:
-; RV64XTHEADMAC-NEXT: th.mulsw a0, a1, a2
-; RV64XTHEADMAC-NEXT: ret
- %d = mul i32 %b, %c
- %e = sub i32 %a, %d
- %f = sext i32 %e to i64
- ret i64 %f
-}
-
-define i64 @mulsh_i64(i32 %a, i16 %b, i16 %c) {
-; RV64XTHEADMAC-LABEL: mulsh_i64:
-; RV64XTHEADMAC: # %bb.0:
-; RV64XTHEADMAC-NEXT: th.mulsh a0, a1, a2
-; RV64XTHEADMAC-NEXT: ret
- %d = sext i16 %b to i32
- %e = sext i16 %c to i32
- %f = mul i32 %d, %e
- %g = sub i32 %a, %f
- %h = sext i32 %g to i64
- ret i64 %h
-}
-
-define i32 @commutative1(i32 %A, i32 %B, i32 %C) {
-; RV64XTHEADMAC-LABEL: commutative1:
-; RV64XTHEADMAC: # %bb.0:
-; RV64XTHEADMAC-NEXT: th.mulaw a2, a1, a0
-; RV64XTHEADMAC-NEXT: mv a0, a2
-; RV64XTHEADMAC-NEXT: ret
- %mul = mul nsw i32 %B, %A
- %add = add i32 %mul, %C
- ret i32 %add
-}
-
-define i32 @commutative2(i32 %A, i32 %B, i32 %C) {
-; RV64XTHEADMAC-LABEL: commutative2:
-; RV64XTHEADMAC: # %bb.0:
-; RV64XTHEADMAC-NEXT: th.mulaw a0, a1, a2
-; RV64XTHEADMAC-NEXT: ret
- %mul = mul nsw i32 %B, %C
- %add = add i32 %mul, %A
- ret i32 %add
-}
-
-define i32 @commutative3(i32 %A, i32 %B, i32 %C) {
-; RV64XTHEADMAC-LABEL: commutative3:
-; RV64XTHEADMAC: # %bb.0:
-; RV64XTHEADMAC-NEXT: th.mulaw a1, a2, a0
-; RV64XTHEADMAC-NEXT: mv a0, a1
-; RV64XTHEADMAC-NEXT: ret
- %mul = mul nsw i32 %C, %A
- %add = add i32 %mul, %B
- ret i32 %add
-}
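The xtheadmac.ll checks removed above verify that a multiply feeding an add or sub is fused into the T-Head multiply-accumulate instructions: th.mula/th.muls for i64, th.mulaw/th.mulsw for 32-bit accumulates (plain i32 or sign-extended to i64), and th.mulah/th.mulsh when both multiplicands are sext from i16, with the commutative* tests covering operand order. A minimal sketch of the accumulate pattern, assuming -mattr=+xtheadmac as in the RUN line above (the function name is illustrative):

define i32 @mac.sketch(i32 %acc, i32 %x, i32 %y) {
  %p = mul i32 %x, %y    ; product with a single use...
  %r = add i32 %acc, %p  ; ...so mul+add should fold into th.mulaw acc, x, y
  ret i32 %r
}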
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmemidx.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmemidx.ll
deleted file mode 100644
index e557a14..0000000
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmemidx.ll
+++ /dev/null
@@ -1,717 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+d -mattr=+xtheadmemidx -mattr=+m -verify-machineinstrs < %s \
-; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64XTHEADMEMIDX
-
-define ptr @lbia(ptr %base, ptr %addr.2, i8 %a) {
-; RV64XTHEADMEMIDX-LABEL: lbia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lbia a3, (a0), -1, 0
-; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV64XTHEADMEMIDX-NEXT: sb a2, 0(a1)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i8, ptr %base, i8 0
- %ld = load i8, ptr %addr
- %addr.1 = getelementptr i8, ptr %base, i8 -1
- %res = add i8 %ld, %a
- store i8 %res, ptr %addr.2
- ret ptr %addr.1
-}
-
-define ptr @lbib(ptr %base, i8 %a) {
-; RV64XTHEADMEMIDX-LABEL: lbib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lbib a2, (a0), 1, 0
-; RV64XTHEADMEMIDX-NEXT: add a1, a2, a1
-; RV64XTHEADMEMIDX-NEXT: sb a1, 1(a0)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i8, ptr %base, i8 1
- %ld = load i8, ptr %addr
- %addr.1 = getelementptr i8, ptr %base, i8 2
- %res = add i8 %ld, %a
- store i8 %res, ptr %addr.1
- ret ptr %addr
-}
-
-define ptr @lbuia(ptr %base, ptr %addr.2, i64 %a) {
-; RV64XTHEADMEMIDX-LABEL: lbuia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lbuia a3, (a0), -1, 0
-; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV64XTHEADMEMIDX-NEXT: sd a2, 0(a1)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i8, ptr %base, i8 0
- %ld = load i8, ptr %addr
- %zext = zext i8 %ld to i64
- %addr.1 = getelementptr i8, ptr %base, i8 -1
- %res = add i64 %zext, %a
- store i64 %res, ptr %addr.2
- ret ptr %addr.1
-}
-
-define ptr @lbuib(ptr %base, i64 %a, ptr %addr.1) {
-; RV64XTHEADMEMIDX-LABEL: lbuib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lbuib a3, (a0), 1, 0
-; RV64XTHEADMEMIDX-NEXT: add a1, a3, a1
-; RV64XTHEADMEMIDX-NEXT: sd a1, 0(a2)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i8, ptr %base, i8 1
- %ld = load i8, ptr %addr
- %zext = zext i8 %ld to i64
- %res = add i64 %zext, %a
- store i64 %res, ptr %addr.1
- ret ptr %addr
-}
-
-define ptr @lhia(ptr %base, ptr %addr.2, i16 %a) {
-; RV64XTHEADMEMIDX-LABEL: lhia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lhia a3, (a0), -16, 1
-; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV64XTHEADMEMIDX-NEXT: sh a2, 0(a1)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i16, ptr %base, i16 0
- %ld = load i16, ptr %addr
- %addr.1 = getelementptr i16, ptr %base, i16 -16
- %res = add i16 %ld, %a
- store i16 %res, ptr %addr.2
- ret ptr %addr.1
-}
-
-define ptr @lhib(ptr %base, i16 %a) {
-; RV64XTHEADMEMIDX-LABEL: lhib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lhib a2, (a0), 2, 0
-; RV64XTHEADMEMIDX-NEXT: add a1, a2, a1
-; RV64XTHEADMEMIDX-NEXT: sh a1, 2(a0)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i16, ptr %base, i16 1
- %ld = load i16, ptr %addr
- %addr.1 = getelementptr i16, ptr %base, i16 2
- %res = add i16 %ld, %a
- store i16 %res, ptr %addr.1
- ret ptr %addr
-}
-
-define ptr @lhuia(ptr %base, ptr %addr.2, i64 %a) {
-; RV64XTHEADMEMIDX-LABEL: lhuia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lhuia a3, (a0), -16, 1
-; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV64XTHEADMEMIDX-NEXT: sd a2, 0(a1)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i16, ptr %base, i16 0
- %ld = load i16, ptr %addr
- %zext = zext i16 %ld to i64
- %addr.1 = getelementptr i16, ptr %base, i16 -16
- %res = add i64 %zext, %a
- store i64 %res, ptr %addr.2
- ret ptr %addr.1
-}
-
-define ptr @lhuib(ptr %base, i64 %a, ptr %addr.1) {
-; RV64XTHEADMEMIDX-LABEL: lhuib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lhuib a3, (a0), 2, 0
-; RV64XTHEADMEMIDX-NEXT: add a1, a3, a1
-; RV64XTHEADMEMIDX-NEXT: sd a1, 0(a2)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i16, ptr %base, i16 1
- %ld = load i16, ptr %addr
- %zext = zext i16 %ld to i64
- %res = add i64 %zext, %a
- store i64 %res, ptr %addr.1
- ret ptr %addr
-}
-
-define ptr @lwia(ptr %base, ptr %addr.2, i32 %a) {
-; RV64XTHEADMEMIDX-LABEL: lwia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lwia a3, (a0), -16, 2
-; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV64XTHEADMEMIDX-NEXT: sw a2, 0(a1)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i32, ptr %base, i32 0
- %ld = load i32, ptr %addr
- %addr.1 = getelementptr i32, ptr %base, i32 -16
- %res = add i32 %ld, %a
- store i32 %res, ptr %addr.2
- ret ptr %addr.1
-}
-
-define ptr @lwib(ptr %base, i32 %a) {
-; RV64XTHEADMEMIDX-LABEL: lwib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lwib a2, (a0), 4, 0
-; RV64XTHEADMEMIDX-NEXT: add a1, a2, a1
-; RV64XTHEADMEMIDX-NEXT: sw a1, 4(a0)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i32, ptr %base, i32 1
- %ld = load i32, ptr %addr
- %addr.1 = getelementptr i32, ptr %base, i32 2
- %res = add i32 %ld, %a
- store i32 %res, ptr %addr.1
- ret ptr %addr
-}
-
-define ptr @lwuia(ptr %base, ptr %addr.2, i64 %a) {
-; RV64XTHEADMEMIDX-LABEL: lwuia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lwuia a3, (a0), -16, 2
-; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV64XTHEADMEMIDX-NEXT: sd a2, 0(a1)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i32, ptr %base, i32 0
- %ld = load i32, ptr %addr
- %zext = zext i32 %ld to i64
- %addr.1 = getelementptr i32, ptr %base, i32 -16
- %res = add i64 %zext, %a
- store i64 %res, ptr %addr.2
- ret ptr %addr.1
-}
-
-define ptr @lwuib(ptr %base, i64 %a, ptr %addr.1) {
-; RV64XTHEADMEMIDX-LABEL: lwuib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lwuib a3, (a0), 4, 0
-; RV64XTHEADMEMIDX-NEXT: add a1, a3, a1
-; RV64XTHEADMEMIDX-NEXT: sd a1, 0(a2)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i32, ptr %base, i32 1
- %ld = load i32, ptr %addr
- %zext = zext i32 %ld to i64
- %res = add i64 %zext, %a
- store i64 %res, ptr %addr.1
- ret ptr %addr
-}
-
-define ptr @ldia(ptr %base, ptr %addr.2, i64 %a) {
-; RV64XTHEADMEMIDX-LABEL: ldia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.ldia a3, (a0), -16, 3
-; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV64XTHEADMEMIDX-NEXT: sd a2, 0(a1)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i64, ptr %base, i64 0
- %ld = load i64, ptr %addr
- %addr.1 = getelementptr i64, ptr %base, i64 -16
- %res = add i64 %ld, %a
- store i64 %res, ptr %addr.2
- ret ptr %addr.1
-}
-
-define ptr @ldib(ptr %base, i64 %a) {
-; RV64XTHEADMEMIDX-LABEL: ldib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.ldib a2, (a0), 8, 0
-; RV64XTHEADMEMIDX-NEXT: add a1, a2, a1
-; RV64XTHEADMEMIDX-NEXT: sd a1, 8(a0)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i64, ptr %base, i64 1
- %ld = load i64, ptr %addr
- %addr.1 = getelementptr i64, ptr %base, i64 2
- %res = add i64 %ld, %a
- store i64 %res, ptr %addr.1
- ret ptr %addr
-}
-
-define ptr @sbia(ptr %base, i8 %a, i8 %b) {
-; RV64XTHEADMEMIDX-LABEL: sbia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: addw a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.sbia a1, (a0), 1, 0
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i8, ptr %base, i8 1
- %res = add i8 %a, %b
- store i8 %res, ptr %base
- ret ptr %addr.1
-}
-
-define ptr @sbib(ptr %base, i8 %a, i8 %b) {
-; RV64XTHEADMEMIDX-LABEL: sbib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: addw a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.sbib a1, (a0), 1, 0
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i8, ptr %base, i8 1
- %res = add i8 %a, %b
- store i8 %res, ptr %addr.1
- ret ptr %addr.1
-}
-
-define ptr @shia(ptr %base, i16 %a, i16 %b) {
-; RV64XTHEADMEMIDX-LABEL: shia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: addw a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.shia a1, (a0), -9, 1
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i16, ptr %base, i16 -9
- %res = add i16 %a, %b
- store i16 %res, ptr %base
- ret ptr %addr.1
-}
-
-define ptr @shib(ptr %base, i16 %a, i16 %b) {
-; RV64XTHEADMEMIDX-LABEL: shib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: addw a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.shib a1, (a0), 2, 0
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i16, ptr %base, i16 1
- %res = add i16 %a, %b
- store i16 %res, ptr %addr.1
- ret ptr %addr.1
-}
-
-define ptr @swia(ptr %base, i32 %a, i32 %b) {
-; RV64XTHEADMEMIDX-LABEL: swia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: addw a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.swia a1, (a0), 8, 2
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i32, ptr %base, i32 8
- %res = add i32 %a, %b
- store i32 %res, ptr %base
- ret ptr %addr.1
-}
-
-define ptr @swib(ptr %base, i32 %a, i32 %b) {
-; RV64XTHEADMEMIDX-LABEL: swib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: addw a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.swib a1, (a0), -13, 3
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i32, ptr %base, i32 -26
- %res = add i32 %a, %b
- store i32 %res, ptr %addr.1
- ret ptr %addr.1
-}
-
-define ptr @sdia(ptr %base, i64 %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: sdia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.sdia a1, (a0), 8, 3
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i64, ptr %base, i64 8
- %res = add i64 %a, %b
- store i64 %res, ptr %base
- ret ptr %addr.1
-}
-
-define ptr @sdib(ptr %base, i64 %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: sdib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.sdib a1, (a0), 8, 0
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i64, ptr %base, i64 1
- %res = add i64 %a, %b
- store i64 %res, ptr %addr.1
- ret ptr %addr.1
-}
-
-define i8 @lrb_anyext(ptr %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: lrb_anyext:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrb a0, a0, a1, 0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i8, ptr %a, i64 %b
- %2 = load i8, ptr %1, align 1
- ret i8 %2
-}
-
-define i64 @lrb(ptr %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: lrb:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrb a0, a0, a1, 0
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i8, ptr %a, i64 %b
- %2 = load i8, ptr %1, align 1
- %3 = sext i8 %2 to i64
- %4 = add i64 %3, %3
- ret i64 %4
-}
-
-define i8 @lurb_anyext(ptr %a, i32 %b) {
-; RV64XTHEADMEMIDX-LABEL: lurb_anyext:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lurb a0, a0, a1, 0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = zext i32 %b to i64
- %2 = getelementptr i8, ptr %a, i64 %1
- %3 = load i8, ptr %2, align 1
- ret i8 %3
-}
-
-define i64 @lurb(ptr %a, i32 %b) {
-; RV64XTHEADMEMIDX-LABEL: lurb:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lurb a0, a0, a1, 0
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = zext i32 %b to i64
- %2 = getelementptr i8, ptr %a, i64 %1
- %3 = load i8, ptr %2, align 1
- %4 = sext i8 %3 to i64
- %5 = add i64 %4, %4
- ret i64 %5
-}
-
-define i64 @lrbu(ptr %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: lrbu:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrbu a0, a0, a1, 0
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i8, ptr %a, i64 %b
- %2 = load i8, ptr %1, align 1
- %3 = zext i8 %2 to i64
- %4 = add i64 %3, %3
- ret i64 %4
-}
-
-define i64 @lurbu(ptr %a, i32 %b) {
-; RV64XTHEADMEMIDX-LABEL: lurbu:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lurbu a0, a0, a1, 0
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = zext i32 %b to i64
- %2 = getelementptr i8, ptr %a, i64 %1
- %3 = load i8, ptr %2, align 1
- %4 = zext i8 %3 to i64
- %5 = add i64 %4, %4
- ret i64 %5
-}
-
-define i16 @lrh_anyext(ptr %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: lrh_anyext:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrh a0, a0, a1, 1
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i16, ptr %a, i64 %b
- %2 = load i16, ptr %1, align 2
- ret i16 %2
-}
-
-define i64 @lrh(ptr %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: lrh:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrh a0, a0, a1, 1
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i16, ptr %a, i64 %b
- %2 = load i16, ptr %1, align 2
- %3 = sext i16 %2 to i64
- %4 = add i64 %3, %3
- ret i64 %4
-}
-
-define i16 @lurh_anyext(ptr %a, i32 %b) {
-; RV64XTHEADMEMIDX-LABEL: lurh_anyext:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lurh a0, a0, a1, 1
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = zext i32 %b to i64
- %2 = getelementptr i16, ptr %a, i64 %1
- %3 = load i16, ptr %2, align 2
- ret i16 %3
-}
-
-define i64 @lurh(ptr %a, i32 %b) {
-; RV64XTHEADMEMIDX-LABEL: lurh:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lurh a0, a0, a1, 1
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = zext i32 %b to i64
- %2 = getelementptr i16, ptr %a, i64 %1
- %3 = load i16, ptr %2, align 2
- %4 = sext i16 %3 to i64
- %5 = add i64 %4, %4
- ret i64 %5
-}
-
-define i64 @lrhu(ptr %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: lrhu:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrhu a0, a0, a1, 1
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i16, ptr %a, i64 %b
- %2 = load i16, ptr %1, align 2
- %3 = zext i16 %2 to i64
- %4 = add i64 %3, %3
- ret i64 %4
-}
-
-define i64 @lurhu(ptr %a, i32 %b) {
-; RV64XTHEADMEMIDX-LABEL: lurhu:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lurhu a0, a0, a1, 1
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = zext i32 %b to i64
- %2 = getelementptr i16, ptr %a, i64 %1
- %3 = load i16, ptr %2, align 2
- %4 = zext i16 %3 to i64
- %5 = add i64 %4, %4
- ret i64 %5
-}
-
-define i32 @lrw_anyext(ptr %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: lrw_anyext:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrw a0, a0, a1, 2
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i32, ptr %a, i64 %b
- %2 = load i32, ptr %1, align 4
- ret i32 %2
-}
-
-define i64 @lrw(ptr %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: lrw:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrw a0, a0, a1, 2
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i32, ptr %a, i64 %b
- %2 = load i32, ptr %1, align 4
- %3 = sext i32 %2 to i64
- %4 = add i64 %3, %3
- ret i64 %4
-}
-
-define i32 @lurw_anyext(ptr %a, i32 %b) {
-; RV64XTHEADMEMIDX-LABEL: lurw_anyext:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lurw a0, a0, a1, 2
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = zext i32 %b to i64
- %2 = getelementptr i32, ptr %a, i64 %1
- %3 = load i32, ptr %2, align 4
- ret i32 %3
-}
-
-define i64 @lurw(ptr %a, i32 %b) {
-; RV64XTHEADMEMIDX-LABEL: lurw:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lurw a0, a0, a1, 2
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = zext i32 %b to i64
- %2 = getelementptr i32, ptr %a, i64 %1
- %3 = load i32, ptr %2, align 4
- %4 = sext i32 %3 to i64
- %5 = add i64 %4, %4
- ret i64 %5
-}
-
-define i64 @lrwu(ptr %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: lrwu:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrwu a0, a0, a1, 2
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i32, ptr %a, i64 %b
- %2 = load i32, ptr %1, align 4
- %3 = zext i32 %2 to i64
- %4 = add i64 %3, %3
- ret i64 %4
-}
-
-define i64 @lurwu(ptr %a, i32 %b) {
-; RV64XTHEADMEMIDX-LABEL: lurwu:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lurwu a0, a0, a1, 2
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = zext i32 %b to i64
- %2 = getelementptr i32, ptr %a, i64 %1
- %3 = load i32, ptr %2, align 4
- %4 = zext i32 %3 to i64
- %5 = add i64 %4, %4
- ret i64 %5
-}
-
-define i64 @lrd(ptr %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: lrd:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrd a0, a0, a1, 3
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i64, ptr %a, i64 %b
- %2 = load i64, ptr %1, align 8
- %3 = add i64 %2, %2
- ret i64 %3
-}
-
-define i64 @lrd_2(ptr %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: lrd_2:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: addi a0, a0, 96
-; RV64XTHEADMEMIDX-NEXT: th.lrd a0, a0, a1, 3
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = add i64 %b, 12
- %2 = getelementptr i64, ptr %a, i64 %1
- %3 = load i64, ptr %2, align 8
- %4 = add i64 %3, %3
- ret i64 %4
-}
-
-define i64 @lurd(ptr %a, i32 %b) {
-; RV64XTHEADMEMIDX-LABEL: lurd:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lurd a0, a0, a1, 3
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = zext i32 %b to i64
- %2 = getelementptr i64, ptr %a, i64 %1
- %3 = load i64, ptr %2, align 8
- %4 = add i64 %3, %3
- ret i64 %4
-}
-
-define void @srb(ptr %a, i64 %b, i8 %c) {
-; RV64XTHEADMEMIDX-LABEL: srb:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: addw a2, a2, a2
-; RV64XTHEADMEMIDX-NEXT: th.srb a2, a0, a1, 0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = add i8 %c, %c
- %2 = getelementptr i8, ptr %a, i64 %b
- store i8 %1, ptr %2, align 1
- ret void
-}
-
-define void @surb(ptr %a, i32 %b, i8 %c) {
-; RV64XTHEADMEMIDX-LABEL: surb:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: addw a2, a2, a2
-; RV64XTHEADMEMIDX-NEXT: th.surb a2, a0, a1, 0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = zext i32 %b to i64
- %2 = add i8 %c, %c
- %3 = getelementptr i8, ptr %a, i64 %1
- store i8 %2, ptr %3, align 1
- ret void
-}
-
-define void @srh(ptr %a, i64 %b, i16 %c) {
-; RV64XTHEADMEMIDX-LABEL: srh:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: addw a2, a2, a2
-; RV64XTHEADMEMIDX-NEXT: th.srh a2, a0, a1, 1
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = add i16 %c, %c
- %2 = getelementptr i16, ptr %a, i64 %b
- store i16 %1, ptr %2, align 2
- ret void
-}
-
-define void @surh(ptr %a, i32 %b, i16 %c) {
-; RV64XTHEADMEMIDX-LABEL: surh:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: addw a2, a2, a2
-; RV64XTHEADMEMIDX-NEXT: th.surh a2, a0, a1, 1
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = zext i32 %b to i64
- %2 = add i16 %c, %c
- %3 = getelementptr i16, ptr %a, i64 %1
- store i16 %2, ptr %3, align 2
- ret void
-}
-
-define void @srw(ptr %a, i64 %b, i32 %c) {
-; RV64XTHEADMEMIDX-LABEL: srw:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: addw a2, a2, a2
-; RV64XTHEADMEMIDX-NEXT: th.srw a2, a0, a1, 2
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = add i32 %c, %c
- %2 = getelementptr i32, ptr %a, i64 %b
- store i32 %1, ptr %2, align 4
- ret void
-}
-
-define void @surw(ptr %a, i32 %b, i32 %c) {
-; RV64XTHEADMEMIDX-LABEL: surw:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: addw a2, a2, a2
-; RV64XTHEADMEMIDX-NEXT: th.surw a2, a0, a1, 2
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = zext i32 %b to i64
- %2 = add i32 %c, %c
- %3 = getelementptr i32, ptr %a, i64 %1
- store i32 %2, ptr %3, align 4
- ret void
-}
-
-define void @srd(ptr %a, i64 %b, i64 %c) {
-; RV64XTHEADMEMIDX-LABEL: srd:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a2, a2, a2
-; RV64XTHEADMEMIDX-NEXT: th.srd a2, a0, a1, 3
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = add i64 %c, %c
- %2 = getelementptr i64, ptr %a, i64 %b
- store i64 %1, ptr %2, align 8
- ret void
-}
-
-define void @surd(ptr %a, i32 %b, i64 %c) {
-; RV64XTHEADMEMIDX-LABEL: surd:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a2, a2, a2
-; RV64XTHEADMEMIDX-NEXT: th.surd a2, a0, a1, 3
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = zext i32 %b to i64
- %2 = add i64 %c, %c
- %3 = getelementptr i64, ptr %a, i64 %1
- store i64 %2, ptr %3, align 8
- ret void
-}
-
-define ptr @test_simm5(ptr %base, i32 %a, i32 %b) {
-; RV64XTHEADMEMIDX-LABEL: test_simm5:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: addw a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.swia a1, (a0), -12, 2
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i32, ptr %base, i32 -12
- %res = add i32 %a, %b
- store i32 %res, ptr %base
- ret ptr %addr.1
-}
-
-define i64 @lrd_large_shift(ptr %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: lrd_large_shift:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: slli a1, a1, 5
-; RV64XTHEADMEMIDX-NEXT: add a0, a1, a0
-; RV64XTHEADMEMIDX-NEXT: ld a0, 384(a0)
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = add i64 %b, 12
- %2 = shl i64 %1, 2
- %3 = getelementptr i64, ptr %a, i64 %2
- %4 = load i64, ptr %3, align 8
- ret i64 %4
-}
-
-define i64 @lrd_large_offset(ptr %a, i64 %b) {
-; RV64XTHEADMEMIDX-LABEL: lrd_large_offset:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: slli a1, a1, 3
-; RV64XTHEADMEMIDX-NEXT: add a0, a1, a0
-; RV64XTHEADMEMIDX-NEXT: lui a1, 23
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a1
-; RV64XTHEADMEMIDX-NEXT: ld a0, 1792(a0)
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = add i64 %b, 12000
- %2 = getelementptr i64, ptr %a, i64 %1
- %3 = load i64, ptr %2, align 8
- ret i64 %3
-}
diff --git a/llvm/test/CodeGen/RISCV/shl-cttz.ll b/llvm/test/CodeGen/RISCV/shl-cttz.ll
index 0eeb8b04..1bffa42 100644
--- a/llvm/test/CodeGen/RISCV/shl-cttz.ll
+++ b/llvm/test/CodeGen/RISCV/shl-cttz.ll
@@ -4,13 +4,9 @@
; RUN: llc -mtriple=riscv32 -mattr=+m,+zbb < %s \
; RUN: | FileCheck %s -check-prefix=RV32ZBB
; RUN: llc -mtriple=riscv64 -mattr=+m < %s \
-; RUN: | FileCheck %s -check-prefixes=RV64I,RV64IILLEGALI32
+; RUN: | FileCheck %s -check-prefixes=RV64I
; RUN: llc -mtriple=riscv64 -mattr=+m,+zbb < %s \
-; RUN: | FileCheck %s -check-prefixes=RV64ZBB,RV64ZBBILLEGALI32
-; RUN: llc -mtriple=riscv64 -mattr=+m -riscv-experimental-rv64-legal-i32 < %s \
-; RUN: | FileCheck %s -check-prefixes=RV64I,RV64ILEGALI32
-; RUN: llc -mtriple=riscv64 -mattr=+m,+zbb -riscv-experimental-rv64-legal-i32 < %s \
-; RUN: | FileCheck %s -check-prefixes=RV64ZBB,RV64ZBBLEGALI32
+; RUN: | FileCheck %s -check-prefixes=RV64ZBB
define i8 @shl_cttz_i8(i8 %x, i8 %y) {
; RV32I-LABEL: shl_cttz_i8:
@@ -37,53 +33,29 @@ define i8 @shl_cttz_i8(i8 %x, i8 %y) {
; RV32ZBB-NEXT: sll a0, a0, a1
; RV32ZBB-NEXT: ret
;
-; RV64IILLEGALI32-LABEL: shl_cttz_i8:
-; RV64IILLEGALI32: # %bb.0: # %entry
-; RV64IILLEGALI32-NEXT: addi a2, a1, -1
-; RV64IILLEGALI32-NEXT: not a1, a1
-; RV64IILLEGALI32-NEXT: and a1, a1, a2
-; RV64IILLEGALI32-NEXT: srli a2, a1, 1
-; RV64IILLEGALI32-NEXT: andi a2, a2, 85
-; RV64IILLEGALI32-NEXT: subw a1, a1, a2
-; RV64IILLEGALI32-NEXT: andi a2, a1, 51
-; RV64IILLEGALI32-NEXT: srli a1, a1, 2
-; RV64IILLEGALI32-NEXT: andi a1, a1, 51
-; RV64IILLEGALI32-NEXT: add a1, a2, a1
-; RV64IILLEGALI32-NEXT: srli a2, a1, 4
-; RV64IILLEGALI32-NEXT: add a1, a1, a2
-; RV64IILLEGALI32-NEXT: andi a1, a1, 15
-; RV64IILLEGALI32-NEXT: sll a0, a0, a1
-; RV64IILLEGALI32-NEXT: ret
-;
-; RV64ZBBILLEGALI32-LABEL: shl_cttz_i8:
-; RV64ZBBILLEGALI32: # %bb.0: # %entry
-; RV64ZBBILLEGALI32-NEXT: ctz a1, a1
-; RV64ZBBILLEGALI32-NEXT: sll a0, a0, a1
-; RV64ZBBILLEGALI32-NEXT: ret
-;
-; RV64ILEGALI32-LABEL: shl_cttz_i8:
-; RV64ILEGALI32: # %bb.0: # %entry
-; RV64ILEGALI32-NEXT: addi a2, a1, -1
-; RV64ILEGALI32-NEXT: not a1, a1
-; RV64ILEGALI32-NEXT: and a1, a1, a2
-; RV64ILEGALI32-NEXT: srliw a2, a1, 1
-; RV64ILEGALI32-NEXT: andi a2, a2, 85
-; RV64ILEGALI32-NEXT: subw a1, a1, a2
-; RV64ILEGALI32-NEXT: andi a2, a1, 51
-; RV64ILEGALI32-NEXT: srliw a1, a1, 2
-; RV64ILEGALI32-NEXT: andi a1, a1, 51
-; RV64ILEGALI32-NEXT: add a1, a2, a1
-; RV64ILEGALI32-NEXT: srliw a2, a1, 4
-; RV64ILEGALI32-NEXT: add a1, a1, a2
-; RV64ILEGALI32-NEXT: andi a1, a1, 15
-; RV64ILEGALI32-NEXT: sllw a0, a0, a1
-; RV64ILEGALI32-NEXT: ret
+; RV64I-LABEL: shl_cttz_i8:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi a2, a1, -1
+; RV64I-NEXT: not a1, a1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: srli a2, a1, 1
+; RV64I-NEXT: andi a2, a2, 85
+; RV64I-NEXT: subw a1, a1, a2
+; RV64I-NEXT: andi a2, a1, 51
+; RV64I-NEXT: srli a1, a1, 2
+; RV64I-NEXT: andi a1, a1, 51
+; RV64I-NEXT: add a1, a2, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: andi a1, a1, 15
+; RV64I-NEXT: sll a0, a0, a1
+; RV64I-NEXT: ret
;
-; RV64ZBBLEGALI32-LABEL: shl_cttz_i8:
-; RV64ZBBLEGALI32: # %bb.0: # %entry
-; RV64ZBBLEGALI32-NEXT: ctzw a1, a1
-; RV64ZBBLEGALI32-NEXT: sllw a0, a0, a1
-; RV64ZBBLEGALI32-NEXT: ret
+; RV64ZBB-LABEL: shl_cttz_i8:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctz a1, a1
+; RV64ZBB-NEXT: sll a0, a0, a1
+; RV64ZBB-NEXT: ret
entry:
%cttz = call i8 @llvm.cttz.i8(i8 %y, i1 true)
%res = shl i8 %x, %cttz
@@ -117,57 +89,31 @@ define i8 @shl_cttz_constant_i8(i8 %y) {
; RV32ZBB-NEXT: sll a0, a1, a0
; RV32ZBB-NEXT: ret
;
-; RV64IILLEGALI32-LABEL: shl_cttz_constant_i8:
-; RV64IILLEGALI32: # %bb.0: # %entry
-; RV64IILLEGALI32-NEXT: addi a1, a0, -1
-; RV64IILLEGALI32-NEXT: not a0, a0
-; RV64IILLEGALI32-NEXT: and a0, a0, a1
-; RV64IILLEGALI32-NEXT: srli a1, a0, 1
-; RV64IILLEGALI32-NEXT: andi a1, a1, 85
-; RV64IILLEGALI32-NEXT: subw a0, a0, a1
-; RV64IILLEGALI32-NEXT: andi a1, a0, 51
-; RV64IILLEGALI32-NEXT: srli a0, a0, 2
-; RV64IILLEGALI32-NEXT: andi a0, a0, 51
-; RV64IILLEGALI32-NEXT: add a0, a1, a0
-; RV64IILLEGALI32-NEXT: srli a1, a0, 4
-; RV64IILLEGALI32-NEXT: add a0, a0, a1
-; RV64IILLEGALI32-NEXT: andi a0, a0, 15
-; RV64IILLEGALI32-NEXT: li a1, 4
-; RV64IILLEGALI32-NEXT: sll a0, a1, a0
-; RV64IILLEGALI32-NEXT: ret
-;
-; RV64ZBBILLEGALI32-LABEL: shl_cttz_constant_i8:
-; RV64ZBBILLEGALI32: # %bb.0: # %entry
-; RV64ZBBILLEGALI32-NEXT: ctz a0, a0
-; RV64ZBBILLEGALI32-NEXT: li a1, 4
-; RV64ZBBILLEGALI32-NEXT: sll a0, a1, a0
-; RV64ZBBILLEGALI32-NEXT: ret
-;
-; RV64ILEGALI32-LABEL: shl_cttz_constant_i8:
-; RV64ILEGALI32: # %bb.0: # %entry
-; RV64ILEGALI32-NEXT: addi a1, a0, -1
-; RV64ILEGALI32-NEXT: not a0, a0
-; RV64ILEGALI32-NEXT: and a0, a0, a1
-; RV64ILEGALI32-NEXT: srliw a1, a0, 1
-; RV64ILEGALI32-NEXT: andi a1, a1, 85
-; RV64ILEGALI32-NEXT: subw a0, a0, a1
-; RV64ILEGALI32-NEXT: andi a1, a0, 51
-; RV64ILEGALI32-NEXT: srliw a0, a0, 2
-; RV64ILEGALI32-NEXT: andi a0, a0, 51
-; RV64ILEGALI32-NEXT: add a0, a1, a0
-; RV64ILEGALI32-NEXT: srliw a1, a0, 4
-; RV64ILEGALI32-NEXT: add a0, a0, a1
-; RV64ILEGALI32-NEXT: andi a0, a0, 15
-; RV64ILEGALI32-NEXT: li a1, 4
-; RV64ILEGALI32-NEXT: sllw a0, a1, a0
-; RV64ILEGALI32-NEXT: ret
+; RV64I-LABEL: shl_cttz_constant_i8:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi a1, a0, -1
+; RV64I-NEXT: not a0, a0
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: srli a1, a0, 1
+; RV64I-NEXT: andi a1, a1, 85
+; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: andi a1, a0, 51
+; RV64I-NEXT: srli a0, a0, 2
+; RV64I-NEXT: andi a0, a0, 51
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: srli a1, a0, 4
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: andi a0, a0, 15
+; RV64I-NEXT: li a1, 4
+; RV64I-NEXT: sll a0, a1, a0
+; RV64I-NEXT: ret
;
-; RV64ZBBLEGALI32-LABEL: shl_cttz_constant_i8:
-; RV64ZBBLEGALI32: # %bb.0: # %entry
-; RV64ZBBLEGALI32-NEXT: ctzw a0, a0
-; RV64ZBBLEGALI32-NEXT: li a1, 4
-; RV64ZBBLEGALI32-NEXT: sllw a0, a1, a0
-; RV64ZBBLEGALI32-NEXT: ret
+; RV64ZBB-LABEL: shl_cttz_constant_i8:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctz a0, a0
+; RV64ZBB-NEXT: li a1, 4
+; RV64ZBB-NEXT: sll a0, a1, a0
+; RV64ZBB-NEXT: ret
entry:
%cttz = call i8 @llvm.cttz.i8(i8 %y, i1 true)
%res = shl i8 4, %cttz
@@ -206,67 +152,36 @@ define i16 @shl_cttz_i16(i16 %x, i16 %y) {
; RV32ZBB-NEXT: sll a0, a0, a1
; RV32ZBB-NEXT: ret
;
-; RV64IILLEGALI32-LABEL: shl_cttz_i16:
-; RV64IILLEGALI32: # %bb.0: # %entry
-; RV64IILLEGALI32-NEXT: addi a2, a1, -1
-; RV64IILLEGALI32-NEXT: not a1, a1
-; RV64IILLEGALI32-NEXT: and a1, a1, a2
-; RV64IILLEGALI32-NEXT: srli a2, a1, 1
-; RV64IILLEGALI32-NEXT: lui a3, 5
-; RV64IILLEGALI32-NEXT: addiw a3, a3, 1365
-; RV64IILLEGALI32-NEXT: and a2, a2, a3
-; RV64IILLEGALI32-NEXT: sub a1, a1, a2
-; RV64IILLEGALI32-NEXT: lui a2, 3
-; RV64IILLEGALI32-NEXT: addiw a2, a2, 819
-; RV64IILLEGALI32-NEXT: and a3, a1, a2
-; RV64IILLEGALI32-NEXT: srli a1, a1, 2
-; RV64IILLEGALI32-NEXT: and a1, a1, a2
-; RV64IILLEGALI32-NEXT: add a1, a3, a1
-; RV64IILLEGALI32-NEXT: srli a2, a1, 4
-; RV64IILLEGALI32-NEXT: add a1, a1, a2
-; RV64IILLEGALI32-NEXT: andi a2, a1, 15
-; RV64IILLEGALI32-NEXT: slli a1, a1, 52
-; RV64IILLEGALI32-NEXT: srli a1, a1, 60
-; RV64IILLEGALI32-NEXT: add a1, a2, a1
-; RV64IILLEGALI32-NEXT: sll a0, a0, a1
-; RV64IILLEGALI32-NEXT: ret
-;
-; RV64ZBBILLEGALI32-LABEL: shl_cttz_i16:
-; RV64ZBBILLEGALI32: # %bb.0: # %entry
-; RV64ZBBILLEGALI32-NEXT: ctz a1, a1
-; RV64ZBBILLEGALI32-NEXT: sll a0, a0, a1
-; RV64ZBBILLEGALI32-NEXT: ret
-;
-; RV64ILEGALI32-LABEL: shl_cttz_i16:
-; RV64ILEGALI32: # %bb.0: # %entry
-; RV64ILEGALI32-NEXT: addi a2, a1, -1
-; RV64ILEGALI32-NEXT: not a1, a1
-; RV64ILEGALI32-NEXT: and a1, a1, a2
-; RV64ILEGALI32-NEXT: srliw a2, a1, 1
-; RV64ILEGALI32-NEXT: lui a3, 5
-; RV64ILEGALI32-NEXT: addi a3, a3, 1365
-; RV64ILEGALI32-NEXT: and a2, a2, a3
-; RV64ILEGALI32-NEXT: subw a1, a1, a2
-; RV64ILEGALI32-NEXT: lui a2, 3
-; RV64ILEGALI32-NEXT: addi a2, a2, 819
-; RV64ILEGALI32-NEXT: and a3, a1, a2
-; RV64ILEGALI32-NEXT: srliw a1, a1, 2
-; RV64ILEGALI32-NEXT: and a1, a1, a2
-; RV64ILEGALI32-NEXT: add a1, a3, a1
-; RV64ILEGALI32-NEXT: srliw a2, a1, 4
-; RV64ILEGALI32-NEXT: add a1, a1, a2
-; RV64ILEGALI32-NEXT: andi a2, a1, 15
-; RV64ILEGALI32-NEXT: slli a1, a1, 52
-; RV64ILEGALI32-NEXT: srli a1, a1, 60
-; RV64ILEGALI32-NEXT: add a1, a2, a1
-; RV64ILEGALI32-NEXT: sllw a0, a0, a1
-; RV64ILEGALI32-NEXT: ret
+; RV64I-LABEL: shl_cttz_i16:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi a2, a1, -1
+; RV64I-NEXT: not a1, a1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: srli a2, a1, 1
+; RV64I-NEXT: lui a3, 5
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: lui a2, 3
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: andi a2, a1, 15
+; RV64I-NEXT: slli a1, a1, 52
+; RV64I-NEXT: srli a1, a1, 60
+; RV64I-NEXT: add a1, a2, a1
+; RV64I-NEXT: sll a0, a0, a1
+; RV64I-NEXT: ret
;
-; RV64ZBBLEGALI32-LABEL: shl_cttz_i16:
-; RV64ZBBLEGALI32: # %bb.0: # %entry
-; RV64ZBBLEGALI32-NEXT: ctzw a1, a1
-; RV64ZBBLEGALI32-NEXT: sllw a0, a0, a1
-; RV64ZBBLEGALI32-NEXT: ret
+; RV64ZBB-LABEL: shl_cttz_i16:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctz a1, a1
+; RV64ZBB-NEXT: sll a0, a0, a1
+; RV64ZBB-NEXT: ret
entry:
%cttz = call i16 @llvm.cttz.i16(i16 %y, i1 true)
%res = shl i16 %x, %cttz
@@ -307,71 +222,38 @@ define i16 @shl_cttz_constant_i16(i16 %y) {
; RV32ZBB-NEXT: sll a0, a1, a0
; RV32ZBB-NEXT: ret
;
-; RV64IILLEGALI32-LABEL: shl_cttz_constant_i16:
-; RV64IILLEGALI32: # %bb.0: # %entry
-; RV64IILLEGALI32-NEXT: addi a1, a0, -1
-; RV64IILLEGALI32-NEXT: not a0, a0
-; RV64IILLEGALI32-NEXT: and a0, a0, a1
-; RV64IILLEGALI32-NEXT: srli a1, a0, 1
-; RV64IILLEGALI32-NEXT: lui a2, 5
-; RV64IILLEGALI32-NEXT: addiw a2, a2, 1365
-; RV64IILLEGALI32-NEXT: and a1, a1, a2
-; RV64IILLEGALI32-NEXT: sub a0, a0, a1
-; RV64IILLEGALI32-NEXT: lui a1, 3
-; RV64IILLEGALI32-NEXT: addiw a1, a1, 819
-; RV64IILLEGALI32-NEXT: and a2, a0, a1
-; RV64IILLEGALI32-NEXT: srli a0, a0, 2
-; RV64IILLEGALI32-NEXT: and a0, a0, a1
-; RV64IILLEGALI32-NEXT: add a0, a2, a0
-; RV64IILLEGALI32-NEXT: srli a1, a0, 4
-; RV64IILLEGALI32-NEXT: add a0, a0, a1
-; RV64IILLEGALI32-NEXT: andi a1, a0, 15
-; RV64IILLEGALI32-NEXT: slli a0, a0, 52
-; RV64IILLEGALI32-NEXT: srli a0, a0, 60
-; RV64IILLEGALI32-NEXT: add a0, a1, a0
-; RV64IILLEGALI32-NEXT: li a1, 4
-; RV64IILLEGALI32-NEXT: sll a0, a1, a0
-; RV64IILLEGALI32-NEXT: ret
-;
-; RV64ZBBILLEGALI32-LABEL: shl_cttz_constant_i16:
-; RV64ZBBILLEGALI32: # %bb.0: # %entry
-; RV64ZBBILLEGALI32-NEXT: ctz a0, a0
-; RV64ZBBILLEGALI32-NEXT: li a1, 4
-; RV64ZBBILLEGALI32-NEXT: sll a0, a1, a0
-; RV64ZBBILLEGALI32-NEXT: ret
-;
-; RV64ILEGALI32-LABEL: shl_cttz_constant_i16:
-; RV64ILEGALI32: # %bb.0: # %entry
-; RV64ILEGALI32-NEXT: addi a1, a0, -1
-; RV64ILEGALI32-NEXT: not a0, a0
-; RV64ILEGALI32-NEXT: and a0, a0, a1
-; RV64ILEGALI32-NEXT: srliw a1, a0, 1
-; RV64ILEGALI32-NEXT: lui a2, 5
-; RV64ILEGALI32-NEXT: addi a2, a2, 1365
-; RV64ILEGALI32-NEXT: and a1, a1, a2
-; RV64ILEGALI32-NEXT: subw a0, a0, a1
-; RV64ILEGALI32-NEXT: lui a1, 3
-; RV64ILEGALI32-NEXT: addi a1, a1, 819
-; RV64ILEGALI32-NEXT: and a2, a0, a1
-; RV64ILEGALI32-NEXT: srliw a0, a0, 2
-; RV64ILEGALI32-NEXT: and a0, a0, a1
-; RV64ILEGALI32-NEXT: add a0, a2, a0
-; RV64ILEGALI32-NEXT: srliw a1, a0, 4
-; RV64ILEGALI32-NEXT: add a0, a0, a1
-; RV64ILEGALI32-NEXT: andi a1, a0, 15
-; RV64ILEGALI32-NEXT: slli a0, a0, 52
-; RV64ILEGALI32-NEXT: srli a0, a0, 60
-; RV64ILEGALI32-NEXT: add a0, a1, a0
-; RV64ILEGALI32-NEXT: li a1, 4
-; RV64ILEGALI32-NEXT: sllw a0, a1, a0
-; RV64ILEGALI32-NEXT: ret
+; RV64I-LABEL: shl_cttz_constant_i16:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi a1, a0, -1
+; RV64I-NEXT: not a0, a0
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: srli a1, a0, 1
+; RV64I-NEXT: lui a2, 5
+; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: sub a0, a0, a1
+; RV64I-NEXT: lui a1, 3
+; RV64I-NEXT: addiw a1, a1, 819
+; RV64I-NEXT: and a2, a0, a1
+; RV64I-NEXT: srli a0, a0, 2
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: add a0, a2, a0
+; RV64I-NEXT: srli a1, a0, 4
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: andi a1, a0, 15
+; RV64I-NEXT: slli a0, a0, 52
+; RV64I-NEXT: srli a0, a0, 60
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: li a1, 4
+; RV64I-NEXT: sll a0, a1, a0
+; RV64I-NEXT: ret
;
-; RV64ZBBLEGALI32-LABEL: shl_cttz_constant_i16:
-; RV64ZBBLEGALI32: # %bb.0: # %entry
-; RV64ZBBLEGALI32-NEXT: ctzw a0, a0
-; RV64ZBBLEGALI32-NEXT: li a1, 4
-; RV64ZBBLEGALI32-NEXT: sllw a0, a1, a0
-; RV64ZBBLEGALI32-NEXT: ret
+; RV64ZBB-LABEL: shl_cttz_constant_i16:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctz a0, a0
+; RV64ZBB-NEXT: li a1, 4
+; RV64ZBB-NEXT: sll a0, a1, a0
+; RV64ZBB-NEXT: ret
entry:
%cttz = call i16 @llvm.cttz.i16(i16 %y, i1 true)
%res = shl i16 4, %cttz