Diffstat (limited to 'llvm/lib/Target/RISCV')
 llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp | 148
 llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp   |  24
 llvm/lib/Target/RISCV/RISCVFeatures.td                   |   2
 llvm/lib/Target/RISCV/RISCVGISel.td                      | 108
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp              |   5
 llvm/lib/Target/RISCV/RISCVInstrInfo.td                  |   4
 llvm/lib/Target/RISCV/RISCVInstrInfoA.td                 |  74
 llvm/lib/Target/RISCV/RISCVInstrInfoZa.td                |  52
 llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td            |  16
 9 files changed, 239 insertions(+), 194 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 71c21e4..53633ea 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -675,6 +675,45 @@ static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
CC = getRISCVCCFromICmp(Pred);
}
+/// Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation
+/// \p GenericOpc, appropriate for the GPR register bank and of memory access
+/// size \p OpSize.
+static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
+ const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
+ switch (OpSize) {
+ default:
+ llvm_unreachable("Unexpected memory size");
+ case 8:
+ return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
+ case 16:
+ return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
+ case 32:
+ return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
+ case 64:
+ return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
+ }
+}
+
+/// Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation
+/// \p GenericOpc, appropriate for the GPR register bank and of memory access
+/// size \p OpSize. \returns \p GenericOpc if the combination is unsupported.
+static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
+ const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
+ switch (OpSize) {
+ case 8:
+ // Prefer unsigned due to no c.lb in Zcb.
+ return IsStore ? RISCV::SB : RISCV::LBU;
+ case 16:
+ return IsStore ? RISCV::SH : RISCV::LH;
+ case 32:
+ return IsStore ? RISCV::SW : RISCV::LW;
+ case 64:
+ return IsStore ? RISCV::SD : RISCV::LD;
+ }
+
+ return GenericOpc;
+}
+
bool RISCVInstructionSelector::select(MachineInstr &MI) {
MachineIRBuilder MIB(MI);
@@ -736,6 +775,62 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
MI.eraseFromParent();
return true;
}
+ case TargetOpcode::G_ZEXT:
+ case TargetOpcode::G_SEXT: {
+ bool IsSigned = Opc != TargetOpcode::G_ZEXT;
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+ LLT SrcTy = MRI->getType(SrcReg);
+ unsigned SrcSize = SrcTy.getSizeInBits();
+
+ if (SrcTy.isVector())
+ return false; // Should be handled by imported patterns.
+
+ assert((*RBI.getRegBank(DstReg, *MRI, TRI)).getID() ==
+ RISCV::GPRBRegBankID &&
+ "Unexpected ext regbank");
+
+ // Use addiw SrcReg, 0 (sext.w) for i32.
+ if (IsSigned && SrcSize == 32) {
+ MI.setDesc(TII.get(RISCV::ADDIW));
+ MI.addOperand(MachineOperand::CreateImm(0));
+ return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
+ }
+
+ // Use add.uw SrcReg, X0 (zext.w) for i32 with Zba.
+ if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
+ MI.setDesc(TII.get(RISCV::ADD_UW));
+ MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
+ return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
+ }
+
+ // Use sext.h/zext.h for i16 with Zbb.
+ if (SrcSize == 16 && STI.hasStdExtZbb()) {
+ MI.setDesc(TII.get(IsSigned ? RISCV::SEXT_H
+ : STI.isRV64() ? RISCV::ZEXT_H_RV64
+ : RISCV::ZEXT_H_RV32));
+ return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
+ }
+
+ // Use pack(w) SrcReg, X0 for i16 zext with Zbkb.
+ if (!IsSigned && SrcSize == 16 && STI.hasStdExtZbkb()) {
+ MI.setDesc(TII.get(STI.is64Bit() ? RISCV::PACKW : RISCV::PACK));
+ MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
+ return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
+ }
+
+ // Fall back to shift pair.
+ auto ShiftLeft =
+ MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {SrcReg})
+ .addImm(STI.getXLen() - SrcSize);
+ constrainSelectedInstRegOperands(*ShiftLeft, TII, TRI, RBI);
+ auto ShiftRight = MIB.buildInstr(IsSigned ? RISCV::SRAI : RISCV::SRLI,
+ {DstReg}, {ShiftLeft})
+ .addImm(STI.getXLen() - SrcSize);
+ constrainSelectedInstRegOperands(*ShiftRight, TII, TRI, RBI);
+ MI.eraseFromParent();
+ return true;
+ }
case TargetOpcode::G_FCONSTANT: {
// TODO: Use constant pool for complex constants.
Register DstReg = MI.getOperand(0).getReg();
@@ -836,6 +931,59 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
return selectImplicitDef(MI, MIB);
case TargetOpcode::G_UNMERGE_VALUES:
return selectUnmergeValues(MI, MIB);
+ case TargetOpcode::G_LOAD:
+ case TargetOpcode::G_STORE: {
+ GLoadStore &LdSt = cast<GLoadStore>(MI);
+ const Register ValReg = LdSt.getReg(0);
+ const Register PtrReg = LdSt.getPointerReg();
+ LLT PtrTy = MRI->getType(PtrReg);
+
+ const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);
+ if (RB.getID() != RISCV::GPRBRegBankID)
+ return false;
+
+#ifndef NDEBUG
+ const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, *MRI, TRI);
+ // Check that the pointer register is valid.
+ assert(PtrRB.getID() == RISCV::GPRBRegBankID &&
+ "Load/Store pointer operand isn't a GPR");
+ assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");
+#endif
+
+ // Can only handle AddressSpace 0.
+ if (PtrTy.getAddressSpace() != 0)
+ return false;
+
+ unsigned MemSize = LdSt.getMemSizeInBits().getValue();
+ AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();
+
+ if (isStrongerThanMonotonic(Order)) {
+ MI.setDesc(TII.get(selectZalasrLoadStoreOp(Opc, MemSize)));
+ return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
+ }
+
+ const unsigned NewOpc = selectRegImmLoadStoreOp(MI.getOpcode(), MemSize);
+ if (NewOpc == MI.getOpcode())
+ return false;
+
+ // Check if we can fold anything into the addressing mode.
+ auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
+ if (!AddrModeFns)
+ return false;
+
+ // Folded something. Create a new instruction and return it.
+ auto NewInst = MIB.buildInstr(NewOpc, {}, {}, MI.getFlags());
+ if (isa<GStore>(MI))
+ NewInst.addUse(ValReg);
+ else
+ NewInst.addDef(ValReg);
+ NewInst.cloneMemRefs(MI);
+ for (auto &Fn : *AddrModeFns)
+ Fn(NewInst);
+ MI.eraseFromParent();
+
+ return constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
+ }
default:
return false;
}
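
A note on the G_SEXT/G_ZEXT handling added above: the selector prefers a single-instruction form (sext.w, add.uw, sext.h/zext.h, pack/packw) and only falls back to the SLLI/SRAI or SLLI/SRLI pair when none applies. A minimal standalone model of that fallback's semantics, assuming RV64 (XLen = 64) and two's-complement arithmetic right shift (guaranteed since C++20); the helper names are illustrative, not LLVM's:

    #include <cassert>
    #include <cstdint>

    // Shift amount used by both shifts: move the SrcSize-bit value to the
    // top of the register, then shift it back down.
    unsigned extShiftAmount(unsigned XLen, unsigned SrcSize) {
      assert(SrcSize < XLen && "nothing to extend");
      return XLen - SrcSize;
    }

    // SLLI + SRAI pair: the arithmetic right shift replicates the sign bit.
    int64_t sextViaShifts(int64_t Src, unsigned SrcSize) {
      unsigned Sh = extShiftAmount(64, SrcSize);
      return static_cast<int64_t>(static_cast<uint64_t>(Src) << Sh) >> Sh;
    }

    // SLLI + SRLI pair: the logical right shift zero-fills the high bits.
    uint64_t zextViaShifts(uint64_t Src, unsigned SrcSize) {
      unsigned Sh = extShiftAmount(64, SrcSize);
      return (Src << Sh) >> Sh;
    }
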
diff --git a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
index a537904..5dd4bf4 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
@@ -166,7 +166,7 @@ static unsigned getLRForRMW32(AtomicOrdering Ordering,
return RISCV::LR_W;
return RISCV::LR_W_AQ;
case AtomicOrdering::SequentiallyConsistent:
- return RISCV::LR_W_AQ_RL;
+ return RISCV::LR_W_AQRL;
}
}
@@ -210,7 +210,7 @@ static unsigned getLRForRMW64(AtomicOrdering Ordering,
return RISCV::LR_D;
return RISCV::LR_D_AQ;
case AtomicOrdering::SequentiallyConsistent:
- return RISCV::LR_D_AQ_RL;
+ return RISCV::LR_D_AQRL;
}
}
@@ -287,8 +287,8 @@ static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
break;
}
BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)), ScratchReg)
- .addReg(AddrReg)
- .addReg(ScratchReg);
+ .addReg(ScratchReg)
+ .addReg(AddrReg);
BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
.addReg(ScratchReg)
.addReg(RISCV::X0)
@@ -375,8 +375,8 @@ static void doMaskedAtomicBinOpExpansion(const RISCVInstrInfo *TII,
ScratchReg);
BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering, STI)), ScratchReg)
- .addReg(AddrReg)
- .addReg(ScratchReg);
+ .addReg(ScratchReg)
+ .addReg(AddrReg);
BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
.addReg(ScratchReg)
.addReg(RISCV::X0)
@@ -535,8 +535,8 @@ bool RISCVExpandAtomicPseudo::expandAtomicMinMaxOp(
// sc.w scratch1, scratch1, (addr)
// bnez scratch1, loop
BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering, STI)), Scratch1Reg)
- .addReg(AddrReg)
- .addReg(Scratch1Reg);
+ .addReg(Scratch1Reg)
+ .addReg(AddrReg);
BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
.addReg(Scratch1Reg)
.addReg(RISCV::X0)
@@ -674,8 +674,8 @@ bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(
// bnez scratch, loophead
BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)),
ScratchReg)
- .addReg(AddrReg)
- .addReg(NewValReg);
+ .addReg(NewValReg)
+ .addReg(AddrReg);
BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
.addReg(ScratchReg)
.addReg(RISCV::X0)
@@ -707,8 +707,8 @@ bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(
MaskReg, ScratchReg);
BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)),
ScratchReg)
- .addReg(AddrReg)
- .addReg(ScratchReg);
+ .addReg(ScratchReg)
+ .addReg(AddrReg);
BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
.addReg(ScratchReg)
.addReg(RISCV::X0)
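
On the operand swaps in this file: the SC_r and AMO_rr definitions now list the data register before the address (see the RISCVInstrInfoA.td hunk below), so every BuildMI emitting a store-conditional must pass ScratchReg/NewValReg first and AddrReg second, matching the assembly form sc.w rd, rs2, (rs1). A rough reference model of the retry loop these expansions produce, written with a C++ CAS loop since portable C++ has no LR/SC primitive:

    #include <atomic>
    #include <cstdint>

    // Models the expansion of a word-sized atomic add: lr.w loads the old
    // value, the binop computes the update, sc.w attempts the store, and
    // bnez retries on failure. compare_exchange_weak stands in for the
    // lr.w/sc.w pair and reloads the old value when the store fails.
    int32_t expandedAtomicAdd(std::atomic<int32_t> &Mem, int32_t Incr) {
      int32_t Old = Mem.load(std::memory_order_relaxed);
      while (!Mem.compare_exchange_weak(Old, Old + Incr,
                                        std::memory_order_seq_cst,
                                        std::memory_order_relaxed)) {
        // sc.w wrote nonzero to scratch: branch back to the loop head.
      }
      return Old;
    }
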
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 27cf057..40c05e8 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -265,7 +265,7 @@ def HasStdExtZacas : Predicate<"Subtarget->hasStdExtZacas()">,
def NoStdExtZacas : Predicate<"!Subtarget->hasStdExtZacas()">;
def FeatureStdExtZalasr
- : RISCVExperimentalExtension<0, 1, "Load-Acquire and Store-Release Instructions">;
+ : RISCVExperimentalExtension<0, 9, "Load-Acquire and Store-Release Instructions">;
def HasStdExtZalasr : Predicate<"Subtarget->hasStdExtZalasr()">,
AssemblerPredicate<(all_of FeatureStdExtZalasr),
"'Zalasr' (Load-Acquire and Store-Release Instructions)">;
diff --git a/llvm/lib/Target/RISCV/RISCVGISel.td b/llvm/lib/Target/RISCV/RISCVGISel.td
index 6d01250..eba35ef 100644
--- a/llvm/lib/Target/RISCV/RISCVGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVGISel.td
@@ -100,119 +100,11 @@ def : LdPat<load, LD, PtrVT>;
def : StPat<store, SD, GPR, PtrVT>;
}
-// Load and store patterns for i16, needed because Zfh makes s16 load/store
-// legal and regbank select may not constrain registers to FP.
-def : LdPat<load, LH, i16>;
-def : StPat<store, SH, GPR, i16>;
-
-def : LdPat<extloadi8, LBU, i16>; // Prefer unsigned due to no c.lb in Zcb.
-def : StPat<truncstorei8, SB, GPR, i16>;
-
-let Predicates = [HasAtomicLdSt] in {
- // Prefer unsigned due to no c.lb in Zcb.
- def : LdPat<relaxed_load<atomic_load_aext_8>, LBU, i16>;
- def : LdPat<relaxed_load<atomic_load_nonext_16>, LH, i16>;
-
- def : StPat<relaxed_store<atomic_store_8>, SB, GPR, i16>;
- def : StPat<relaxed_store<atomic_store_16>, SH, GPR, i16>;
-}
-
-let Predicates = [HasAtomicLdSt, IsRV64] in {
- // Load pattern is in RISCVInstrInfoA.td and shared with RV32.
- def : StPat<relaxed_store<atomic_store_32>, SW, GPR, i32>;
-}
-
//===----------------------------------------------------------------------===//
// RV64 i32 patterns not used by SelectionDAG
//===----------------------------------------------------------------------===//
let Predicates = [IsRV64] in {
-def : LdPat<extloadi8, LBU, i32>; // Prefer unsigned due to no c.lb in Zcb.
-def : LdPat<extloadi16, LH, i32>;
-
-def : StPat<truncstorei8, SB, GPR, i32>;
-def : StPat<truncstorei16, SH, GPR, i32>;
-
-def : Pat<(sext (i32 GPR:$src)), (ADDIW GPR:$src, 0)>;
-
def : Pat<(sext_inreg (i64 (add GPR:$rs1, simm12_lo:$imm)), i32),
(ADDIW GPR:$rs1, simm12_lo:$imm)>;
}
-
-let Predicates = [IsRV64, NoStdExtZba] in
-def : Pat<(zext (i32 GPR:$src)), (SRLI (i64 (SLLI GPR:$src, 32)), 32)>;
-
-let Predicates = [IsRV32, NoStdExtZbb, NoStdExtZbkb] in
-def : Pat<(XLenVT (zext (i16 GPR:$src))),
- (SRLI (XLenVT (SLLI GPR:$src, 16)), 16)>;
-
-let Predicates = [IsRV64, NoStdExtZbb, NoStdExtZbkb] in {
-def : Pat<(i64 (zext (i16 GPR:$src))),
- (SRLI (XLenVT (SLLI GPR:$src, 48)), 48)>;
-def : Pat<(i32 (zext (i16 GPR:$src))),
- (SRLI (XLenVT (SLLI GPR:$src, 48)), 48)>;
-}
-
-let Predicates = [IsRV32, NoStdExtZbb] in
-def : Pat<(XLenVT (sext (i16 GPR:$src))),
- (SRAI (XLenVT (SLLI GPR:$src, 16)), 16)>;
-
-let Predicates = [IsRV64, NoStdExtZbb] in {
-def : Pat<(i64 (sext (i16 GPR:$src))),
- (SRAI (XLenVT (SLLI GPR:$src, 48)), 48)>;
-def : Pat<(i32 (sext (i16 GPR:$src))),
- (SRAI (XLenVT (SLLI GPR:$src, 48)), 48)>;
-}
-
-//===----------------------------------------------------------------------===//
-// Zb* RV64 patterns not used by SelectionDAG.
-//===----------------------------------------------------------------------===//
-
-let Predicates = [HasStdExtZba, IsRV64] in {
-def : Pat<(zext (i32 GPR:$src)), (ADD_UW GPR:$src, (XLenVT X0))>;
-}
-
-let Predicates = [HasStdExtZbb] in
-def : Pat<(i32 (sext (i16 GPR:$rs))), (SEXT_H GPR:$rs)>;
-let Predicates = [HasStdExtZbb, IsRV64] in
-def : Pat<(i64 (sext (i16 GPR:$rs))), (SEXT_H GPR:$rs)>;
-
-let Predicates = [HasStdExtZbb, IsRV32] in
-def : Pat<(i32 (zext (i16 GPR:$rs))), (ZEXT_H_RV32 GPR:$rs)>;
-let Predicates = [HasStdExtZbb, IsRV64] in {
-def : Pat<(i64 (zext (i16 GPR:$rs))), (ZEXT_H_RV64 GPR:$rs)>;
-def : Pat<(i32 (zext (i16 GPR:$rs))), (ZEXT_H_RV64 GPR:$rs)>;
-}
-
-let Predicates = [HasStdExtZbkb, NoStdExtZbb, IsRV32] in
-def : Pat<(i32 (zext (i16 GPR:$rs))), (PACK GPR:$rs, (XLenVT X0))>;
-let Predicates = [HasStdExtZbkb, NoStdExtZbb, IsRV64] in {
-def : Pat<(i64 (zext (i16 GPR:$rs))), (PACKW GPR:$rs, (XLenVT X0))>;
-def : Pat<(i32 (zext (i16 GPR:$rs))), (PACKW GPR:$rs, (XLenVT X0))>;
-}
-
-//===----------------------------------------------------------------------===//
-// Zalasr patterns not used by SelectionDAG
-//===----------------------------------------------------------------------===//
-
-let Predicates = [HasStdExtZalasr] in {
- // the sequentially consistent loads use
- // .aq instead of .aqrl to match the psABI/A.7
- def : PatLAQ<acquiring_load<atomic_load_aext_8>, LB_AQ, i16>;
- def : PatLAQ<seq_cst_load<atomic_load_aext_8>, LB_AQ, i16>;
-
- def : PatLAQ<acquiring_load<atomic_load_nonext_16>, LH_AQ, i16>;
- def : PatLAQ<seq_cst_load<atomic_load_nonext_16>, LH_AQ, i16>;
-
- def : PatSRL<releasing_store<atomic_store_8>, SB_RL, i16>;
- def : PatSRL<seq_cst_store<atomic_store_8>, SB_RL, i16>;
-
- def : PatSRL<releasing_store<atomic_store_16>, SH_RL, i16>;
- def : PatSRL<seq_cst_store<atomic_store_16>, SH_RL, i16>;
-}
-
-let Predicates = [HasStdExtZalasr, IsRV64] in {
- // Load pattern is in RISCVInstrInfoZalasr.td and shared with RV32.
- def : PatSRL<releasing_store<atomic_store_32>, SW_RL, i32>;
- def : PatSRL<seq_cst_store<atomic_store_32>, SW_RL, i32>;
-}
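
The blocks deleted above are not lost functionality: the i16/i32 extension patterns are replaced by the C++ G_SEXT/G_ZEXT handling in RISCVInstructionSelector.cpp, and the Zalasr i16 patterns by selectZalasrLoadStoreOp, which dispatches purely on access size. A standalone sketch of that size-keyed dispatch (hypothetical enum values, not LLVM's):

    // Hypothetical mirror of selectZalasrLoadStoreOp: one switch on the
    // memory size covers every value type that previously needed its own
    // pattern (i16, i32, i64, and XLenVT).
    enum ZalasrOp { LB_AQ, LH_AQ, LW_AQ, LD_AQ, SB_RL, SH_RL, SW_RL, SD_RL };

    ZalasrOp zalasrOpFor(bool IsStore, unsigned SizeInBits) {
      switch (SizeInBits) {
      case 8:  return IsStore ? SB_RL : LB_AQ;
      case 16: return IsStore ? SH_RL : LH_AQ;
      case 32: return IsStore ? SW_RL : LW_AQ;
      default: return IsStore ? SD_RL : LD_AQ;
      }
    }
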
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a3a4cf2..7123a2d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -15721,8 +15721,7 @@ static SDValue combineAddOfBooleanXor(SDNode *N, SelectionDAG &DAG) {
return SDValue();
// Emit a negate of the setcc.
- return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
- N0.getOperand(0));
+ return DAG.getNegative(N0.getOperand(0), DL, VT);
}
static SDValue performADDCombine(SDNode *N,
@@ -16974,7 +16973,7 @@ performSIGN_EXTEND_INREGCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
// Fold (sext_inreg (setcc), i1) -> (sub 0, (setcc))
if (Opc == ISD::SETCC && SrcVT == MVT::i1 && DCI.isAfterLegalizeDAG())
- return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
+ return DAG.getNegative(Src, DL, VT);
// Fold (sext_inreg (xor (setcc), -1), i1) -> (add (setcc), -1)
if (Opc == ISD::XOR && SrcVT == MVT::i1 &&
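
DAG.getNegative is an existing SelectionDAG helper that builds (sub 0, Val), so both hunks are behavior-preserving cleanups. The underlying trick: negating a 0/1 setcc result yields 0 or all-ones, i.e. the boolean sign-extended to the full type. A scalar model:

    #include <cstdint>

    // -(0) == 0 and -(1) == all-ones in two's complement, which is exactly
    // sext_inreg(setcc, i1).
    int64_t signExtendI1(int64_t SetCCResult /* 0 or 1 */) {
      return -SetCCResult;
    }
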
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 9855c47..7a14929 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1980,7 +1980,7 @@ def : LdPat<sextloadi8, LB>;
def : LdPat<extloadi8, LBU>; // Prefer unsigned due to no c.lb in Zcb.
def : LdPat<sextloadi16, LH>;
def : LdPat<extloadi16, LH>;
-def : LdPat<load, LW, i32>;
+def : LdPat<load, LW, i32>, Requires<[IsRV32]>;
def : LdPat<zextloadi8, LBU>;
def : LdPat<zextloadi16, LHU>;
@@ -1994,7 +1994,7 @@ class StPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
def : StPat<truncstorei8, SB, GPR, XLenVT>;
def : StPat<truncstorei16, SH, GPR, XLenVT>;
-def : StPat<store, SW, GPR, i32>;
+def : StPat<store, SW, GPR, i32>, Requires<[IsRV32]>;
/// Fences
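
The Requires<[IsRV32]> guards keep these i32 patterns from being imported on RV64, where i32 loads and stores now reach the hand-written G_LOAD/G_STORE case in RISCVInstructionSelector.cpp instead (selectRegImmLoadStoreOp picks LW/SW directly from the 32-bit access size). An illustrative sketch, not LLVM's machinery, of how such a predicate gates a pattern at selection time:

    // A pattern guarded by IsRV32 is simply unavailable when the subtarget
    // is RV64, so selection falls through to the C++ select() path.
    struct SubtargetModel { bool IsRV64; };
    bool rv32PatternApplies(const SubtargetModel &STI) { return !STI.IsRV64; }
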
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
index 25accd9..571d72f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
@@ -24,36 +24,36 @@ class LR_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
}
multiclass LR_r_aq_rl<bits<3> funct3, string opcodestr> {
- def "" : LR_r<0, 0, funct3, opcodestr>;
- def _AQ : LR_r<1, 0, funct3, opcodestr # ".aq">;
- def _RL : LR_r<0, 1, funct3, opcodestr # ".rl">;
- def _AQ_RL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
+ def "" : LR_r<0, 0, funct3, opcodestr>;
+ def _AQ : LR_r<1, 0, funct3, opcodestr # ".aq">;
+ def _RL : LR_r<0, 1, funct3, opcodestr # ".rl">;
+ def _AQRL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
}
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
class SC_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
: RVInstRAtomic<0b00011, aq, rl, funct3, OPC_AMO,
- (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1, GPR:$rs2),
+ (outs GPR:$rd), (ins GPR:$rs2, GPRMemZeroOffset:$rs1),
opcodestr, "$rd, $rs2, $rs1">;
multiclass SC_r_aq_rl<bits<3> funct3, string opcodestr> {
- def "" : SC_r<0, 0, funct3, opcodestr>;
- def _AQ : SC_r<1, 0, funct3, opcodestr # ".aq">;
- def _RL : SC_r<0, 1, funct3, opcodestr # ".rl">;
- def _AQ_RL : SC_r<1, 1, funct3, opcodestr # ".aqrl">;
+ def "" : SC_r<0, 0, funct3, opcodestr>;
+ def _AQ : SC_r<1, 0, funct3, opcodestr # ".aq">;
+ def _RL : SC_r<0, 1, funct3, opcodestr # ".rl">;
+ def _AQRL : SC_r<1, 1, funct3, opcodestr # ".aqrl">;
}
let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
class AMO_rr<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr>
: RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
- (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1, GPR:$rs2),
+ (outs GPR:$rd), (ins GPR:$rs2, GPRMemZeroOffset:$rs1),
opcodestr, "$rd, $rs2, $rs1">;
multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
- def "" : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
- def _AQ : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
- def _RL : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
- def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
+ def "" : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
+ def _AQ : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
+ def _RL : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
+ def _AQRL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
}
//===----------------------------------------------------------------------===//
@@ -174,8 +174,9 @@ let Predicates = [HasAtomicLdSt] in {
def : StPat<relaxed_store<atomic_store_8>, SB, GPR, XLenVT>;
def : StPat<relaxed_store<atomic_store_16>, SH, GPR, XLenVT>;
def : StPat<relaxed_store<atomic_store_32>, SW, GPR, XLenVT>;
+}
- // Used by GISel for RV32 and RV64.
+let Predicates = [HasAtomicLdSt, IsRV32] in {
def : LdPat<relaxed_load<atomic_load_nonext_32>, LW, i32>;
}
@@ -188,31 +189,34 @@ let Predicates = [HasAtomicLdSt, IsRV64] in {
/// AMOs
+class PatAMO<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT>
+ : Pat<(vt (OpNode (XLenVT GPR:$rs1), (vt GPR:$rs2))), (Inst GPR:$rs2, GPR:$rs1)>;
+
multiclass AMOPat<string AtomicOp, string BaseInst, ValueType vt = XLenVT,
list<Predicate> ExtraPreds = []> {
let Predicates = !listconcat([HasStdExtA, NoStdExtZtso], ExtraPreds) in {
- def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
- !cast<RVInst>(BaseInst), vt>;
- def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
- !cast<RVInst>(BaseInst#"_AQ"), vt>;
- def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
- !cast<RVInst>(BaseInst#"_RL"), vt>;
- def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
- !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
- def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
- !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
+ def : PatAMO<!cast<PatFrag>(AtomicOp#"_monotonic"),
+ !cast<RVInst>(BaseInst), vt>;
+ def : PatAMO<!cast<PatFrag>(AtomicOp#"_acquire"),
+ !cast<RVInst>(BaseInst#"_AQ"), vt>;
+ def : PatAMO<!cast<PatFrag>(AtomicOp#"_release"),
+ !cast<RVInst>(BaseInst#"_RL"), vt>;
+ def : PatAMO<!cast<PatFrag>(AtomicOp#"_acq_rel"),
+ !cast<RVInst>(BaseInst#"_AQRL"), vt>;
+ def : PatAMO<!cast<PatFrag>(AtomicOp#"_seq_cst"),
+ !cast<RVInst>(BaseInst#"_AQRL"), vt>;
}
let Predicates = !listconcat([HasStdExtA, HasStdExtZtso], ExtraPreds) in {
- def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
- !cast<RVInst>(BaseInst), vt>;
- def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
- !cast<RVInst>(BaseInst), vt>;
- def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
- !cast<RVInst>(BaseInst), vt>;
- def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
- !cast<RVInst>(BaseInst), vt>;
- def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
- !cast<RVInst>(BaseInst), vt>;
+ def : PatAMO<!cast<PatFrag>(AtomicOp#"_monotonic"),
+ !cast<RVInst>(BaseInst), vt>;
+ def : PatAMO<!cast<PatFrag>(AtomicOp#"_acquire"),
+ !cast<RVInst>(BaseInst), vt>;
+ def : PatAMO<!cast<PatFrag>(AtomicOp#"_release"),
+ !cast<RVInst>(BaseInst), vt>;
+ def : PatAMO<!cast<PatFrag>(AtomicOp#"_acq_rel"),
+ !cast<RVInst>(BaseInst), vt>;
+ def : PatAMO<!cast<PatFrag>(AtomicOp#"_seq_cst"),
+ !cast<RVInst>(BaseInst), vt>;
}
}
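
PatAMO exists because PatGprGpr emits result operands in ($rs1, $rs2) order, while the AMO_rr instructions above now list the data register first; the new class swaps them to (Inst GPR:$rs2, GPR:$rs1) to match. Reference semantics of the instruction being selected, amoadd.w rd, rs2, (rs1), as a C++ sketch:

    #include <atomic>
    #include <cstdint>

    // amoadd.w rd, rs2, (rs1): atomically add rs2 to the word at address
    // rs1 and return the old memory value in rd.
    int32_t amoaddW(std::atomic<int32_t> *Rs1, int32_t Rs2) {
      return Rs1->fetch_add(Rs2, std::memory_order_relaxed);  // relaxed: no .aq/.rl bits
    }
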
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZa.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZa.td
index 87b9f45..20e2142 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZa.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZa.td
@@ -44,15 +44,15 @@ let hasSideEffects = 0, mayLoad = 1, mayStore = 1, Constraints = "$rd = $rd_wb"
class AMO_cas<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr,
DAGOperand RC>
: RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
- (outs RC:$rd_wb), (ins RC:$rd, GPRMemZeroOffset:$rs1, RC:$rs2),
+ (outs RC:$rd_wb), (ins RC:$rd, RC:$rs2, GPRMemZeroOffset:$rs1),
opcodestr, "$rd, $rs2, $rs1">;
multiclass AMO_cas_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr,
DAGOperand RC> {
- def "" : AMO_cas<funct5, 0, 0, funct3, opcodestr, RC>;
- def _AQ : AMO_cas<funct5, 1, 0, funct3, opcodestr # ".aq", RC>;
- def _RL : AMO_cas<funct5, 0, 1, funct3, opcodestr # ".rl", RC>;
- def _AQ_RL : AMO_cas<funct5, 1, 1, funct3, opcodestr # ".aqrl", RC>;
+ def "" : AMO_cas<funct5, 0, 0, funct3, opcodestr, RC>;
+ def _AQ : AMO_cas<funct5, 1, 0, funct3, opcodestr # ".aq", RC>;
+ def _RL : AMO_cas<funct5, 0, 1, funct3, opcodestr # ".rl", RC>;
+ def _AQRL : AMO_cas<funct5, 1, 1, funct3, opcodestr # ".aqrl", RC>;
}
let Predicates = [HasStdExtZacas], IsSignExtendingOpW = 1 in {
@@ -71,48 +71,48 @@ defm AMOCAS_Q : AMO_cas_aq_rl<0b00101, 0b100, "amocas.q", GPRPairRV64>;
multiclass AMOCASPat<string AtomicOp, string BaseInst, ValueType vt = XLenVT,
list<Predicate> ExtraPreds = []> {
let Predicates = !listconcat([HasStdExtZacas, NoStdExtZtso], ExtraPreds) in {
- def : Pat<(!cast<PatFrag>(AtomicOp#"_monotonic") (vt GPR:$addr),
+ def : Pat<(!cast<PatFrag>(AtomicOp#"_monotonic") (XLenVT GPR:$addr),
(vt GPR:$cmp),
(vt GPR:$new)),
- (!cast<RVInst>(BaseInst) GPR:$cmp, GPR:$addr, GPR:$new)>;
- def : Pat<(!cast<PatFrag>(AtomicOp#"_acquire") (vt GPR:$addr),
+ (!cast<RVInst>(BaseInst) GPR:$cmp, GPR:$new, GPR:$addr)>;
+ def : Pat<(!cast<PatFrag>(AtomicOp#"_acquire") (XLenVT GPR:$addr),
(vt GPR:$cmp),
(vt GPR:$new)),
- (!cast<RVInst>(BaseInst#"_AQ") GPR:$cmp, GPR:$addr, GPR:$new)>;
- def : Pat<(!cast<PatFrag>(AtomicOp#"_release") (vt GPR:$addr),
+ (!cast<RVInst>(BaseInst#"_AQ") GPR:$cmp, GPR:$new, GPR:$addr)>;
+ def : Pat<(!cast<PatFrag>(AtomicOp#"_release") (XLenVT GPR:$addr),
(vt GPR:$cmp),
(vt GPR:$new)),
- (!cast<RVInst>(BaseInst#"_RL") GPR:$cmp, GPR:$addr, GPR:$new)>;
- def : Pat<(!cast<PatFrag>(AtomicOp#"_acq_rel") (vt GPR:$addr),
+ (!cast<RVInst>(BaseInst#"_RL") GPR:$cmp, GPR:$new, GPR:$addr)>;
+ def : Pat<(!cast<PatFrag>(AtomicOp#"_acq_rel") (XLenVT GPR:$addr),
(vt GPR:$cmp),
(vt GPR:$new)),
- (!cast<RVInst>(BaseInst#"_AQ_RL") GPR:$cmp, GPR:$addr, GPR:$new)>;
+ (!cast<RVInst>(BaseInst#"_AQRL") GPR:$cmp, GPR:$new, GPR:$addr)>;
def : Pat<(!cast<PatFrag>(AtomicOp#"_seq_cst") (vt GPR:$addr),
(vt GPR:$cmp),
(vt GPR:$new)),
- (!cast<RVInst>(BaseInst#"_AQ_RL") GPR:$cmp, GPR:$addr, GPR:$new)>;
+ (!cast<RVInst>(BaseInst#"_AQRL") GPR:$cmp, GPR:$new, GPR:$addr)>;
} // Predicates = !listconcat([HasStdExtZacas, NoStdExtZtso], ExtraPreds)
let Predicates = !listconcat([HasStdExtZacas, HasStdExtZtso], ExtraPreds) in {
- def : Pat<(!cast<PatFrag>(AtomicOp#"_monotonic") (vt GPR:$addr),
+ def : Pat<(!cast<PatFrag>(AtomicOp#"_monotonic") (XLenVT GPR:$addr),
(vt GPR:$cmp),
(vt GPR:$new)),
- (!cast<RVInst>(BaseInst) GPR:$cmp, GPR:$addr, GPR:$new)>;
- def : Pat<(!cast<PatFrag>(AtomicOp#"_acquire") (vt GPR:$addr),
+ (!cast<RVInst>(BaseInst) GPR:$cmp, GPR:$new, GPR:$addr)>;
+ def : Pat<(!cast<PatFrag>(AtomicOp#"_acquire") (XLenVT GPR:$addr),
(vt GPR:$cmp),
(vt GPR:$new)),
- (!cast<RVInst>(BaseInst) GPR:$cmp, GPR:$addr, GPR:$new)>;
- def : Pat<(!cast<PatFrag>(AtomicOp#"_release") (vt GPR:$addr),
+ (!cast<RVInst>(BaseInst) GPR:$cmp, GPR:$new, GPR:$addr)>;
+ def : Pat<(!cast<PatFrag>(AtomicOp#"_release") (XLenVT GPR:$addr),
(vt GPR:$cmp),
(vt GPR:$new)),
- (!cast<RVInst>(BaseInst) GPR:$cmp, GPR:$addr, GPR:$new)>;
- def : Pat<(!cast<PatFrag>(AtomicOp#"_acq_rel") (vt GPR:$addr),
+ (!cast<RVInst>(BaseInst) GPR:$cmp, GPR:$new, GPR:$addr)>;
+ def : Pat<(!cast<PatFrag>(AtomicOp#"_acq_rel") (XLenVT GPR:$addr),
(vt GPR:$cmp),
(vt GPR:$new)),
- (!cast<RVInst>(BaseInst) GPR:$cmp, GPR:$addr, GPR:$new)>;
- def : Pat<(!cast<PatFrag>(AtomicOp#"_seq_cst") (vt GPR:$addr),
+ (!cast<RVInst>(BaseInst) GPR:$cmp, GPR:$new, GPR:$addr)>;
+ def : Pat<(!cast<PatFrag>(AtomicOp#"_seq_cst") (XLenVT GPR:$addr),
(vt GPR:$cmp),
(vt GPR:$new)),
- (!cast<RVInst>(BaseInst) GPR:$cmp, GPR:$addr, GPR:$new)>;
+ (!cast<RVInst>(BaseInst) GPR:$cmp, GPR:$new, GPR:$addr)>;
} // Predicates = !listconcat([HasStdExtZacas, HasStdExtZtso], ExtraPreds)
}
@@ -140,7 +140,7 @@ def WRS_STO : WRSInst<0b000000011101, "wrs.sto">, Sched<[]>;
// Zabha (Byte and Halfword Atomic Memory Operations)
//===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtZabha] in {
+let Predicates = [HasStdExtZabha], IsSignExtendingOpW = 1 in {
defm AMOSWAP_B : AMO_rr_aq_rl<0b00001, 0b000, "amoswap.b">,
Sched<[WriteAtomicB, ReadAtomicBA, ReadAtomicBD]>;
defm AMOADD_B : AMO_rr_aq_rl<0b00000, 0b000, "amoadd.b">,
@@ -181,7 +181,7 @@ defm AMOMAXU_H : AMO_rr_aq_rl<0b11100, 0b001, "amomaxu.h">,
}
// If Zacas extension is also implemented, Zabha further provides AMOCAS.[B|H].
-let Predicates = [HasStdExtZabha, HasStdExtZacas] in {
+let Predicates = [HasStdExtZabha, HasStdExtZacas], IsSignExtendingOpW = 1 in {
defm AMOCAS_B : AMO_cas_aq_rl<0b00101, 0b000, "amocas.b", GPR>;
defm AMOCAS_H : AMO_cas_aq_rl<0b00101, 0b001, "amocas.h", GPR>;
}
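
IsSignExtendingOpW asserts that bits 63..31 of the result are copies of bit 31, which lets later passes (e.g. RISCVOptWInstrs) delete redundant sign-extension instructions after these AMOs. For the Zabha byte and halfword forms this holds because the value placed in rd is sign-extended. A scalar model of the guarantee, assuming RV64:

    #include <cstdint>

    // What the flag asserts for amoadd.b on RV64: the byte result lands in
    // rd sign-extended through bit 63, so bits 63..31 already replicate
    // bit 31 and a following sext.w would be a no-op.
    int64_t zabhaByteResult(int8_t Loaded) {
      return static_cast<int64_t>(Loaded);
    }
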
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td
index 1deecd2..5f944034 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td
@@ -30,21 +30,22 @@ class SRL_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
opcodestr, "$rs2, $rs1"> {
let rd = 0;
}
+
multiclass LAQ_r_aq_rl<bits<3> funct3, string opcodestr> {
- def _AQ : LAQ_r<1, 0, funct3, opcodestr # ".aq">;
- def _AQ_RL : LAQ_r<1, 1, funct3, opcodestr # ".aqrl">;
+ def _AQ : LAQ_r<1, 0, funct3, opcodestr # ".aq">;
+ def _AQRL : LAQ_r<1, 1, funct3, opcodestr # ".aqrl">;
}
multiclass SRL_r_aq_rl<bits<3> funct3, string opcodestr> {
- def _RL : SRL_r<0, 1, funct3, opcodestr # ".rl">;
- def _AQ_RL : SRL_r<1, 1, funct3, opcodestr # ".aqrl">;
+ def _RL : SRL_r<0, 1, funct3, opcodestr # ".rl">;
+ def _AQRL : SRL_r<1, 1, funct3, opcodestr # ".aqrl">;
}
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtZalasr] in {
+let Predicates = [HasStdExtZalasr], IsSignExtendingOpW = 1 in {
defm LB : LAQ_r_aq_rl<0b000, "lb">;
defm LH : LAQ_r_aq_rl<0b001, "lh">;
defm LW : LAQ_r_aq_rl<0b010, "lw">;
@@ -93,11 +94,12 @@ let Predicates = [HasStdExtZalasr] in {
def : PatSRL<releasing_store<atomic_store_32>, SW_RL>;
def : PatSRL<seq_cst_store<atomic_store_32>, SW_RL>;
+}
- // Used by GISel for RV32 and RV64.
+let Predicates = [HasStdExtZalasr, IsRV32] in {
def : PatLAQ<acquiring_load<atomic_load_nonext_32>, LW_AQ, i32>;
def : PatLAQ<seq_cst_load<atomic_load_nonext_32>, LW_AQ, i32>;
-} // Predicates = [HasStdExtZalasr]
+} // Predicates = [HasStdExtZalasr, IsRV32]
let Predicates = [HasStdExtZalasr, IsRV64] in {
def : PatLAQ<acquiring_load<atomic_load_asext_32>, LW_AQ, i64>;
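
Note the ordering mapping these PatLAQ patterns implement (the comment deleted from RISCVGISel.td referenced the psABI atomics mapping, Table A.7): sequentially consistent loads select the plain .aq form rather than .aqrl. A sketch of the resulting choice for loads:

    // Ordering -> Zalasr load form used by the patterns above; both acquire
    // and seq_cst map to the .aq variant.
    enum class LoadOrder { Monotonic, Acquire, SeqCst };
    bool usesAqLoad(LoadOrder O) {
      return O == LoadOrder::Acquire || O == LoadOrder::SeqCst;
    }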