Diffstat (limited to 'llvm/lib/Target/RISCV')
 llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp    |  1
 llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp |  1
 llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp    | 23
 llvm/lib/Target/RISCV/RISCVFeatures.td                    | 19
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp               |  1
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp               | 14
 llvm/lib/Target/RISCV/RISCVInstrInfoP.td                  |  5
 llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp                 |  1
 8 files changed, 64 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
index 41a9c92..96e8afc 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -823,6 +823,7 @@ static bool relaxableFixupNeedsRelocation(const MCFixupKind Kind) {
     break;
   case RISCV::fixup_riscv_rvc_jump:
   case RISCV::fixup_riscv_rvc_branch:
+  case RISCV::fixup_riscv_rvc_imm:
   case RISCV::fixup_riscv_jal:
     return false;
   }
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
index 6d587e6..5934c91 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
@@ -688,6 +688,7 @@ uint64_t RISCVMCCodeEmitter::getImmOpValue(const MCInst &MI, unsigned OpNo,
       // the `jal` again in the assembler.
     } else if (MIFrm == RISCVII::InstFormatCI) {
       FixupKind = RISCV::fixup_riscv_rvc_imm;
+      AsmRelaxToLinkerRelaxableWithFeature(RISCV::FeatureVendorXqcili);
     } else if (MIFrm == RISCVII::InstFormatI) {
       FixupKind = RISCV::fixup_riscv_12_i;
     } else if (MIFrm == RISCVII::InstFormatQC_EB) {
diff --git a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
index 98b636e..9bd66a4 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
@@ -373,6 +373,26 @@ static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
         .addReg(ScratchReg)
         .addImm(-1);
     break;
+  case AtomicRMWInst::Max:
+    BuildMI(LoopMBB, DL, TII->get(RISCV::MAX), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    break;
+  case AtomicRMWInst::Min:
+    BuildMI(LoopMBB, DL, TII->get(RISCV::MIN), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    break;
+  case AtomicRMWInst::UMax:
+    BuildMI(LoopMBB, DL, TII->get(RISCV::MAXU), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    break;
+  case AtomicRMWInst::UMin:
+    BuildMI(LoopMBB, DL, TII->get(RISCV::MINU), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    break;
   }
   BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)), ScratchReg)
       .addReg(ScratchReg)
@@ -682,6 +702,9 @@ bool RISCVExpandAtomicPseudo::expandAtomicMinMaxOp(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
     AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
     MachineBasicBlock::iterator &NextMBBI) {
+  // Using MIN(U)/MAX(U) is preferable if permitted.
+  if (STI->hasPermissiveZalrsc() && STI->hasStdExtZbb() && !IsMasked)
+    return expandAtomicBinOp(MBB, MBBI, BinOp, IsMasked, Width, NextMBBI);
   MachineInstr &MI = *MBBI;
   DebugLoc DL = MI.getDebugLoc();
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 2754d78..b4556f6 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1906,6 +1906,25 @@ def FeatureForcedAtomics : SubtargetFeature<
 
 def HasAtomicLdSt
     : Predicate<"Subtarget->hasStdExtZalrsc() || Subtarget->hasForcedAtomics()">;
+// The RISC-V Unprivileged Architecture - ISA Volume 1 (Version: 20250508)
+// [https://docs.riscv.org/reference/isa/_attachments/riscv-unprivileged.pdf]
+// in section 13.3. Eventual Success of Store-Conditional Instructions, defines
+// _constrained_ LR/SC loops:
+//   The dynamic code executed between the LR and SC instructions can only
+//   contain instructions from the base ''I'' instruction set, excluding loads,
+//   stores, backward jumps, taken backward branches, JALR, FENCE, and SYSTEM
+//   instructions. Compressed forms of the aforementioned ''I'' instructions in
+//   the Zca and Zcb extensions are also permitted.
+// LR/SC loops that do not adhere to the above are _unconstrained_ LR/SC loops,
+// and success is implementation specific. For implementations which know that
+// non-base instructions (such as the ''B'' extension) will not violate any
+// forward progress guarantees, using these instructions to reduce the LR/SC
+// sequence length is desirable.
+def FeaturePermissiveZalrsc
+    : SubtargetFeature<
+          "permissive-zalrsc", "HasPermissiveZalrsc", "true",
+          "Implementation permits non-base instructions between LR/SC pairs">;
+
 def FeatureTaggedGlobals : SubtargetFeature<"tagged-globals",
     "AllowTaggedGlobals",
     "true", "Use an instruction sequence for taking the address of a global "
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 9a6afa1..b25a054 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3995,6 +3995,7 @@ bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits,
     case RISCV::CTZW:
     case RISCV::CPOPW:
     case RISCV::SLLI_UW:
+    case RISCV::ABSW:
     case RISCV::FMV_W_X:
     case RISCV::FCVT_H_W:
     case RISCV::FCVT_H_W_INX:
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 1c930ac..56881f7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -433,6 +433,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   if (Subtarget.hasStdExtP() ||
       (Subtarget.hasVendorXCValu() && !Subtarget.is64Bit())) {
     setOperationAction(ISD::ABS, XLenVT, Legal);
+    if (Subtarget.is64Bit())
+      setOperationAction(ISD::ABS, MVT::i32, Custom);
   } else if (Subtarget.hasShortForwardBranchOpt()) {
     // We can use PseudoCCSUB to implement ABS.
     setOperationAction(ISD::ABS, XLenVT, Legal);
@@ -14816,8 +14818,16 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
            "Unexpected custom legalisation");
 
+    if (Subtarget.hasStdExtP()) {
+      SDValue Src =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
+      SDValue Abs = DAG.getNode(RISCVISD::ABSW, DL, MVT::i64, Src);
+      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Abs));
+      return;
+    }
+
     if (Subtarget.hasStdExtZbb()) {
-      // Emit a special ABSW node that will be expanded to NEGW+MAX at isel.
+      // Emit a special node that will be expanded to NEGW+MAX at isel.
       // This allows us to remember that the result is sign extended. Expanding
       // to NEGW+MAX here requires a Freeze which breaks ComputeNumSignBits.
       SDValue Src = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64,
@@ -20290,6 +20300,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     break;
   }
+  case RISCVISD::ABSW:
   case RISCVISD::CLZW:
   case RISCVISD::CTZW: {
     // Only the lower 32 bits of the first operand are read
@@ -21862,6 +21873,7 @@ unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
   case RISCVISD::REMUW:
   case RISCVISD::ROLW:
   case RISCVISD::RORW:
+  case RISCVISD::ABSW:
   case RISCVISD::FCVT_W_RV64:
   case RISCVISD::FCVT_WU_RV64:
   case RISCVISD::STRICT_FCVT_W_RV64:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
index cc085bb..4cbbba3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
@@ -1461,5 +1461,10 @@ let Predicates = [HasStdExtP, IsRV32] in {
 
 // Codegen patterns
 //===----------------------------------------------------------------------===//
+def riscv_absw : RVSDNode<"ABSW", SDTIntUnaryOp>;
+
 let Predicates = [HasStdExtP] in
 def : PatGpr<abs, ABS>;
+
+let Predicates = [HasStdExtP, IsRV64] in
+def : PatGpr<riscv_absw, ABSW>;
diff --git a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
index d08115b..ea98cdb 100644
--- a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
+++ b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
@@ -172,6 +172,7 @@ static bool hasAllNBitUsers(const MachineInstr &OrigMI,
       case RISCV::CTZW:
       case RISCV::CPOPW:
       case RISCV::SLLI_UW:
+      case RISCV::ABSW:
       case RISCV::FMV_W_X:
       case RISCV::FCVT_H_W:
       case RISCV::FCVT_H_W_INX:
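
With permissive-zalrsc and Zbb enabled, the RISCVExpandAtomicPseudoInsts.cpp change above routes unmasked atomicrmw min/max through the plain LR/SC binop expansion, so the loop body becomes a single MIN(U)/MAX(U) instead of the compare-and-branch sequence used by the constrained expansion. The sketch below restates that retry loop in portable C++, with compare_exchange_weak standing in for LR/SC; the function name and the assembly in the comments are illustrative assumptions, not taken from the patch.

    // Illustrative sketch, not LLVM code: the retry loop that the LR/SC
    // expansion of "atomicrmw max" implements, written with a CAS instead.
    // With +permissive-zalrsc and +zbb the emitted loop body is roughly:
    //
    //   1:
    //     lr.w  a2, (a0)      # load-reserved the current value
    //     max   a3, a2, a1    # Zbb MAX computes the new value directly
    //     sc.w  a3, a3, (a0)  # store-conditional; nonzero a3 means failure
    //     bnez  a3, 1b        # retry until the SC succeeds
    #include <algorithm>
    #include <atomic>

    int atomic_fetch_max(std::atomic<int> &obj, int arg) {
      int old = obj.load(std::memory_order_relaxed);
      // Keep retrying until the update lands, mirroring the lr.w/sc.w loop.
      while (!obj.compare_exchange_weak(old, std::max(old, arg)))
        ;
      return old; // value seen before the update, like DestReg in the expansion
    }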
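
The RISCVISelLowering.cpp and RISCVInstrInfoP.td changes above legalize i32 ISD::ABS on RV64 with the P extension to the new RISCVISD::ABSW node (any_extend, ABSW, truncate) and add a pattern selecting the ABSW instruction, while the DAG-combine and ComputeNumSignBits hooks record that ABSW reads only the low 32 bits and produces a sign-extended result. A minimal source-level example of the kind of code this path targets is shown below; the single-instruction output described in the comments is an assumption based on the added pattern, not verified here.

    // Minimal example of an i32 abs that the new custom legalization targets.
    // Built for rv64 with the P extension, the select below is typically folded
    // to ISD::ABS, custom-legalized to RISCVISD::ABSW (any_extend -> ABSW ->
    // truncate), and selected by the new PatGpr<riscv_absw, ABSW> pattern,
    // presumably yielding a single ABSW instruction.
    #include <cstdint>

    int32_t abs_i32(int32_t x) {
      return x < 0 ? -x : x; // the classic abs idiom DAGCombine recognizes
    }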
