diff options
author | Philip Reames <preames@rivosinc.com> | 2023-12-01 11:00:59 -0800 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-12-01 11:00:59 -0800 |
commit | e81796671890b59c110f8e41adc7ca26f8484d20 (patch) | |
tree | 2343ed9003a92b3f60f8e3a5fb6715b697c9329a /llvm/lib/Target/RISCV | |
parent | ca2d79f9cad48b7165bf81a7cc24b67f277915f1 (diff) | |
download | llvm-e81796671890b59c110f8e41adc7ca26f8484d20.zip llvm-e81796671890b59c110f8e41adc7ca26f8484d20.tar.gz llvm-e81796671890b59c110f8e41adc7ca26f8484d20.tar.bz2 |
[RISCV] Collapse fast unaligned access into a single feature [nfc-ish] (#73971)
When we'd originally added unaligned-scalar-mem and
unaligned-vector-mem, they were separated into two parts under the
theory that some processor might implement one, but not the other. At
the moment, we don't have evidence of such a processor. The C/C++ level
interface, and the clang driver command lines have settled on a single
unaligned flag which indicates both scalar and vector support unaligned.
Given that, let's remove the test matrix complexity for a set of
configurations which don't appear useful.
Given that these are internal feature names, I don't think we need to
provide any forward compatibility. Does anyone disagree?
Note: The immediate trigger for this patch was finding another case
where the unaligned-vector-mem wasn't being properly serialized to IR
from clang which resulted in problems reproducing assembly from clang's
-emit-llvm feature. Instead of fixing this, I decided getting rid of the
complexity was the better approach.
Diffstat (limited to 'llvm/lib/Target/RISCV')
-rw-r--r-- | llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp | 4 | ||||
-rw-r--r-- | llvm/lib/Target/RISCV/RISCVFeatures.td | 13 | ||||
-rw-r--r-- | llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 16 | ||||
-rw-r--r-- | llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h | 4 |
4 files changed, 16 insertions, 21 deletions
diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp index ff35a8f..aa1bdbd 100644 --- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp +++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp @@ -309,8 +309,8 @@ bool RISCVExpandPseudo::expandRV32ZdinxStore(MachineBasicBlock &MBB, .addReg(MBBI->getOperand(1).getReg()) .add(MBBI->getOperand(2)); if (MBBI->getOperand(2).isGlobal() || MBBI->getOperand(2).isCPI()) { - // FIXME: Zdinx RV32 can not work on unaligned scalar memory. - assert(!STI->enableUnalignedScalarMem()); + // FIXME: Zdinx RV32 can not work on unaligned memory. + assert(!STI->hasFastUnalignedAccess()); assert(MBBI->getOperand(2).getOffset() % 8 == 0); MBBI->getOperand(2).setOffset(MBBI->getOperand(2).getOffset() + 4); diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td index c5d88ca..7d142d3 100644 --- a/llvm/lib/Target/RISCV/RISCVFeatures.td +++ b/llvm/lib/Target/RISCV/RISCVFeatures.td @@ -946,15 +946,10 @@ def FeatureTrailingSeqCstFence : SubtargetFeature<"seq-cst-trailing-fence", "true", "Enable trailing fence for seq-cst store.">; -def FeatureUnalignedScalarMem - : SubtargetFeature<"unaligned-scalar-mem", "EnableUnalignedScalarMem", - "true", "Has reasonably performant unaligned scalar " - "loads and stores">; - -def FeatureUnalignedVectorMem - : SubtargetFeature<"unaligned-vector-mem", "EnableUnalignedVectorMem", - "true", "Has reasonably performant unaligned vector " - "loads and stores">; +def FeatureFastUnalignedAccess + : SubtargetFeature<"fast-unaligned-access", "HasFastUnalignedAccess", + "true", "Has reasonably performant unaligned " + "loads and stores (both scalar and vector)">; def FeaturePostRAScheduler : SubtargetFeature<"use-postra-scheduler", "UsePostRAScheduler", "true", "Schedule again after register allocation">; diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 
9e21cf3..241bc96 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -1874,7 +1874,7 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, // replace. If we don't support unaligned scalar mem, prefer the constant // pool. // TODO: Can the caller pass down the alignment? - if (!Subtarget.enableUnalignedScalarMem()) + if (!Subtarget.hasFastUnalignedAccess()) return true; // Prefer to keep the load if it would require many instructions. @@ -14689,7 +14689,7 @@ static bool matchIndexAsWiderOp(EVT VT, SDValue Index, SDValue Mask, if (WiderElementSize > ST.getELen()/8) return false; - if (!ST.enableUnalignedVectorMem() && BaseAlign < WiderElementSize) + if (!ST.hasFastUnalignedAccess() && BaseAlign < WiderElementSize) return false; for (unsigned i = 0; i < Index->getNumOperands(); i++) { @@ -19288,8 +19288,8 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses( unsigned *Fast) const { if (!VT.isVector()) { if (Fast) - *Fast = Subtarget.enableUnalignedScalarMem(); - return Subtarget.enableUnalignedScalarMem(); + *Fast = Subtarget.hasFastUnalignedAccess(); + return Subtarget.hasFastUnalignedAccess(); } // All vector implementations must support element alignment @@ -19305,8 +19305,8 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses( // misaligned accesses. TODO: Work through the codegen implications of // allowing such accesses to be formed, and considered fast. if (Fast) - *Fast = Subtarget.enableUnalignedVectorMem(); - return Subtarget.enableUnalignedVectorMem(); + *Fast = Subtarget.hasFastUnalignedAccess(); + return Subtarget.hasFastUnalignedAccess(); } @@ -19341,7 +19341,7 @@ EVT RISCVTargetLowering::getOptimalMemOpType(const MemOp &Op, // Do we have sufficient alignment for our preferred VT? If not, revert // to largest size allowed by our alignment criteria. 
- if (PreferredVT != MVT::i8 && !Subtarget.enableUnalignedVectorMem()) { + if (PreferredVT != MVT::i8 && !Subtarget.hasFastUnalignedAccess()) { Align RequiredAlign(PreferredVT.getStoreSize()); if (Op.isFixedDstAlign()) RequiredAlign = std::min(RequiredAlign, Op.getDstAlign()); @@ -19533,7 +19533,7 @@ bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType, if (!isLegalElementTypeForRVV(ScalarType)) return false; - if (!Subtarget.enableUnalignedVectorMem() && + if (!Subtarget.hasFastUnalignedAccess() && Alignment < ScalarType.getStoreSize()) return false; diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h index 75ae55e..efc8350 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h @@ -199,7 +199,7 @@ public: return false; EVT ElemType = DataTypeVT.getScalarType(); - if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize()) + if (!ST->hasFastUnalignedAccess() && Alignment < ElemType.getStoreSize()) return false; return TLI->isLegalElementTypeForRVV(ElemType); @@ -224,7 +224,7 @@ public: return false; EVT ElemType = DataTypeVT.getScalarType(); - if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize()) + if (!ST->hasFastUnalignedAccess() && Alignment < ElemType.getStoreSize()) return false; return TLI->isLegalElementTypeForRVV(ElemType); |