Diffstat (limited to 'llvm/lib/Target/Sparc')
-rw-r--r--  llvm/lib/Target/Sparc/SparcAsmPrinter.cpp      9
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.cpp   26
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.h      5
-rw-r--r--  llvm/lib/Target/Sparc/SparcInstrInfo.cpp      17
-rw-r--r--  llvm/lib/Target/Sparc/SparcInstrInfo.td       29
5 files changed, 82 insertions, 4 deletions
diff --git a/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp b/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
index 8e7e2e5..f1d487c87 100644
--- a/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
+++ b/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
@@ -326,6 +326,15 @@ void SparcAsmPrinter::lowerToMCInst(const MachineInstr *MI, MCInst &OutMI) {
void SparcAsmPrinter::emitInstruction(const MachineInstr *MI) {
Sparc_MC::verifyInstructionPredicates(MI->getOpcode(),
getSubtargetInfo().getFeatureBits());
+ if (MI->isBundle()) {
+ const MachineBasicBlock *MBB = MI->getParent();
+ MachineBasicBlock::const_instr_iterator I = ++MI->getIterator();
+ while (I != MBB->instr_end() && I->isInsideBundle()) {
+ emitInstruction(&*I);
+ ++I;
+ }
+ return;
+ }
switch (MI->getOpcode()) {
default: break;
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index a160709..cbb7db6 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -33,6 +33,7 @@
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
@@ -3557,3 +3558,28 @@ void SparcTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
if (!Node->hasAnyUseOfValue(0))
MI.getOperand(0).setReg(SP::G0);
}
+
+Instruction *SparcTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
+ Instruction *Inst,
+ AtomicOrdering Ord) const {
+ bool HasStoreSemantics =
+ isa<AtomicCmpXchgInst, AtomicRMWInst, StoreInst>(Inst);
+ if (HasStoreSemantics && isReleaseOrStronger(Ord))
+ return Builder.CreateFence(AtomicOrdering::Release);
+ return nullptr;
+}
+
+Instruction *SparcTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
+ Instruction *Inst,
+ AtomicOrdering Ord) const {
+ // V8 loads already come with an implicit acquire barrier, so there's no
+ // need to emit it again.
+ bool HasLoadSemantics = isa<AtomicCmpXchgInst, AtomicRMWInst, LoadInst>(Inst);
+ if (Subtarget->isV9() && HasLoadSemantics && isAcquireOrStronger(Ord))
+ return Builder.CreateFence(AtomicOrdering::Acquire);
+
+ // Plain seq_cst stores still need a trailing full barrier.
+ if (isa<StoreInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
+ return Builder.CreateFence(Ord);
+ return nullptr;
+}
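
Note on the two hooks above: once shouldInsertFencesForAtomic() returns true, LLVM's AtomicExpand pass calls emitLeadingFence/emitTrailingFence to wrap atomic operations in explicit fences. The following is a minimal standalone C++ sketch of the same decision table (hypothetical enum and helper names, not LLVM's API): a leading release fence before anything with store semantics, and a trailing fence only where the hardware does not already provide the ordering.

#include <cstdio>

// Hypothetical stand-ins for LLVM's AtomicOrdering and instruction kinds.
enum class Ord { Monotonic, Acquire, Release, AcqRel, SeqCst };
enum class Op { Load, Store, RMW, CmpXchg };

static bool isAcquireOrStronger(Ord O) {
  return O == Ord::Acquire || O == Ord::AcqRel || O == Ord::SeqCst;
}
static bool isReleaseOrStronger(Ord O) {
  return O == Ord::Release || O == Ord::AcqRel || O == Ord::SeqCst;
}

// Mirrors emitLeadingFence: a release fence before anything that stores.
static bool wantsLeadingReleaseFence(Op K, Ord O) {
  bool HasStoreSemantics = (K != Op::Load);
  return HasStoreSemantics && isReleaseOrStronger(O);
}

// Mirrors emitTrailingFence: on V9 (RMO) anything that loads needs an acquire
// barrier afterwards; on V8 (TSO) loads already order later accesses, so only
// a plain seq_cst store still needs a trailing full barrier.
static const char *trailingFence(bool IsV9, Op K, Ord O) {
  bool HasLoadSemantics = (K != Op::Store);
  if (IsV9 && HasLoadSemantics && isAcquireOrStronger(O))
    return "fence acquire";
  if (K == Op::Store && O == Ord::SeqCst)
    return "fence seq_cst";
  return "none";
}

int main() {
  // A seq_cst store on V8 gets a leading release fence plus a trailing
  // seq_cst fence; a seq_cst load on V8 needs no explicit fence at all.
  printf("V8 seq_cst store: leading=%d trailing=%s\n",
         wantsLeadingReleaseFence(Op::Store, Ord::SeqCst),
         trailingFence(false, Op::Store, Ord::SeqCst));
  printf("V8 seq_cst load:  leading=%d trailing=%s\n",
         wantsLeadingReleaseFence(Op::Load, Ord::SeqCst),
         trailingFence(false, Op::Load, Ord::SeqCst));
  return 0;
}
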
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.h b/llvm/lib/Target/Sparc/SparcISelLowering.h
index e7040f7..f3efd94 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -183,6 +183,11 @@ namespace llvm {
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
EVT VT) const override;
+ Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
+ AtomicOrdering Ord) const override;
+ Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
+ AtomicOrdering Ord) const override;
+
bool shouldInsertFencesForAtomic(const Instruction *I) const override {
// FIXME: We insert fences for each atomics and generate
// sub-optimal code for PSO/TSO. (Approximately nobody uses any
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
index e28f445..f66eb9d 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -653,6 +653,23 @@ bool SparcInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
.addImm(Offset);
return true;
}
+ case SP::V8BAR: {
+ assert(!Subtarget.isV9() &&
+ "V8BAR should not be emitted on V9 processors!");
+
+ // Emit stbar; ldstub [%sp-1], %g0
+ // The sequence acts as a full barrier on V8 systems.
+ MachineBasicBlock &MBB = *MI.getParent();
+ MachineInstr &InstSTBAR =
+ *BuildMI(MBB, MI, MI.getDebugLoc(), get(SP::STBAR));
+ MachineInstr &InstLDSTUB =
+ *BuildMI(MBB, MI, MI.getDebugLoc(), get(SP::LDSTUBri), SP::G0)
+ .addReg(SP::O6)
+ .addImm(-1);
+ MIBundleBuilder(MBB, InstSTBAR, InstLDSTUB);
+ MBB.erase(MI);
+ return true;
+ }
}
return false;
}
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.td b/llvm/lib/Target/Sparc/SparcInstrInfo.td
index 97e7fd7..bc192c2 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.td
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -578,6 +578,9 @@ class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
let isPseudo = 1;
}
+// Full memory barrier for V8.
+def V8BAR : Pseudo<(outs), (ins), "!V8BAR", []>, Requires<[HasNoV9]>;
+
// GETPCX for PIC
let Defs = [O7] in {
def GETPCX : Pseudo<(outs getPCX:$getpcseq), (ins), "$getpcseq", [] >;
@@ -1974,12 +1977,30 @@ def : Pat<(i32 (zextloadi1 ADDRri:$src)), (LDUBri ADDRri:$src)>;
def : Pat<(store (i32 0), ADDRrr:$dst), (STrr ADDRrr:$dst, (i32 G0))>;
def : Pat<(store (i32 0), ADDRri:$dst), (STri ADDRri:$dst, (i32 G0))>;
-// store bar for all atomic_fence in V8.
-let Predicates = [HasNoV9] in
- def : Pat<(atomic_fence timm, timm), (STBAR)>;
+// All load-type operations in V8 come with implicit acquire semantics.
+let Predicates = [HasNoV9] in {
+ // Acquire -> nop
+ def : Pat<(atomic_fence (i32 4), timm), (NOP)>;
+ // Release / AcqRel -> stbar
+ // Release -> stbar
+ // AcqRel and stronger -> stbar; ldstub [%sp-1], %g0
+ def : Pat<(atomic_fence timm, timm), (V8BAR)>;
+}
-let Predicates = [HasV9] in
+// We have to handle both the 32-bit and 64-bit cases.
+let Predicates = [HasV9] in {
+ // Acquire -> membar #LoadLoad | #LoadStore
+ def : Pat<(atomic_fence (i32 4), timm), (MEMBARi 0x5)>;
+ def : Pat<(atomic_fence (i64 4), timm), (MEMBARi 0x5)>;
+ // Release -> membar #LoadStore | #StoreStore
+ def : Pat<(atomic_fence (i32 5), timm), (MEMBARi 0xc)>;
+ def : Pat<(atomic_fence (i64 5), timm), (MEMBARi 0xc)>;
+ // AcqRel -> membar #LoadLoad | #LoadStore | #StoreStore
+ def : Pat<(atomic_fence (i32 6), timm), (MEMBARi 0xd)>;
+ def : Pat<(atomic_fence (i64 6), timm), (MEMBARi 0xd)>;
+ // SeqCst -> membar #StoreLoad | #LoadLoad | #LoadStore | #StoreStore
def : Pat<(atomic_fence timm, timm), (MEMBARi 0xf)>;
+}
// atomic_load addr -> load addr
def : Pat<(i32 (atomic_load_azext_8 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
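
For reference, the MEMBARi immediates in the V9 patterns above are ORs of the SPARC V9 mmask bits (#LoadLoad = 0x1, #StoreLoad = 0x2, #LoadStore = 0x4, #StoreStore = 0x8). A small standalone C++ sketch of how those constants decompose (plain C++, not part of the patch):

#include <cstdint>
#include <cstdio>

// SPARC V9 MEMBAR mmask bits.
constexpr uint32_t LoadLoad   = 0x1;
constexpr uint32_t StoreLoad  = 0x2;
constexpr uint32_t LoadStore  = 0x4;
constexpr uint32_t StoreStore = 0x8;

int main() {
  // The immediates used by the MEMBARi patterns above.
  printf("Acquire: 0x%x\n", LoadLoad | LoadStore);              // 0x5
  printf("Release: 0x%x\n", LoadStore | StoreStore);            // 0xc
  printf("AcqRel : 0x%x\n", LoadLoad | LoadStore | StoreStore); // 0xd
  printf("SeqCst : 0x%x\n",
         LoadLoad | StoreLoad | LoadStore | StoreStore);        // 0xf
  return 0;
}
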