Diffstat (limited to 'llvm/lib/Target/PowerPC')
8 files changed, 144 insertions, 34 deletions
diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
index 5779d4e..0e8828f 100644
--- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
+++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
@@ -243,8 +243,6 @@ public:
   createObjectTargetWriter() const override {
     return createPPCXCOFFObjectWriter(TT.isArch64Bit());
   }
-
-  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
 };
 } // end anonymous namespace
 
@@ -279,13 +277,6 @@ ELFPPCAsmBackend::getFixupKind(StringRef Name) const {
   return std::nullopt;
 }
 
-std::optional<MCFixupKind>
-XCOFFPPCAsmBackend::getFixupKind(StringRef Name) const {
-  return StringSwitch<std::optional<MCFixupKind>>(Name)
-      .Case("R_REF", PPC::fixup_ppc_nofixup)
-      .Default(std::nullopt);
-}
-
 MCAsmBackend *llvm::createPPCAsmBackend(const Target &T,
                                         const MCSubtargetInfo &STI,
                                         const MCRegisterInfo &MRI,
diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h
index 9e8ee9f..df0c666 100644
--- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h
+++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h
@@ -48,8 +48,7 @@ enum Fixups {
 
   /// Not a true fixup, but ties a symbol to a call to __tls_get_addr for the
   /// TLS general and local dynamic models, or inserts the thread-pointer
-  /// register number. It can also be used to tie the ref symbol to prevent it
-  /// from being garbage collected on AIX.
+  /// register number.
   fixup_ppc_nofixup,
 
   /// A 16-bit fixup corresponding to lo16(_foo) with implied 3 zero bits for
diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
index 8baf866..1af2f9c 100644
--- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
+++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
@@ -220,8 +220,6 @@ bool PPCELFMCAsmInfo::evaluateAsRelocatableImpl(const MCSpecifierExpr &Expr,
   return evaluateAsRelocatable(Expr, Res, Asm);
 }
 
-void PPCXCOFFMCAsmInfo::anchor() {}
-
 PPCXCOFFMCAsmInfo::PPCXCOFFMCAsmInfo(bool Is64Bit, const Triple &T) {
   if (T.getArch() == Triple::ppc64le || T.getArch() == Triple::ppcle)
     report_fatal_error("XCOFF is not supported for little-endian targets");
diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
index 0f945b3..6af1bd7 100644
--- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
+++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
@@ -33,8 +33,6 @@ public:
 };
 
 class PPCXCOFFMCAsmInfo : public MCAsmInfoXCOFF {
-  void anchor() override;
-
 public:
   explicit PPCXCOFFMCAsmInfo(bool is64Bit, const Triple &);
   void printSpecifierExpr(raw_ostream &OS,
diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp
index f75ab62..a04f404 100644
--- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp
+++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp
@@ -56,6 +56,8 @@ std::pair<uint8_t, uint8_t> PPCXCOFFObjectWriter::getRelocTypeAndSignSize(
   switch ((unsigned)Fixup.getKind()) {
   default:
     report_fatal_error("Unimplemented fixup kind.");
+  case XCOFF::RelocationType::R_REF:
+    return {XCOFF::RelocationType::R_REF, 0};
   case PPC::fixup_ppc_half16: {
     const uint8_t SignAndSizeForHalf16 = EncodedSignednessIndicator | 15;
     switch (Specifier) {
@@ -96,12 +98,6 @@ std::pair<uint8_t, uint8_t> PPCXCOFFObjectWriter::getRelocTypeAndSignSize(
     return {XCOFF::RelocationType::R_RBR, EncodedSignednessIndicator | 25};
   case PPC::fixup_ppc_br24abs:
     return {XCOFF::RelocationType::R_RBA, EncodedSignednessIndicator | 25};
-  case PPC::fixup_ppc_nofixup: {
-    if (Specifier == PPC::S_None)
-      return {XCOFF::RelocationType::R_REF, 0};
-    else
-      llvm_unreachable("Unsupported Modifier");
-  } break;
   case FK_Data_4:
   case FK_Data_8:
     const uint8_t SignAndSizeForFKData =
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 459525e..f179873 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -7296,9 +7296,17 @@ SDValue PPCTargetLowering::LowerFormalArguments_AIX(
     if (!ArgVT.isVector() && !ValVT.isVector() && ArgVT.isInteger() &&
         ValVT.isInteger() &&
         ArgVT.getScalarSizeInBits() < ValVT.getScalarSizeInBits()) {
-      SDValue ArgValueTrunc = DAG.getNode(
-          ISD::TRUNCATE, dl, ArgVT.getSimpleVT() == MVT::i1 ? MVT::i8 : ArgVT,
-          ArgValue);
+      // It is possible to have either real integer values
+      // or integers that were not originally integers.
+      // In the latter case, these could have came from structs,
+      // and these integers would not have an extend on the parameter.
+      // Since these types of integers do not have an extend specified
+      // in the first place, the type of extend that we do should not matter.
+      EVT TruncatedArgVT = ArgVT.isSimple() && ArgVT.getSimpleVT() == MVT::i1
+                               ? MVT::i8
+                               : ArgVT;
+      SDValue ArgValueTrunc =
+          DAG.getNode(ISD::TRUNCATE, dl, TruncatedArgVT, ArgValue);
       SDValue ArgValueExt =
           ArgSignExt ? DAG.getSExtOrTrunc(ArgValueTrunc, dl, ValVT)
                      : DAG.getZExtOrTrunc(ArgValueTrunc, dl, ValVT);
diff --git a/llvm/lib/Target/PowerPC/PPCInstrP10.td b/llvm/lib/Target/PowerPC/PPCInstrP10.td
index d295f35..1dc485d 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrP10.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrP10.td
@@ -2159,8 +2159,115 @@ let AddedComplexity = 400, Predicates = [IsISA3_1, HasVSX] in {
                       (COPY_TO_REGCLASS $VRB, VSRC), 2)))>;
 }
 
-class XXEvalPattern <dag pattern, bits<8> imm> :
-  Pat<(v4i32 pattern), (XXEVAL $vA, $vB, $vC, imm)> {}
+// =============================================================================
+// XXEVAL Instruction Pattern Definitions
+// =============================================================================
+//
+// XXEVAL instruction performs 256 different logical operations on three vector
+// operands using an 8-bit immediate value to select the operation.
+// Format: xxeval XT, XA, XB, XC, IMM
+// For example:
+// Equivalent function A?xor(B,C):and(B,C) is performed by
+// xxeval XT, XA, XB, XC, 22
+//
+// REGISTER CLASS CONSTRAINTS:
+// - XXEVAL natively supports: VSRC register class [v4i32, v4f32, v2f64, v2i64]
+// - Other vector types [v16i8, v8i16] require COPY_TO_REGCLASS to/from VRRC
+// =============================================================================
+
+class XXEvalPattern<dag pattern, bits<8> imm>
+    : Pat<(v4i32 pattern), (XXEVAL $vA, $vB, $vC, imm)> {}
+
+class XXEvalPatterns<ValueType Vt, dag InputPattern, bits<8> Imm>
+    : Pat<(Vt InputPattern),
+          !if(!or(!eq(Vt, v4i32), !eq(Vt, v2i64)),
+              // VSRC path: direct XXEVAL for v4i32 and v2i64
+              (XXEVAL $vA, $vB, $vC, Imm),
+              // VRRC path: wrap with COPY_TO_REGCLASS for other types
+              (COPY_TO_REGCLASS(XXEVAL(COPY_TO_REGCLASS Vt:$vA, VSRC),
+                                (COPY_TO_REGCLASS Vt:$vB, VSRC),
+                                (COPY_TO_REGCLASS Vt:$vC, VSRC), Imm),
+                  VRRC))> {}
+
+// =============================================================================
+// PatFrags for Bitcast-Aware Vector bitwise Operations
+//
+// Each PatFrags defines TWO alternatives for pattern matcher to choose:
+// - Direct operation (for v4i32)
+// - Bitcast operation (for other types: v2i64, v16i8, v8i16)
+// =============================================================================
+
+// Basic Binary Operations
+def VAnd
+    : PatFrags<(ops node:$a, node:$b), [(and node:$a, node:$b),
+                                        (bitconvert(and
+                                            (v4i32(bitconvert node:$a)),
+                                            (v4i32(bitconvert node:$b))))]>;
+
+def VXor
+    : PatFrags<(ops node:$a, node:$b), [(xor node:$a, node:$b),
+                                        (bitconvert(xor
+                                            (v4i32(bitconvert node:$a)),
+                                            (v4i32(bitconvert node:$b))))]>;
+
+def VOr : PatFrags<(ops node:$a, node:$b), [(or node:$a, node:$b),
+                                            (bitconvert(or
+                                                (v4i32(bitconvert node:$a)),
+                                                (v4i32(bitconvert node:$b))))]>;
+
+def VNot
+    : PatFrags<(ops node:$a), [(vnot node:$a),
+                               (bitconvert(vnot(v4i32(bitconvert node:$a))))]>;
+
+// Derived bitwise operations
+// Vector NOR operation (not(or))
+def VNor
+    : PatFrags<(ops node:$a, node:$b), [(vnot(or node:$a, node:$b)),
+                                        (bitconvert(vnot(or
+                                            (v4i32(bitconvert node:$a)),
+                                            (v4i32(bitconvert node:$b)))))]>;
+
+// Vector EQV operation (not(xor))
+def VEqv
+    : PatFrags<(ops node:$a, node:$b), [(vnot(xor node:$a, node:$b)),
+                                        (bitconvert(vnot(xor
+                                            (v4i32(bitconvert node:$a)),
+                                            (v4i32(bitconvert node:$b)))))]>;
+
+// =============================================================================
+// XXEVAL Ternary Pattern Multiclass: XXEvalTernarySelectAnd
+// This class matches the equivalent Ternary Operation: A ? f(B,C) : AND(B,C)
+// and emit the corresponding xxeval instruction with the imm value.
+//
+// The patterns implement xxeval vector select operations where:
+// - A is the selector vector
+// - f(B,C) is the "true" case op on vectors B and C (XOR, NOR, EQV, or NOT)
+// - AND(B,C) is the "false" case op on vectors B and C
+// =============================================================================
+multiclass XXEvalTernarySelectAnd<ValueType Vt> {
+  // Pattern: A ? XOR(B,C) : AND(B,C) XXEVAL immediate value: 22
+  def : XXEvalPatterns<
+            Vt, (vselect Vt:$vA, (VXor Vt:$vB, Vt:$vC), (VAnd Vt:$vB, Vt:$vC)),
+            22>;
+
+  // Pattern: A ? NOR(B,C) : AND(B,C) XXEVAL immediate value: 24
+  def : XXEvalPatterns<
+            Vt, (vselect Vt:$vA, (VNor Vt:$vB, Vt:$vC), (VAnd Vt:$vB, Vt:$vC)),
+            24>;
+
+  // Pattern: A ? EQV(B,C) : AND(B,C) XXEVAL immediate value: 25
+  def : XXEvalPatterns<
+            Vt, (vselect Vt:$vA, (VEqv Vt:$vB, Vt:$vC), (VAnd Vt:$vB, Vt:$vC)),
+            25>;
+
+  // Pattern: A ? NOT(C) : AND(B,C) XXEVAL immediate value: 26
+  def : XXEvalPatterns<
+            Vt, (vselect Vt:$vA, (VNot Vt:$vC), (VAnd Vt:$vB, Vt:$vC)), 26>;
+
+  // Pattern: A ? NOT(B) : AND(B,C) XXEVAL immediate value: 28
+  def : XXEvalPatterns<
+            Vt, (vselect Vt:$vA, (VNot Vt:$vB), (VAnd Vt:$vB, Vt:$vC)), 28>;
+}
 
 let Predicates = [PrefixInstrs, HasP10Vector] in {
   let AddedComplexity = 400 in {
@@ -2270,6 +2377,11 @@ let Predicates = [PrefixInstrs, HasP10Vector] in {
     // (xor A, (or B, C))
    def : XXEvalPattern<(xor v4i32:$vA, (or v4i32:$vB, v4i32:$vC)), 120>;
 
+    // XXEval Patterns for ternary Operations.
+    foreach Ty = [v4i32, v2i64, v8i16, v16i8] in {
+      defm : XXEvalTernarySelectAnd<Ty>;
+    }
+
     // Anonymous patterns to select prefixed VSX loads and stores.
     // Load / Store f128
     def : Pat<(f128 (load PDForm:$src)),
diff --git a/llvm/lib/Target/PowerPC/PPCMachineScheduler.cpp b/llvm/lib/Target/PowerPC/PPCMachineScheduler.cpp
index 5eb1f01..b7e2263 100644
--- a/llvm/lib/Target/PowerPC/PPCMachineScheduler.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMachineScheduler.cpp
@@ -100,10 +100,14 @@ bool PPCPreRASchedStrategy::tryCandidate(SchedCandidate &Cand,
   // This is a best effort to set things up for a post-RA pass. Optimizations
   // like generating loads of multiple registers should ideally be done within
   // the scheduler pass by combining the loads during DAG postprocessing.
-  const ClusterInfo *CandCluster = Cand.AtTop ? TopCluster : BotCluster;
-  const ClusterInfo *TryCandCluster = TryCand.AtTop ? TopCluster : BotCluster;
-  if (tryGreater(TryCandCluster && TryCandCluster->contains(TryCand.SU),
-                 CandCluster && CandCluster->contains(Cand.SU), TryCand, Cand,
+  unsigned CandZoneCluster = Cand.AtTop ? TopClusterID : BotClusterID;
+  unsigned TryCandZoneCluster = TryCand.AtTop ? TopClusterID : BotClusterID;
+  bool CandIsClusterSucc =
+      isTheSameCluster(CandZoneCluster, Cand.SU->ParentClusterIdx);
+  bool TryCandIsClusterSucc =
+      isTheSameCluster(TryCandZoneCluster, TryCand.SU->ParentClusterIdx);
+
+  if (tryGreater(TryCandIsClusterSucc, CandIsClusterSucc, TryCand, Cand,
                  Cluster))
     return TryCand.Reason != NoCand;
 
@@ -189,10 +193,14 @@ bool PPCPostRASchedStrategy::tryCandidate(SchedCandidate &Cand,
     return TryCand.Reason != NoCand;
 
   // Keep clustered nodes together.
-  const ClusterInfo *CandCluster = Cand.AtTop ? TopCluster : BotCluster;
-  const ClusterInfo *TryCandCluster = TryCand.AtTop ? TopCluster : BotCluster;
-  if (tryGreater(TryCandCluster && TryCandCluster->contains(TryCand.SU),
-                 CandCluster && CandCluster->contains(Cand.SU), TryCand, Cand,
+  unsigned CandZoneCluster = Cand.AtTop ? TopClusterID : BotClusterID;
+  unsigned TryCandZoneCluster = TryCand.AtTop ? TopClusterID : BotClusterID;
+  bool CandIsClusterSucc =
+      isTheSameCluster(CandZoneCluster, Cand.SU->ParentClusterIdx);
+  bool TryCandIsClusterSucc =
+      isTheSameCluster(TryCandZoneCluster, TryCand.SU->ParentClusterIdx);
+
+  if (tryGreater(TryCandIsClusterSucc, CandIsClusterSucc, TryCand, Cand,
                  Cluster))
     return TryCand.Reason != NoCand;
 
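
Illustration (not part of the change above): the XXEvalTernarySelectAnd patterns in PPCInstrP10.td target vector-select idioms of the form A ? f(B,C) : AND(B,C). A minimal C sketch of the imm-22 shape A ? XOR(B,C) : AND(B,C) is shown below, written with the standard Altivec/VSX intrinsics from altivec.h. The function name is made up for the example, and whether a particular compiler build folds it into a single xxeval depends on targeting Power10 and on these patterns actually being selected.

    #include <altivec.h>

    /* vec_sel(f, t, a) takes bits from t where the mask a is 1 and from f
     * where it is 0, i.e. a bitwise a ? t : f.  With a per-element all-ones /
     * all-zeros selector this is the A ? XOR(B,C) : AND(B,C) ternary shape
     * that the imm-22 pattern describes. */
    vector unsigned int xor_else_and(vector unsigned int a,  /* selector A */
                                     vector unsigned int b,
                                     vector unsigned int c) {
      vector unsigned int t = vec_xor(b, c); /* "true" leg  */
      vector unsigned int f = vec_and(b, c); /* "false" leg */
      return vec_sel(f, t, a);
    }

Per the comment block added in the diff, this particular combination corresponds to the xxeval immediate value 22.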