author     Brox Chen <guochen2@amd.com>               2024-11-14 18:22:37 -0500
committer  GitHub <noreply@github.com>                2024-11-14 18:22:37 -0500
commit     abff8fe2a940212b1c43af2d86a68fc92849f019 (patch)
tree       09604cee163daafc644be46e7e0b5168f0756c86 /llvm/lib
parent     0f0e2fe97b6c771b7a70964bf321ad91788e6a22 (diff)
[AMDGPU][True16][MC] VINTERP instructions supporting true16/fake16 (#113634)
Update VInterp instructions with true16 and fake16 formats.
This patch covers the following instructions:
v_interp_p10_f16_f32
v_interp_p2_f16_f32
v_interp_p10_rtz_f16_f32
v_interp_p2_rtz_f16_f32
The dasm test vinterp-fake16.txt is removed and its test lines are merged into
vinterp.txt, which now covers both the true16 and fake16 cases.
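
Note on the disassembler hunk below: the new decodeOperand_VGPR_16 callback treats its 10-bit immediate as a packed pair of VGPR index (low 8 bits) and hi/lo half selector (bit 9); bit 8 is the IS_VGPR flag that the decoder asserts. The following minimal sketch mirrors that split outside of LLVM; Vgpr16Operand and decodeVgpr16 are illustrative names only, not part of the MC API, which instead builds the operand via AMDGPUDisassembler::createVGPR16Operand.

// Minimal standalone sketch (not LLVM code): how a 10-bit true16 VGPR source
// field splits into a register index and a hi/lo half, mirroring the logic of
// the new decodeOperand_VGPR_16 in the diff below.
#include <cassert>
#include <cstdio>

struct Vgpr16Operand {   // hypothetical stand-in for the decoded MC operand
  unsigned RegIdx;       // which 32-bit VGPR (v0..v255)
  bool IsHi;             // true -> high 16-bit half (v5.h), false -> low (v5.l)
};

static Vgpr16Operand decodeVgpr16(unsigned Imm) {
  assert(Imm < (1u << 10) && "10-bit encoding expected");
  // Bit 8 is the IS_VGPR flag that the in-tree decoder asserts; it never
  // reaches RegIdx because only the low 8 bits are kept.
  Vgpr16Operand Op;
  Op.RegIdx = Imm & 0xff;     // low 8 bits: VGPR number
  Op.IsHi = (Imm >> 9) & 1;   // bit 9: hi/lo 16-bit half
  return Op;
}

int main() {
  unsigned Imm = (1u << 9) | (1u << 8) | 5;   // hi half of v5, VGPR flag set
  Vgpr16Operand Op = decodeVgpr16(Imm);
  std::printf("v%u.%s\n", Op.RegIdx, Op.IsHi ? "h" : "l");   // prints "v5.h"
  return 0;
}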
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp   38
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.td                       8
-rw-r--r--  llvm/lib/Target/AMDGPU/VINTERPInstructions.td                137
3 files changed, 121 insertions, 62 deletions
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
index fdef986..7c293c1 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -363,6 +363,19 @@ static DecodeStatus decodeOperand_VSrcT16(MCInst &Inst, unsigned Imm,
                       (AMDGPU::OperandSemantics)OperandSemantics));
 }
 
+static DecodeStatus decodeOperand_VGPR_16(MCInst &Inst, unsigned Imm,
+                                          uint64_t /*Addr*/,
+                                          const MCDisassembler *Decoder) {
+  assert(isUInt<10>(Imm) && "10-bit encoding expected");
+  assert(Imm & AMDGPU::EncValues::IS_VGPR && "VGPR expected");
+
+  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
+
+  bool IsHi = Imm & (1 << 9);
+  unsigned RegIdx = Imm & 0xff;
+  return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
+}
+
 static DecodeStatus decodeOperand_KImmFP(MCInst &Inst, unsigned Imm,
                                          uint64_t Addr,
                                          const MCDisassembler *Decoder) {
@@ -763,14 +776,23 @@ void AMDGPUDisassembler::convertEXPInst(MCInst &MI) const {
 }
 
 void AMDGPUDisassembler::convertVINTERPInst(MCInst &MI) const {
-  if (MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_gfx11 ||
-      MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_gfx12 ||
-      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_gfx11 ||
-      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_gfx12 ||
-      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_gfx11 ||
-      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_gfx12 ||
-      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_gfx11 ||
-      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_gfx12) {
+  convertTrue16OpSel(MI);
+  if (MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx11 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx11 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx12 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx12 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx11 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx11 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx12 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx12 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx11 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx11 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx12 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx12 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx11 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx11 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx12 ||
+      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx12) {
     // The MCInst has this field that is not directly encoded in the
     // instruction.
     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::op_sel);
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index 8524301..d47ff9f 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -1244,6 +1244,14 @@ def VRegSrc_128: SrcReg9<VReg_128, "OPW128">;
 def VRegSrc_256: SrcReg9<VReg_256, "OPW256">;
 def VRegOrLdsSrc_32 : SrcReg9<VRegOrLds_32, "OPW32">;
 
+// True 16 Operands
+def VRegSrc_16 : RegisterOperand<VGPR_16> {
+  let DecoderMethod = "decodeOperand_VGPR_16";
+  let EncoderMethod = "getMachineOpValueT16";
+}
+def VRegSrc_fake16: SrcReg9<VGPR_32, "OPW16"> {
+  let EncoderMethod = "getMachineOpValueT16";
+}
 //===----------------------------------------------------------------------===//
 // VGPRSrc_*
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/VINTERPInstructions.td b/llvm/lib/Target/AMDGPU/VINTERPInstructions.td
index 81768c1..860a3d7 100644
--- a/llvm/lib/Target/AMDGPU/VINTERPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VINTERPInstructions.td
@@ -11,29 +11,30 @@
 //===----------------------------------------------------------------------===//
 
 class VINTERPe <VOPProfile P> : Enc64 {
-  bits<8> vdst;
+  bits<11> vdst;
   bits<4> src0_modifiers;
-  bits<9> src0;
+  bits<11> src0;
   bits<3> src1_modifiers;
-  bits<9> src1;
+  bits<11> src1;
   bits<3> src2_modifiers;
-  bits<9> src2;
+  bits<11> src2;
   bits<1> clamp;
   bits<3> waitexp;
 
   let Inst{31-26} = 0x33; // VOP3P encoding
   let Inst{25-24} = 0x1; // VINTERP sub-encoding
 
-  let Inst{7-0} = vdst;
+  let Inst{7-0} = vdst{7-0};
   let Inst{10-8} = waitexp;
-  let Inst{11} = !if(P.HasOpSel, src0_modifiers{2}, 0); // op_sel(0)
-  let Inst{12} = !if(P.HasOpSel, src1_modifiers{2}, 0); // op_sel(1)
-  let Inst{13} = !if(P.HasOpSel, src2_modifiers{2}, 0); // op_sel(2)
-  let Inst{14} = !if(P.HasOpSel, src0_modifiers{3}, 0); // op_sel(3)
+  // Fields for hi/lo 16-bits of register selection
+  let Inst{11} = !if(P.HasSrc0, src0_modifiers{2}, 0);
+  let Inst{12} = !if(P.HasSrc1, src1_modifiers{2}, 0);
+  let Inst{13} = !if(P.HasSrc2, src2_modifiers{2}, 0);
+  let Inst{14} = !if(P.HasDst, src0_modifiers{3}, 0);
   let Inst{15} = clamp;
-  let Inst{40-32} = src0;
-  let Inst{49-41} = src1;
-  let Inst{58-50} = src2;
+  let Inst{40-32} = src0{8-0};
+  let Inst{49-41} = src1{8-0};
+  let Inst{58-50} = src2{8-0};
   let Inst{61} = src0_modifiers{0}; // neg(0)
   let Inst{62} = src1_modifiers{0}; // neg(1)
   let Inst{63} = src2_modifiers{0}; // neg(2)
@@ -60,9 +61,10 @@ class VINTERP_Pseudo <string OpName, VOPProfile P, list<dag> pattern = []> :
   let VINTERP = 1;
 }
 
-class VINTERP_Real <VOP_Pseudo ps, int EncodingFamily> :
-  VOP3_Real <ps, EncodingFamily> {
+class VINTERP_Real <VOP_Pseudo ps, int EncodingFamily, string asmName> :
+  VOP3_Real <ps, EncodingFamily, asmName> {
   let VINTERP = 1;
+  let IsSingle = 1;
 }
 
 def VOP3_VINTERP_F32 : VOPProfile<[f32, f32, f32, f32]> {
@@ -83,22 +85,35 @@ def VOP3_VINTERP_F32 : VOPProfile<[f32, f32, f32, f32]> {
   let Asm64 = " $vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$clamp$waitexp";
 }
 
-class VOP3_VINTERP_F16 <list<ValueType> ArgVT> : VOPProfile<ArgVT> {
-  let HasOpSel = 1;
-  let HasModifiers = 1;
+class VOP3_VINTERP_F16_t16 <list<ValueType> ArgVT> : VOPProfile_True16<VOPProfile<ArgVT>> {
+  let Src0Mod = FPT16VRegInputMods</*Fake16*/0>;
+  let Src1Mod = FPVRegInputMods;
+  let Src2Mod = !if(!eq(ArgVT[3].Size, 16), FPT16VRegInputMods</*Fake16*/0>,
+                    FPVRegInputMods);
+  let Ins64 = (ins Src0Mod:$src0_modifiers, VRegSrc_16:$src0,
+                   Src1Mod:$src1_modifiers, VRegSrc_32:$src1,
+                   Src2Mod:$src2_modifiers,
+                   !if(!eq(ArgVT[3].Size, 16), VRegSrc_16, VRegSrc_32):$src2,
+                   Clamp:$clamp, op_sel0:$op_sel,
+                   WaitEXP:$waitexp);
 
-  let Src0Mod = FPVRegInputMods;
+  let Asm64 = "$vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$clamp$op_sel$waitexp";
+}
+
+class VOP3_VINTERP_F16_fake16 <list<ValueType> ArgVT> : VOPProfile_Fake16<VOPProfile<ArgVT>> {
+  let Src0Mod = FPT16VRegInputMods</*Fake16*/1>;
   let Src1Mod = FPVRegInputMods;
-  let Src2Mod = FPVRegInputMods;
+  let Src2Mod = !if(!eq(ArgVT[3].Size, 16), FPT16VRegInputMods</*Fake16*/1>,
+                    FPVRegInputMods);
 
-  let Outs64 = (outs VGPR_32:$vdst);
-  let Ins64 = (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0,
+  let Ins64 = (ins Src0Mod:$src0_modifiers, VRegSrc_fake16:$src0,
                    Src1Mod:$src1_modifiers, VRegSrc_32:$src1,
-                   Src2Mod:$src2_modifiers, VRegSrc_32:$src2,
+                   Src2Mod:$src2_modifiers,
+                   !if(!eq(ArgVT[3].Size, 16), VRegSrc_fake16, VRegSrc_32):$src2,
                    Clamp:$clamp, op_sel0:$op_sel,
                    WaitEXP:$waitexp);
-  let Asm64 = " $vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$clamp$op_sel$waitexp";
+  let Asm64 = "$vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$clamp$op_sel$waitexp";
 }
 
 //===----------------------------------------------------------------------===//
@@ -107,20 +122,26 @@ class VOP3_VINTERP_F16 <list<ValueType> ArgVT> : VOPProfile<ArgVT> {
 
 let SubtargetPredicate = HasVINTERPEncoding in {
 
+multiclass VINTERP_t16<string OpName, list<ValueType> ArgVT> {
+  let True16Predicate = UseRealTrue16Insts in {
+    def _t16 : VINTERP_Pseudo<OpName#"_t16", VOP3_VINTERP_F16_t16<ArgVT>> ;
+  }
+  let True16Predicate = UseFakeTrue16Insts in {
+    def _fake16 : VINTERP_Pseudo<OpName#"_fake16", VOP3_VINTERP_F16_fake16<ArgVT>> ;
+  }
+}
+
 let Uses = [M0, EXEC, MODE] in {
 def V_INTERP_P10_F32_inreg : VINTERP_Pseudo <"v_interp_p10_f32", VOP3_VINTERP_F32>;
 def V_INTERP_P2_F32_inreg : VINTERP_Pseudo <"v_interp_p2_f32", VOP3_VINTERP_F32>;
-def V_INTERP_P10_F16_F32_inreg :
-  VINTERP_Pseudo <"v_interp_p10_f16_f32", VOP3_VINTERP_F16<[f32, f32, f32, f32]>>;
-def V_INTERP_P2_F16_F32_inreg :
-  VINTERP_Pseudo <"v_interp_p2_f16_f32", VOP3_VINTERP_F16<[f16, f32, f32, f32]>>;
+
+defm V_INTERP_P10_F16_F32_inreg : VINTERP_t16<"v_interp_p10_f16_f32", [f32, f16, f32, f16]>;
+defm V_INTERP_P2_F16_F32_inreg : VINTERP_t16<"v_interp_p2_f16_f32", [f16, f16, f32, f32]>;
 } // Uses = [M0, EXEC, MODE]
 
 let Uses = [M0, EXEC] in {
-def V_INTERP_P10_RTZ_F16_F32_inreg :
-  VINTERP_Pseudo <"v_interp_p10_rtz_f16_f32", VOP3_VINTERP_F16<[f32, f32, f32, f32]>>;
-def V_INTERP_P2_RTZ_F16_F32_inreg :
-  VINTERP_Pseudo <"v_interp_p2_rtz_f16_f32", VOP3_VINTERP_F16<[f16, f32, f32, f32]>>;
+defm V_INTERP_P10_RTZ_F16_F32_inreg : VINTERP_t16<"v_interp_p10_rtz_f16_f32", [f32, f16, f32, f16]>;
+defm V_INTERP_P2_RTZ_F16_F32_inreg : VINTERP_t16 <"v_interp_p2_rtz_f16_f32", [f16, f16, f32, f32]>;
 } // Uses = [M0, EXEC]
 
 } // SubtargetPredicate = HasVINTERPEncoding.
@@ -137,11 +158,6 @@ class VInterpF32Pat <SDPatternOperator op, Instruction inst> : GCNPat <
     7) /* wait_exp */
 >;
 
-def VINTERP_OPSEL {
-  int LOW = 0;
-  int HIGH = 0xa;
-}
-
 class VInterpF16Pat <SDPatternOperator op, Instruction inst,
                      ValueType dst_type, bit high,
                      list<ComplexPattern> pat> : GCNPat <
@@ -167,45 +183,58 @@ multiclass VInterpF16Pat <SDPatternOperator op, Instruction inst,
 
 def : VInterpF32Pat<int_amdgcn_interp_inreg_p10, V_INTERP_P10_F32_inreg>;
 def : VInterpF32Pat<int_amdgcn_interp_inreg_p2, V_INTERP_P2_F32_inreg>;
+
+let True16Predicate = UseFakeTrue16Insts in {
 defm : VInterpF16Pat<int_amdgcn_interp_inreg_p10_f16,
-                     V_INTERP_P10_F16_F32_inreg, f32,
+                     V_INTERP_P10_F16_F32_inreg_fake16, f32,
                      [VINTERPModsHi, VINTERPMods, VINTERPModsHi]>;
 defm : VInterpF16Pat<int_amdgcn_interp_inreg_p2_f16,
-                     V_INTERP_P2_F16_F32_inreg, f16,
+                     V_INTERP_P2_F16_F32_inreg_fake16, f16,
                      [VINTERPModsHi, VINTERPMods, VINTERPMods]>;
 defm : VInterpF16Pat<int_amdgcn_interp_p10_rtz_f16,
-                     V_INTERP_P10_RTZ_F16_F32_inreg, f32,
+                     V_INTERP_P10_RTZ_F16_F32_inreg_fake16, f32,
                      [VINTERPModsHi, VINTERPMods, VINTERPModsHi]>;
 defm : VInterpF16Pat<int_amdgcn_interp_p2_rtz_f16,
-                     V_INTERP_P2_RTZ_F16_F32_inreg, f16,
+                     V_INTERP_P2_RTZ_F16_F32_inreg_fake16, f16,
                      [VINTERPModsHi, VINTERPMods, VINTERPMods]>;
+}
 
 //===----------------------------------------------------------------------===//
 // VINTERP Real Instructions
 //===----------------------------------------------------------------------===//
 
-multiclass VINTERP_Real_gfx11 <bits<7> op> {
-  let AssemblerPredicate = isGFX11Only, DecoderNamespace = "GFX11" in {
+multiclass VINTERP_Real_gfx11 <bits<7> op, string asmName> {
+  defvar ps = !cast<VOP3_Pseudo>(NAME);
+  let AssemblerPredicate = isGFX11Only, DecoderNamespace = "GFX11" #
+                           !if(ps.Pfl.IsRealTrue16, "", "_FAKE16") in {
     def _gfx11 :
-      VINTERP_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.GFX11>,
-      VINTERPe_gfx11<op, !cast<VOP3_Pseudo>(NAME).Pfl>;
+      VINTERP_Real<ps, SIEncodingFamily.GFX11, asmName>,
+      VINTERPe_gfx11<op, ps.Pfl>;
   }
 }
 
-multiclass VINTERP_Real_gfx12 <bits<7> op> {
-  let AssemblerPredicate = isGFX12Only, DecoderNamespace = "GFX12" in {
+multiclass VINTERP_Real_gfx12 <bits<7> op, string asmName> {
+  defvar ps = !cast<VOP3_Pseudo>(NAME);
+  let AssemblerPredicate = isGFX12Only, DecoderNamespace = "GFX12" #
+                           !if(ps.Pfl.IsRealTrue16, "", "_FAKE16") in {
    def _gfx12 :
-      VINTERP_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.GFX12>,
-      VINTERPe_gfx12<op, !cast<VOP3_Pseudo>(NAME).Pfl>;
+      VINTERP_Real<ps, SIEncodingFamily.GFX12, asmName>,
+      VINTERPe_gfx12<op, ps.Pfl>;
  }
 }
 
-multiclass VINTERP_Real_gfx11_gfx12 <bits<7> op> :
-  VINTERP_Real_gfx11<op>, VINTERP_Real_gfx12<op>;
+multiclass VINTERP_Real_gfx11_gfx12 <bits<7> op, string asmName = !cast<VOP3_Pseudo>(NAME).Mnemonic, string opName = NAME> :
+  VINTERP_Real_gfx11<op, asmName>, VINTERP_Real_gfx12<op, asmName>;
+
+multiclass VINTERP_Real_t16_and_fake16_gfx11_gfx12 <bits<7> op, string asmName = !cast<VOP3_Pseudo>(NAME).Mnemonic, string opName = NAME> {
+  defm _t16: VINTERP_Real_gfx11_gfx12<op, asmName, opName#"_t16">;
+  defm _fake16: VINTERP_Real_gfx11_gfx12<op, asmName, opName#"_fake16">;
+}
+
 
 defm V_INTERP_P10_F32_inreg : VINTERP_Real_gfx11_gfx12<0x000>;
 defm V_INTERP_P2_F32_inreg : VINTERP_Real_gfx11_gfx12<0x001>;
-defm V_INTERP_P10_F16_F32_inreg : VINTERP_Real_gfx11_gfx12<0x002>;
-defm V_INTERP_P2_F16_F32_inreg : VINTERP_Real_gfx11_gfx12<0x003>;
-defm V_INTERP_P10_RTZ_F16_F32_inreg : VINTERP_Real_gfx11_gfx12<0x004>;
-defm V_INTERP_P2_RTZ_F16_F32_inreg : VINTERP_Real_gfx11_gfx12<0x005>;
+defm V_INTERP_P10_F16_F32_inreg : VINTERP_Real_t16_and_fake16_gfx11_gfx12<0x002, "v_interp_p10_f16_f32">;
+defm V_INTERP_P2_F16_F32_inreg : VINTERP_Real_t16_and_fake16_gfx11_gfx12<0x003, "v_interp_p2_f16_f32">;
+defm V_INTERP_P10_RTZ_F16_F32_inreg : VINTERP_Real_t16_and_fake16_gfx11_gfx12<0x004, "v_interp_p10_rtz_f16_f32">;
+defm V_INTERP_P2_RTZ_F16_F32_inreg : VINTERP_Real_t16_and_fake16_gfx11_gfx12<0x005, "v_interp_p2_rtz_f16_f32">;
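
A closing note on the VINTERPe encoding change above: the classic 9-bit source slots (Inst{40-32}, Inst{49-41}, Inst{58-50}) and the 8-bit vdst slot are unchanged, while instruction bits 11-14 (formerly labelled op_sel) now carry the hi/lo 16-bit half selection taken from the operand modifiers. The sketch below only illustrates that bit layout under the assumption that all operands are present; it is not the actual MC code emitter, and the neg modifier bits at 61-63 are left out.

// Illustrative layout-only packer for the 64-bit VINTERP word, following the
// field assignments in the VINTERPe class above (assumption: all operands
// present, neg/abs modifiers omitted).
#include <cstdint>
#include <cstdio>

static uint64_t packVinterp(unsigned Vdst, unsigned Src0, unsigned Src1,
                            unsigned Src2, bool Src0Hi, bool Src1Hi,
                            bool Src2Hi, bool DstHi, unsigned WaitExp,
                            bool Clamp) {
  uint64_t Inst = 0;
  Inst |= 0x33ull << 26;                 // Inst{31-26}: VOP3P encoding
  Inst |= 0x1ull << 24;                  // Inst{25-24}: VINTERP sub-encoding
  Inst |= uint64_t(Vdst & 0xff);         // Inst{7-0}  = vdst{7-0}
  Inst |= uint64_t(WaitExp & 0x7) << 8;  // Inst{10-8} = waitexp
  Inst |= uint64_t(Src0Hi) << 11;        // hi/lo half of src0
  Inst |= uint64_t(Src1Hi) << 12;        // hi/lo half of src1
  Inst |= uint64_t(Src2Hi) << 13;        // hi/lo half of src2
  Inst |= uint64_t(DstHi) << 14;         // dst half (src0_modifiers{3} above)
  Inst |= uint64_t(Clamp) << 15;         // clamp
  Inst |= uint64_t(Src0 & 0x1ff) << 32;  // Inst{40-32} = src0{8-0}
  Inst |= uint64_t(Src1 & 0x1ff) << 41;  // Inst{49-41} = src1{8-0}
  Inst |= uint64_t(Src2 & 0x1ff) << 50;  // Inst{58-50} = src2{8-0}
  return Inst;
}

int main() {
  // Source operands use the 9-bit VSRC encoding, where 256 + N selects VGPR N.
  uint64_t W = packVinterp(/*Vdst=*/0, /*Src0=*/0x101, /*Src1=*/0x102,
                           /*Src2=*/0x103, /*Src0Hi=*/true, false, false,
                           false, /*WaitExp=*/0, false);
  std::printf("0x%016llx\n", (unsigned long long)W);
  return 0;
}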