author    Matt Arsenault <Matthew.Arsenault@amd.com>  2023-06-14 10:53:56 -0400
committer Matt Arsenault <Matthew.Arsenault@amd.com>  2023-07-05 17:23:49 -0400
commit    549166624851094d251f1625d133cfea2dce4ceb (patch)
tree      d48982c4c037823a59d4c457924b1f8a69cc3eaa /llvm/lib
parent    ed556a1ad5461e5cce105f8a42b802cc5e9dbb28 (diff)
AMDGPU: Correctly lower llvm.exp.f32
The library expansion has too many paths for all the permutations of DAZ, unsafe math, and the three exp functions. It's easier to expand it in the backend, where all of these are known. The library expansion also currently misses the no-infinity check on the overflow clamp, which this lowering handles and can optimize out.

Some of the <3 x half> fast tests regress due to vector widening dropping flags; that will be fixed separately.

Apparently there is no exp10 intrinsic, but there should be. Adds some currently-dead code in preparation for adding one, while following along with the current library expansion.
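For orientation, the approximate path in this patch rests on the identity e^x = 2^(x * log2(e)), with v_exp_f32 supplying the 2^x. A minimal host-side sketch of that identity (illustrative only; the function name is invented, and libm's exp2 stands in for the hardware instruction):

    #include <cmath>

    // Sketch: e^x as a single multiply feeding a base-2 exponential.
    // On AMDGPU the exp2 here would be v_exp_f32 (AMDGPUISD::EXP).
    float expViaExp2(float x) {
      const float Log2E = 0x1.715476p+0f; // log2(e) rounded to f32
      return std::exp2(x * Log2E);
    }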
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp   169
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h       3
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp  176
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h      2
4 files changed, 322 insertions, 28 deletions
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index da2403f..8d12588 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -351,7 +351,7 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
setOperationAction({ISD::FLOG2, ISD::FEXP2}, MVT::f16, Custom);
}
- setOperationAction({ISD::FLOG10, ISD::FLOG}, MVT::f16, Custom);
+ setOperationAction({ISD::FLOG10, ISD::FLOG, ISD::FEXP}, MVT::f16, Custom);
// FIXME: These IS_FPCLASS vector fp types are marked custom so it reaches
// scalarization code. Can be removed when IS_FPCLASS expand isn't called by
@@ -1359,6 +1359,10 @@ void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
if (SDValue Lowered = lowerFEXP2(SDValue(N, 0), DAG))
Results.push_back(Lowered);
return;
+ case ISD::FEXP:
+ if (SDValue Lowered = lowerFEXP(SDValue(N, 0), DAG))
+ Results.push_back(Lowered);
+ return;
default:
return;
}
@@ -2460,12 +2464,16 @@ static bool valueIsKnownNeverF32Denorm(SDValue Src) {
llvm_unreachable("covered opcode switch");
}
+static bool allowApproxFunc(const SelectionDAG &DAG, SDNodeFlags Flags) {
+ if (Flags.hasApproximateFuncs())
+ return true;
+ auto &Options = DAG.getTarget().Options;
+ return Options.UnsafeFPMath || Options.ApproxFuncFPMath;
+}
+
static bool needsDenormHandlingF32(const SelectionDAG &DAG, SDValue Src,
SDNodeFlags Flags) {
- return !Flags.hasApproximateFuncs() &&
- !DAG.getTarget().Options.UnsafeFPMath &&
- !DAG.getTarget().Options.ApproxFuncFPMath &&
- !valueIsKnownNeverF32Denorm(Src) &&
+ return !valueIsKnownNeverF32Denorm(Src) &&
DAG.getMachineFunction()
.getDenormalMode(APFloat::IEEEsingle())
.Input != DenormalMode::PreserveSign;
@@ -2508,7 +2516,7 @@ SDValue AMDGPUTargetLowering::getIsFinite(SelectionDAG &DAG, SDValue Src,
std::pair<SDValue, SDValue>
AMDGPUTargetLowering::getScaledLogInput(SelectionDAG &DAG, const SDLoc SL,
SDValue Src, SDNodeFlags Flags) const {
- if (!needsDenormHandlingF32(DAG, Src, Flags))
+ if (allowApproxFunc(DAG, Flags) || !needsDenormHandlingF32(DAG, Src, Flags))
return {};
MVT VT = MVT::f32;
@@ -2706,7 +2714,9 @@ SDValue AMDGPUTargetLowering::lowerFEXP2(SDValue Op, SelectionDAG &DAG) const {
DAG.getTargetConstant(0, SL, MVT::i32), Flags);
}
- if (!needsDenormHandlingF32(DAG, Src, Flags))
+ assert(VT == MVT::f32);
+
+ if (allowApproxFunc(DAG, Flags) || !needsDenormHandlingF32(DAG, Src, Flags))
return DAG.getNode(AMDGPUISD::EXP, SL, MVT::f32, Src, Flags);
// bool needs_scaling = x < -0x1.f80000p+6f;
@@ -2715,9 +2725,10 @@ SDValue AMDGPUTargetLowering::lowerFEXP2(SDValue Op, SelectionDAG &DAG) const {
// -nextafter(128.0, -1)
SDValue RangeCheckConst = DAG.getConstantFP(-0x1.f80000p+6f, SL, VT);
- SDValue NeedsScaling = DAG.getSetCC(
- SL, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), Src,
- RangeCheckConst, ISD::SETOLT);
+ EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
+
+ SDValue NeedsScaling =
+ DAG.getSetCC(SL, SetCCVT, Src, RangeCheckConst, ISD::SETOLT);
SDValue SixtyFour = DAG.getConstantFP(0x1.0p+6f, SL, VT);
SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
@@ -2736,15 +2747,143 @@ SDValue AMDGPUTargetLowering::lowerFEXP2(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::FMUL, SL, VT, Exp2, ResultScale, Flags);
}
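The scaled exp2 path above avoids flushed results near the bottom of the f32 range by shifting the input up before the hardware exp2 and rescaling afterwards: 2^x == 2^(x+64) * 2^-64. A hedged host-side sketch of just that identity (illustrative name; std::exp2 models v_exp_f32):

    #include <cmath>

    // When x < -126 the result 2^x is denormal in f32, so compute a
    // normal intermediate and rescale; otherwise use exp2 directly.
    float exp2DenormSafe(float x) {
      const bool NeedsScaling = x < -0x1.f80000p+6f; // x < -126.0f
      const float Scaled = std::exp2(NeedsScaling ? x + 0x1.0p+6f : x);
      return NeedsScaling ? Scaled * 0x1.0p-64f : Scaled;
    }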
-// exp2(M_LOG2E_F * f);
+SDValue AMDGPUTargetLowering::lowerFEXPUnsafe(SDValue Op, const SDLoc &SL,
+ SelectionDAG &DAG,
+ SDNodeFlags Flags) const {
+ // exp2(M_LOG2E_F * f);
+ EVT VT = Op.getValueType();
+ const SDValue K = DAG.getConstantFP(numbers::log2e, SL, VT);
+ SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Op, K, Flags);
+ return DAG.getNode(VT == MVT::f32 ? AMDGPUISD::EXP : ISD::FEXP2, SL, VT, Mul,
+ Flags);
+}
+
SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
SDLoc SL(Op);
- SDValue Src = Op.getOperand(0);
+ SDValue X = Op.getOperand(0);
+ SDNodeFlags Flags = Op->getFlags();
+ const bool IsExp10 = false; // TODO: For some reason exp10 is missing
- const SDValue K = DAG.getConstantFP(numbers::log2e, SL, VT);
- SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Src, K, Op->getFlags());
- return DAG.getNode(ISD::FEXP2, SL, VT, Mul, Op->getFlags());
+ if (VT.getScalarType() == MVT::f16) {
+ // v_exp_f16 (fmul x, log2e)
+ if (allowApproxFunc(DAG, Flags)) // TODO: Does this really require fast?
+ return lowerFEXPUnsafe(X, SL, DAG, Flags);
+
+ if (VT.isVector())
+ return SDValue();
+
+ // exp(f16 x) ->
+ // fptrunc (v_exp_f32 (fmul (fpext x), log2e))
+
+ // Nothing in half is a denormal when promoted to f32.
+ SDValue Ext = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, X, Flags);
+ SDValue Lowered = lowerFEXPUnsafe(Ext, SL, DAG, Flags);
+ return DAG.getNode(ISD::FP_ROUND, SL, VT, Lowered,
+ DAG.getTargetConstant(0, SL, MVT::i32), Flags);
+ }
+
+ assert(VT == MVT::f32);
+
+ // TODO: Interpret allowApproxFunc as ignoring DAZ. This is currently copying
+ // library behavior. Also, is known-not-daz source sufficient?
+ if (allowApproxFunc(DAG, Flags) && !needsDenormHandlingF32(DAG, X, Flags)) {
+ assert(!IsExp10 && "todo exp10 support");
+ return lowerFEXPUnsafe(X, SL, DAG, Flags);
+ }
+
+ // Algorithm:
+ //
+ // e^x = 2^(x/ln(2)) = 2^(x*(64/ln(2))/64)
+ //
+ // x*(64/ln(2)) = n + f, |f| <= 0.5, n is integer
+ // n = 64*m + j, 0 <= j < 64
+ //
+ // e^x = 2^((64*m + j + f)/64)
+ // = (2^m) * (2^(j/64)) * 2^(f/64)
+ // = (2^m) * (2^(j/64)) * e^(f*(ln(2)/64))
+ //
+ // f = x*(64/ln(2)) - n
+ // r = f*(ln(2)/64) = x - n*(ln(2)/64)
+ //
+ // e^x = (2^m) * (2^(j/64)) * e^r
+ //
+ // (2^(j/64)) is precomputed
+ //
+ // e^r = 1 + r + (r^2)/2! + (r^3)/3! + (r^4)/4! + (r^5)/5!
+ // e^r = 1 + q
+ //
+ // q = r + (r^2)/2! + (r^3)/3! + (r^4)/4! + (r^5)/5!
+ //
+ // e^x = (2^m) * ( (2^(j/64)) + q*(2^(j/64)) )
+
+ SDValue PH, PL;
+ if (Subtarget->hasFastFMAF32()) {
+ const float c_exp = numbers::log2ef;
+ const float cc_exp = 0x1.4ae0bep-26f; // c+cc are 49 bits
+ const float c_exp10 = 0x1.a934f0p+1f;
+ const float cc_exp10 = 0x1.2f346ep-24f;
+
+ SDValue C = DAG.getConstantFP(IsExp10 ? c_exp10 : c_exp, SL, VT);
+ SDValue CC = DAG.getConstantFP(IsExp10 ? cc_exp10 : cc_exp, SL, VT);
+
+ PH = DAG.getNode(ISD::FMUL, SL, VT, X, C, Flags);
+ SDValue NegPH = DAG.getNode(ISD::FNEG, SL, VT, PH, Flags);
+ SDValue FMA0 = DAG.getNode(ISD::FMA, SL, VT, X, C, NegPH, Flags);
+ PL = DAG.getNode(ISD::FMA, SL, VT, X, CC, FMA0, Flags);
+ } else {
+ const float ch_exp = 0x1.714000p+0f;
+ const float cl_exp = 0x1.47652ap-12f; // ch + cl are 36 bits
+
+ const float ch_exp10 = 0x1.a92000p+1f;
+ const float cl_exp10 = 0x1.4f0978p-11f;
+
+ SDValue CH = DAG.getConstantFP(IsExp10 ? ch_exp10 : ch_exp, SL, VT);
+ SDValue CL = DAG.getConstantFP(IsExp10 ? cl_exp10 : cl_exp, SL, VT);
+
+ SDValue XAsInt = DAG.getNode(ISD::BITCAST, SL, MVT::i32, X);
+ SDValue MaskConst = DAG.getConstant(0xfffff000, SL, MVT::i32);
+ SDValue XHAsInt = DAG.getNode(ISD::AND, SL, MVT::i32, XAsInt, MaskConst);
+ SDValue XH = DAG.getNode(ISD::BITCAST, SL, VT, XHAsInt);
+ SDValue XL = DAG.getNode(ISD::FSUB, SL, VT, X, XH, Flags);
+
+ PH = DAG.getNode(ISD::FMUL, SL, VT, XH, CH, Flags);
+
+ SDValue XLCL = DAG.getNode(ISD::FMUL, SL, VT, XL, CL, Flags);
+ SDValue Mad0 = getMad(DAG, SL, VT, XL, CH, XLCL, Flags);
+ PL = getMad(DAG, SL, VT, XH, CL, Mad0, Flags);
+ }
+
+ SDValue E = DAG.getNode(ISD::FRINT, SL, VT, PH, Flags);
+ SDValue PHSubE = DAG.getNode(ISD::FSUB, SL, VT, PH, E, Flags);
+ SDValue A = DAG.getNode(ISD::FADD, SL, VT, PHSubE, PL, Flags);
+ SDValue IntE = DAG.getNode(ISD::FP_TO_SINT, SL, MVT::i32, E);
+ SDValue Exp2 = DAG.getNode(AMDGPUISD::EXP, SL, VT, A, Flags);
+
+ SDValue R = DAG.getNode(ISD::FLDEXP, SL, VT, Exp2, IntE, Flags);
+
+ SDValue UnderflowCheckConst =
+ DAG.getConstantFP(IsExp10 ? -0x1.66d3e8p+5f : -0x1.9d1da0p+6f, SL, VT);
+
+ EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
+ SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
+ SDValue Underflow =
+ DAG.getSetCC(SL, SetCCVT, X, UnderflowCheckConst, ISD::SETOLT);
+
+ R = DAG.getNode(ISD::SELECT, SL, VT, Underflow, Zero, R);
+ const auto &Options = getTargetMachine().Options;
+
+ if (!Flags.hasNoInfs() && !Options.NoInfsFPMath) {
+ SDValue OverflowCheckConst =
+ DAG.getConstantFP(IsExp10 ? 0x1.344136p+5f : 0x1.62e430p+6f, SL, VT);
+ SDValue Overflow =
+ DAG.getSetCC(SL, SetCCVT, X, OverflowCheckConst, ISD::SETOGT);
+ SDValue Inf =
+ DAG.getConstantFP(APFloat::getInf(APFloat::IEEEsingle()), SL, VT);
+ R = DAG.getNode(ISD::SELECT, SL, VT, Overflow, Inf, R);
+ }
+
+ return R;
}
static bool isCtlzOpc(unsigned Opc) {
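Taken together, the new lowerFEXP follows the commented range reduction: split x*log2(e) into high and low parts, round the high part to an integer exponent, feed the residue to v_exp_f32, and reassemble with ldexp before clamping the extremes. A hedged host-side model of the fast-FMA variant (a sketch, not the emitted DAG; libm calls stand in for the ISD/AMDGPUISD nodes, and the function name is invented):

    #include <cmath>

    float expF32Model(float x) {
      const float C  = 0x1.715476p+0f;  // log2(e), high part
      const float CC = 0x1.4ae0bep-26f; // low part; C+CC carry 49 bits
      float PH = x * C;
      float PL = std::fma(x, CC, std::fma(x, C, -PH)); // rounding residue
      float E  = std::rint(PH);         // integer exponent contribution
      float A  = (PH - E) + PL;         // reduced argument for exp2
      float R  = std::ldexp(std::exp2(A), static_cast<int>(E));
      if (x < -0x1.9d1da0p+6f) // ~ -103.28, below ln(2^-149): flush to 0
        R = 0.0f;
      if (x > 0x1.62e430p+6f)  // ~ 88.72, above ln(FLT_MAX): overflow
        R = INFINITY;          // elided when no-infs is in effect
      return R;
    }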
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index 3d5ba50..26b9115 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -74,6 +74,9 @@ protected:
SDValue LowerFLOGUnsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG,
double Log2BaseInverted, SDNodeFlags Flags) const;
SDValue lowerFEXP2(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue lowerFEXPUnsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG,
+ SDNodeFlags Flags) const;
SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index dfd2243..12b5d9d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1122,7 +1122,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.scalarize(0);
// FIXME: fpow has a selection pattern that should move to custom lowering.
- auto &ExpOps = getActionDefinitionsBuilder({G_FEXP, G_FPOW});
+ auto &ExpOps = getActionDefinitionsBuilder(G_FPOW);
if (ST.has16BitInsts())
ExpOps.customFor({{S32}, {S16}});
else
@@ -1143,7 +1143,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
Log2Ops.scalarize(0)
.lower();
- auto &LogOps = getActionDefinitionsBuilder({G_FLOG, G_FLOG10});
+ auto &LogOps = getActionDefinitionsBuilder({G_FLOG, G_FLOG10, G_FEXP});
LogOps.customFor({S32, S16});
LogOps.clampScalar(0, MinScalarFPTy, S32)
.scalarize(0);
@@ -3010,12 +3010,16 @@ static bool valueIsKnownNeverF32Denorm(const MachineRegisterInfo &MRI,
return false;
}
+static bool allowApproxFunc(const MachineFunction &MF, unsigned Flags) {
+ if (Flags & MachineInstr::FmAfn)
+ return true;
+ const auto &Options = MF.getTarget().Options;
+ return Options.UnsafeFPMath || Options.ApproxFuncFPMath;
+}
+
static bool needsDenormHandlingF32(const MachineFunction &MF, Register Src,
unsigned Flags) {
- return (Flags & MachineInstr::FmAfn) == 0 &&
- !MF.getTarget().Options.UnsafeFPMath &&
- !MF.getTarget().Options.ApproxFuncFPMath &&
- !valueIsKnownNeverF32Denorm(MF.getRegInfo(), Src) &&
+ return !valueIsKnownNeverF32Denorm(MF.getRegInfo(), Src) &&
MF.getDenormalMode(APFloat::IEEEsingle()).Input !=
DenormalMode::PreserveSign;
}
@@ -3023,7 +3027,8 @@ static bool needsDenormHandlingF32(const MachineFunction &MF, Register Src,
std::pair<Register, Register>
AMDGPULegalizerInfo::getScaledLogInput(MachineIRBuilder &B, Register Src,
unsigned Flags) const {
- if (!needsDenormHandlingF32(B.getMF(), Src, Flags))
+ if (allowApproxFunc(B.getMF(), Flags) ||
+ !needsDenormHandlingF32(B.getMF(), Src, Flags))
return {};
const LLT F32 = LLT::scalar(32);
@@ -3249,7 +3254,8 @@ bool AMDGPULegalizerInfo::legalizeFExp2(MachineInstr &MI,
assert(Ty == F32);
- if (!needsDenormHandlingF32(B.getMF(), Src, Flags)) {
+ if (allowApproxFunc(B.getMF(), Flags) ||
+ !needsDenormHandlingF32(B.getMF(), Src, Flags)) {
B.buildIntrinsic(Intrinsic::amdgcn_exp2, ArrayRef<Register>{Dst}, false)
.addUse(Src)
.setMIFlags(Flags);
@@ -3282,16 +3288,160 @@ bool AMDGPULegalizerInfo::legalizeFExp2(MachineInstr &MI,
return true;
}
+bool AMDGPULegalizerInfo::legalizeFExpUnsafe(MachineIRBuilder &B, Register Dst,
+ Register Src,
+ unsigned Flags) const {
+ LLT Ty = B.getMRI()->getType(Dst);
+ auto K = B.buildFConstant(Ty, numbers::log2e);
+ auto Mul = B.buildFMul(Ty, Src, K, Flags);
+
+ if (Ty == LLT::scalar(32)) {
+ B.buildIntrinsic(Intrinsic::amdgcn_exp2, ArrayRef<Register>{Dst}, false)
+ .addUse(Mul.getReg(0))
+ .setMIFlags(Flags);
+ } else {
+ B.buildFExp2(Dst, Mul.getReg(0), Flags);
+ }
+
+ return true;
+}
+
bool AMDGPULegalizerInfo::legalizeFExp(MachineInstr &MI,
MachineIRBuilder &B) const {
Register Dst = MI.getOperand(0).getReg();
- Register Src = MI.getOperand(1).getReg();
+ Register X = MI.getOperand(1).getReg();
unsigned Flags = MI.getFlags();
- LLT Ty = B.getMRI()->getType(Dst);
+ MachineFunction &MF = B.getMF();
+ MachineRegisterInfo &MRI = *B.getMRI();
+ LLT Ty = MRI.getType(Dst);
+ const LLT F16 = LLT::scalar(16);
+ const LLT F32 = LLT::scalar(32);
+ const bool IsExp10 = false; // TODO: For some reason exp10 is missing
- auto K = B.buildFConstant(Ty, numbers::log2e);
- auto Mul = B.buildFMul(Ty, Src, K, Flags);
- B.buildFExp2(Dst, Mul, Flags);
+ if (Ty == F16) {
+ // v_exp_f16 (fmul x, log2e)
+ if (allowApproxFunc(MF, Flags)) {
+ // TODO: Does this really require fast?
+ legalizeFExpUnsafe(B, Dst, X, Flags);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ // exp(f16 x) ->
+ // fptrunc (v_exp_f32 (fmul (fpext x), log2e))
+
+ // Nothing in half is a denormal when promoted to f32.
+ auto Ext = B.buildFPExt(F32, X, Flags);
+ Register Lowered = MRI.createGenericVirtualRegister(F32);
+ legalizeFExpUnsafe(B, Lowered, Ext.getReg(0), Flags);
+ B.buildFPTrunc(Dst, Lowered, Flags);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ assert(Ty == F32);
+
+ // TODO: Interpret allowApproxFunc as ignoring DAZ. This is currently copying
+ // library behavior. Also, is known-not-daz source sufficient?
+ if (allowApproxFunc(MF, Flags) && !needsDenormHandlingF32(MF, X, Flags)) {
+ legalizeFExpUnsafe(B, Dst, X, Flags);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ // Algorithm:
+ //
+ // e^x = 2^(x/ln(2)) = 2^(x*(64/ln(2))/64)
+ //
+ // x*(64/ln(2)) = n + f, |f| <= 0.5, n is integer
+ // n = 64*m + j, 0 <= j < 64
+ //
+ // e^x = 2^((64*m + j + f)/64)
+ // = (2^m) * (2^(j/64)) * 2^(f/64)
+ // = (2^m) * (2^(j/64)) * e^(f*(ln(2)/64))
+ //
+ // f = x*(64/ln(2)) - n
+ // r = f*(ln(2)/64) = x - n*(ln(2)/64)
+ //
+ // e^x = (2^m) * (2^(j/64)) * e^r
+ //
+ // (2^(j/64)) is precomputed
+ //
+ // e^r = 1 + r + (r^2)/2! + (r^3)/3! + (r^4)/4! + (r^5)/5!
+ // e^r = 1 + q
+ //
+ // q = r + (r^2)/2! + (r^3)/3! + (r^4)/4! + (r^5)/5!
+ //
+ // e^x = (2^m) * ( (2^(j/64)) + q*(2^(j/64)) )
+
+ Register PH, PL;
+
+ if (ST.hasFastFMAF32()) {
+ const float c_exp = numbers::log2ef;
+ const float cc_exp = 0x1.4ae0bep-26f; // c+cc are 49 bits
+ const float c_exp10 = 0x1.a934f0p+1f;
+ const float cc_exp10 = 0x1.2f346ep-24f;
+
+ auto C = B.buildFConstant(Ty, IsExp10 ? c_exp10 : c_exp);
+ PH = B.buildFMul(Ty, X, C, Flags).getReg(0);
+ auto NegPH = B.buildFNeg(Ty, PH, Flags);
+ auto FMA0 = B.buildFMA(Ty, X, C, NegPH, Flags);
+
+ auto CC = B.buildFConstant(Ty, IsExp10 ? cc_exp10 : cc_exp);
+ PL = B.buildFMA(Ty, X, CC, FMA0, Flags).getReg(0);
+ } else {
+ const float ch_exp = 0x1.714000p+0f;
+ const float cl_exp = 0x1.47652ap-12f; // ch + cl are 36 bits
+
+ const float ch_exp10 = 0x1.a92000p+1f;
+ const float cl_exp10 = 0x1.4f0978p-11f;
+
+ auto MaskConst = B.buildConstant(Ty, 0xfffff000);
+ auto XH = B.buildAnd(Ty, X, MaskConst);
+ auto XL = B.buildFSub(Ty, X, XH, Flags);
+
+ auto CH = B.buildFConstant(Ty, IsExp10 ? ch_exp10 : ch_exp);
+ PH = B.buildFMul(Ty, XH, CH, Flags).getReg(0);
+
+ auto CL = B.buildFConstant(Ty, IsExp10 ? cl_exp10 : cl_exp);
+ auto XLCL = B.buildFMul(Ty, XL, CL, Flags);
+
+ Register Mad0 =
+ getMad(B, Ty, XL.getReg(0), CH.getReg(0), XLCL.getReg(0), Flags);
+ PL = getMad(B, Ty, XH.getReg(0), CL.getReg(0), Mad0, Flags);
+ }
+
+ auto E = B.buildFRint(Ty, PH, Flags);
+ auto PHSubE = B.buildFSub(Ty, PH, E, Flags);
+ auto A = B.buildFAdd(Ty, PHSubE, PL, Flags);
+ auto IntE = B.buildFPTOSI(LLT::scalar(32), E);
+
+ auto Exp2 = B.buildIntrinsic(Intrinsic::amdgcn_exp2, {Ty}, false)
+ .addUse(A.getReg(0))
+ .setMIFlags(Flags);
+ auto R = B.buildFLdexp(Ty, Exp2, IntE, Flags);
+
+ auto UnderflowCheckConst =
+ B.buildFConstant(Ty, IsExp10 ? -0x1.66d3e8p+5f : -0x1.9d1da0p+6f);
+ auto Zero = B.buildFConstant(Ty, 0.0);
+ auto Underflow =
+ B.buildFCmp(CmpInst::FCMP_OLT, LLT::scalar(1), X, UnderflowCheckConst);
+
+ R = B.buildSelect(Ty, Underflow, Zero, R);
+
+ const auto &Options = MF.getTarget().Options;
+
+ if (!(Flags & MachineInstr::FmNoInfs) && !Options.NoInfsFPMath) {
+ auto OverflowCheckConst =
+ B.buildFConstant(Ty, IsExp10 ? 0x1.344136p+5f : 0x1.62e430p+6f);
+
+ auto Overflow =
+ B.buildFCmp(CmpInst::FCMP_OGT, LLT::scalar(1), X, OverflowCheckConst);
+ auto Inf = B.buildFConstant(Ty, APFloat::getInf(APFloat::IEEEsingle()));
+ R = B.buildSelect(Ty, Overflow, Inf, R, Flags);
+ }
+
+ B.buildCopy(Dst, R);
MI.eraseFromParent();
return true;
}
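The non-fast-FMA branch above gains extra precision without an fma by masking x to a short-mantissa high part (0xfffff000 keeps the sign, exponent, and top 11 mantissa bits) so the leading product rounds exactly, then accumulating the residue through mads. A hedged host-side sketch of what that split computes (illustrative name; plain multiplies and adds model the mad chain):

    #include <cstdint>
    #include <cstring>

    // Approximate x * log2(e) with a ~36-bit constant: split x so that
    // XH*CH is exact, then fold in the three small cross terms.
    float splitMulLog2e(float x) {
      const float CH = 0x1.714000p+0f;  // top bits of log2(e)
      const float CL = 0x1.47652ap-12f; // remainder; CH+CL are 36 bits
      uint32_t Bits;
      std::memcpy(&Bits, &x, sizeof(Bits));
      Bits &= 0xfffff000u;              // drop the low 12 mantissa bits
      float XH;
      std::memcpy(&XH, &Bits, sizeof(XH));
      const float XL = x - XH;          // the masked-off low bits (exact)
      const float PH = XH * CH;         // short mantissas: product is exact
      const float PL = XH * CL + (XL * CH + XL * CL);
      return PH + PL;                   // feeds FRINT/exp2 as in the lowering
    }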
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index 2d4cf4c..1a91be1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -87,6 +87,8 @@ public:
bool legalizeFlogUnsafe(MachineIRBuilder &B, Register Dst, Register Src,
double Log2BaseInverted, unsigned Flags) const;
bool legalizeFExp2(MachineInstr &MI, MachineIRBuilder &B) const;
+ bool legalizeFExpUnsafe(MachineIRBuilder &B, Register Dst, Register Src,
+ unsigned Flags) const;
bool legalizeFExp(MachineInstr &MI, MachineIRBuilder &B) const;
bool legalizeFPow(MachineInstr &MI, MachineIRBuilder &B) const;
bool legalizeFFloor(MachineInstr &MI, MachineRegisterInfo &MRI,