author     Nicolai Hähnle <nicolai.haehnle@amd.com>  2024-01-04 00:10:15 +0100
committer  GitHub <noreply@github.com>  2024-01-04 00:10:15 +0100
commit     49b492048af2b2093aaed899c0bbd6d740aad83c (patch)
tree       1f4f9b2ba68133bd36b607d09abed154b32ee783 /llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
parent     49029f926d359075d59ad4aec2d01a21d9514b02 (diff)
AMDGPU: Fix packed 16-bit inline constants (#76522)
Consistently treat packed 16-bit operands as 32-bit values, because
that's really what they are. The attempt to treat them differently was
ultimately incorrect and led to miscompiles, e.g. when using non-splat
constants such as (1, 0) as operands.

Recognize 32-bit float constants for i/u16 instructions. This is a bit
odd conceptually, but it matches HW behavior and SP3.

Remove isFoldableLiteralV216; there was too much magic in the
dependency between it and its use in SIFoldOperands. Instead, we now
simply rely on checking whether a constant is an inline constant, and
trying a bunch of permutations of the low and high halves. This is more
obviously correct and leads to some new cases where inline constants
are used, as shown by tests.

Move the logic for switching packed add vs. sub into SIFoldOperands.
This has two benefits: all logic that optimizes for inline constants in
packed math is now in one place; and it applies to both the
SelectionDAG and GISel paths.

Disable the use of opsel with v_dot* instructions on gfx11. They are
documented to ignore opsel on src0 and src1. It may be interesting to
re-enable the use of opsel on src2 as a future optimization.

A similar "proper" fix of what inline constants mean could potentially
be applied to unpacked 16-bit ops. However, it's less clear what the
benefit would be, and there are surely places where we'd have to
carefully audit whether values are properly sign- or zero-extended. It
is best to keep such a change separate.

Fixes: Corruption in FSR 2.0 (latent bug exposed by an LLPC change)
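To make the new model concrete, here is a minimal standalone sketch; it
is not code from this patch, and encodeV2I16 is a hypothetical name. It
mirrors the integer half of the getInlineEncodingV216 logic below, so
inlinability of a packed v2i16 operand reduces to a single query on the
full 32-bit value:

    // Minimal sketch (hypothetical helper name); mirrors the integer
    // half of the new getInlineEncodingV216 logic in this patch.
    #include <cassert>
    #include <cstdint>
    #include <cstdlib>
    #include <optional>

    // Returns the inline-constant encoding for a packed v2i16 literal,
    // or nullopt if the 32-bit value has no inline encoding.
    std::optional<unsigned> encodeV2I16(uint32_t Literal) {
      int32_t Signed = static_cast<int32_t>(Literal);
      if (Signed >= 0 && Signed <= 64)
        return 128 + Signed; // integer inline constants 0..64
      if (Signed >= -16 && Signed <= -1)
        return 192 + std::abs(Signed); // integer inline constants -16..-1
      if (Literal == 0x3F800000)
        return 242; // 1.0f; the patch accepts the other 32-bit floats too
      return std::nullopt;
    }

    int main() {
      // Inline constant 1 is sign-extended to 0x00000001, i.e. the
      // packed value (1, 0) -- not the splat (1, 1).
      assert(encodeV2I16(0x00000001) == 129u);
      // The splat (1, 1) = 0x00010001 has no inline encoding and must
      // be emitted as a 32-bit literal.
      assert(!encodeV2I16(0x00010001).has_value());
      return 0;
    }

Because integer encodings are sign-extended 32-bit values, encoding 129
denotes the packed pair (1, 0), not the splat (1, 1). V_PK_*_F16
instructions use the same encoding space but match half-precision bit
patterns (e.g. 0x3C00 for 1.0), as the tables in the diff below show.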
Diffstat (limited to 'llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp')
-rw-r--r--  llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp  106
1 file changed, 74 insertions(+), 32 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index a91d771..26ba257 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -2506,53 +2506,95 @@ bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
         Val == 0x3118; // 1/2pi
}

-bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
-  assert(HasInv2Pi);
-
-  if (isInt<16>(Literal) || isUInt<16>(Literal)) {
-    int16_t Trunc = static_cast<int16_t>(Literal);
-    return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
+std::optional<unsigned> getInlineEncodingV216(bool IsFloat, uint32_t Literal) {
+  // Unfortunately, the Instruction Set Architecture Reference Guide is
+  // misleading about how the inline operands work for (packed) 16-bit
+  // instructions. In a nutshell, the actual HW behavior is:
+  //
+  //  - integer encodings (-16 .. 64) are always produced as sign-extended
+  //    32-bit values
+  //  - float encodings are produced as:
+  //    - for F16 instructions: corresponding half-precision float values in
+  //      the LSBs, 0 in the MSBs
+  //    - for UI16 instructions: corresponding single-precision float value
+  int32_t Signed = static_cast<int32_t>(Literal);
+  if (Signed >= 0 && Signed <= 64)
+    return 128 + Signed;
+
+  if (Signed >= -16 && Signed <= -1)
+    return 192 + std::abs(Signed);
+
+  if (IsFloat) {
+    // clang-format off
+    switch (Literal) {
+    case 0x3800: return 240; // 0.5
+    case 0xB800: return 241; // -0.5
+    case 0x3C00: return 242; // 1.0
+    case 0xBC00: return 243; // -1.0
+    case 0x4000: return 244; // 2.0
+    case 0xC000: return 245; // -2.0
+    case 0x4400: return 246; // 4.0
+    case 0xC400: return 247; // -4.0
+    case 0x3118: return 248; // 1.0 / (2.0 * pi)
+    default: break;
+    }
+    // clang-format on
+  } else {
+    // clang-format off
+    switch (Literal) {
+    case 0x3F000000: return 240; // 0.5
+    case 0xBF000000: return 241; // -0.5
+    case 0x3F800000: return 242; // 1.0
+    case 0xBF800000: return 243; // -1.0
+    case 0x40000000: return 244; // 2.0
+    case 0xC0000000: return 245; // -2.0
+    case 0x40800000: return 246; // 4.0
+    case 0xC0800000: return 247; // -4.0
+    case 0x3E22F983: return 248; // 1.0 / (2.0 * pi)
+    default: break;
+    }
+    // clang-format on
  }

-  if (!(Literal & 0xffff))
-    return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);
-  int16_t Lo16 = static_cast<int16_t>(Literal);
-  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
-  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
+  return {};
}

-bool isInlinableIntLiteralV216(int32_t Literal) {
-  int16_t Lo16 = static_cast<int16_t>(Literal);
-  if (isInt<16>(Literal) || isUInt<16>(Literal))
-    return isInlinableIntLiteral(Lo16);
+// Encoding of the literal as an inline constant for a V_PK_*_IU16 instruction
+// or nullopt.
+std::optional<unsigned> getInlineEncodingV2I16(uint32_t Literal) {
+  return getInlineEncodingV216(false, Literal);
+}

-  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
-  if (!(Literal & 0xffff))
-    return isInlinableIntLiteral(Hi16);
-  return Lo16 == Hi16 && isInlinableIntLiteral(Lo16);
+// Encoding of the literal as an inline constant for a V_PK_*_F16 instruction
+// or nullopt.
+std::optional<unsigned> getInlineEncodingV2F16(uint32_t Literal) {
+  return getInlineEncodingV216(true, Literal);
}

-bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi, uint8_t OpType) {
+// Whether the given literal can be inlined for a V_PK_* instruction.
+bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType) {
  switch (OpType) {
+  case AMDGPU::OPERAND_REG_IMM_V2INT16:
+  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
+  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
+    return getInlineEncodingV216(false, Literal).has_value();
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
-    return isInlinableLiteralV216(Literal, HasInv2Pi);
+  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
+    return getInlineEncodingV216(true, Literal).has_value();
  default:
-    return isInlinableIntLiteralV216(Literal);
+    llvm_unreachable("bad packed operand type");
  }
}

-bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi) {
-  assert(HasInv2Pi);
-
-  int16_t Lo16 = static_cast<int16_t>(Literal);
-  if (isInt<16>(Literal) || isUInt<16>(Literal))
-    return true;
+// Whether the given literal can be inlined for a V_PK_*_IU16 instruction.
+bool isInlinableLiteralV2I16(uint32_t Literal) {
+  return getInlineEncodingV2I16(Literal).has_value();
+}

-  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
-  if (!(Literal & 0xffff))
-    return true;
-  return Lo16 == Hi16;
+// Whether the given literal can be inlined for a V_PK_*_F16 instruction.
+bool isInlinableLiteralV2F16(uint32_t Literal) {
+  return getInlineEncodingV2F16(Literal).has_value();
}

bool isValid32BitLiteral(uint64_t Val, bool IsFP64) {