author    James Y Knight <jyknight@google.com>  2024-07-25 09:19:22 -0400
committer GitHub <noreply@github.com>           2024-07-25 09:19:22 -0400
commit    dfeb3991fb489a703f631ab0c34b58f80568038d (patch)
tree      7117ce620e5bf49aef8810d5651c4aba2b31499e /llvm/lib/Analysis/ConstantFolding.cpp
parent    0fedfd83d75415837eb91f56ec24f4b392bf6c57 (diff)
Remove the `x86_mmx` IR type. (#98505)
It is now translated to `<1 x i64>`, which allows the removal of a bunch of special casing.

This _incompatibly_ changes the ABI of any LLVM IR function with `x86_mmx` arguments or returns: instead of being passed in MMX registers, they will now be passed via integer registers. However, the real-world incompatibility caused by this is expected to be minimal, because Clang never uses the x86_mmx type -- it lowers `__m64` to either `<1 x i64>` or `double`, depending on ABI.

This change does _not_ eliminate the SelectionDAG `MVT::x86mmx` type. That type simply no longer corresponds to an IR type, and is used only by MMX intrinsics and inline-asm operands.

Because SelectionDAGBuilder only knows how to generate the operands/results of intrinsics based on the IR type, it now generates the intrinsics with the type MVT::v1i64 instead of MVT::x86mmx. These need to be fixed up before DAG type legalization, so the X86 backend fixes them up in DAGCombine. (This may be a short-lived hack, if all the MMX intrinsics can be removed in upcoming changes.)

Works towards issue #98272.
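To illustrate the type change (hypothetical function name, not taken from the commit), IR that previously declared `x86_mmx` parameters and returns is now written against `<1 x i64>`:

; Before this change the interface would have been written as
;   define x86_mmx @mmx_passthrough(x86_mmx %v)
; which is no longer valid IR. The same interface is now:
define <1 x i64> @mmx_passthrough(<1 x i64> %v) {
  ret <1 x i64> %v
}
; On x86-64 the <1 x i64> argument and return value travel in integer
; registers rather than MMX registers -- this is the ABI change noted above.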
Diffstat (limited to 'llvm/lib/Analysis/ConstantFolding.cpp')
-rw-r--r--  llvm/lib/Analysis/ConstantFolding.cpp  |  8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index df75745..85ee231 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -564,16 +564,14 @@ Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
Type *MapTy = Type::getIntNTy(C->getContext(),
DL.getTypeSizeInBits(LoadTy).getFixedValue());
if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
- if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
- !LoadTy->isX86_AMXTy())
+ if (Res->isNullValue() && !LoadTy->isX86_AMXTy())
// Materializing a zero can be done trivially without a bitcast
return Constant::getNullValue(LoadTy);
Type *CastTy = LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
Res = FoldBitCast(Res, CastTy, DL);
if (LoadTy->isPtrOrPtrVectorTy()) {
// For vector of pointer, we needed to first convert to a vector of integer, then do vector inttoptr
- if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
- !LoadTy->isX86_AMXTy())
+ if (Res->isNullValue() && !LoadTy->isX86_AMXTy())
return Constant::getNullValue(LoadTy);
if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
// Be careful not to replace a load of an addrspace value with an inttoptr here
@@ -764,7 +762,7 @@ Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty,
// uniform.
if (!DL.typeSizeEqualsStoreSize(C->getType()))
return nullptr;
- if (C->isNullValue() && !Ty->isX86_MMXTy() && !Ty->isX86_AMXTy())
+ if (C->isNullValue() && !Ty->isX86_AMXTy())
return Constant::getNullValue(Ty);
if (C->isAllOnesValue() &&
(Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
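For context, a minimal IR sketch of the fold the hunks above feed into (hypothetical names, not taken from the commit): a load from an all-zero constant of matching store size folds directly to a null value of the load type, with only `x86_amx` still excluded.

@zero = internal constant i64 0

define <1 x i64> @fold_zero_load() {
  ; ConstantFoldLoadFromUniformValue can replace this load with
  ; zeroinitializer, since <1 x i64> is an ordinary vector type and is not
  ; special-cased the way the removed x86_mmx type was.
  %v = load <1 x i64>, ptr @zero
  ret <1 x i64> %v
}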