-rw-r--r--  llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp  117
1 file changed, 80 insertions, 37 deletions
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 348b0e7..64e8ee7 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -285,8 +285,15 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)
.clampScalar(1, sXLen, sXLen);
- auto &LoadStoreActions =
- getActionDefinitionsBuilder({G_LOAD, G_STORE})
+ auto &LoadActions = getActionDefinitionsBuilder(G_LOAD);
+ auto &StoreActions = getActionDefinitionsBuilder(G_STORE);
+
+ LoadActions
+ .legalForTypesWithMemDesc({{s32, p0, s8, 8},
+ {s32, p0, s16, 16},
+ {s32, p0, s32, 32},
+ {p0, p0, sXLen, XLen}});
+ StoreActions
.legalForTypesWithMemDesc({{s32, p0, s8, 8},
{s32, p0, s16, 16},
{s32, p0, s32, 32},
@@ -295,58 +302,94 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
.legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}});
if (XLen == 64) {
- LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
- {s64, p0, s16, 16},
- {s64, p0, s32, 32},
- {s64, p0, s64, 64}});
+ LoadActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
+ {s64, p0, s16, 16},
+ {s64, p0, s32, 32},
+ {s64, p0, s64, 64}});
+ StoreActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
+ {s64, p0, s16, 16},
+ {s64, p0, s32, 32},
+ {s64, p0, s64, 64}});
ExtLoadActions.legalForTypesWithMemDesc(
{{s64, p0, s8, 8}, {s64, p0, s16, 16}, {s64, p0, s32, 32}});
} else if (ST.hasStdExtD()) {
- LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
+ LoadActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
+ StoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
}
// Vector loads/stores.
if (ST.hasVInstructions()) {
- LoadStoreActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
- {nxv4s8, p0, nxv4s8, 8},
- {nxv8s8, p0, nxv8s8, 8},
- {nxv16s8, p0, nxv16s8, 8},
- {nxv32s8, p0, nxv32s8, 8},
- {nxv64s8, p0, nxv64s8, 8},
- {nxv2s16, p0, nxv2s16, 16},
- {nxv4s16, p0, nxv4s16, 16},
- {nxv8s16, p0, nxv8s16, 16},
- {nxv16s16, p0, nxv16s16, 16},
- {nxv32s16, p0, nxv32s16, 16},
- {nxv2s32, p0, nxv2s32, 32},
- {nxv4s32, p0, nxv4s32, 32},
- {nxv8s32, p0, nxv8s32, 32},
- {nxv16s32, p0, nxv16s32, 32}});
-
- if (ST.getELen() == 64)
- LoadStoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
- {nxv1s16, p0, nxv1s16, 16},
- {nxv1s32, p0, nxv1s32, 32}});
-
- if (ST.hasVInstructionsI64())
- LoadStoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
- {nxv2s64, p0, nxv2s64, 64},
- {nxv4s64, p0, nxv4s64, 64},
- {nxv8s64, p0, nxv8s64, 64}});
+ LoadActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
+ {nxv4s8, p0, nxv4s8, 8},
+ {nxv8s8, p0, nxv8s8, 8},
+ {nxv16s8, p0, nxv16s8, 8},
+ {nxv32s8, p0, nxv32s8, 8},
+ {nxv64s8, p0, nxv64s8, 8},
+ {nxv2s16, p0, nxv2s16, 16},
+ {nxv4s16, p0, nxv4s16, 16},
+ {nxv8s16, p0, nxv8s16, 16},
+ {nxv16s16, p0, nxv16s16, 16},
+ {nxv32s16, p0, nxv32s16, 16},
+ {nxv2s32, p0, nxv2s32, 32},
+ {nxv4s32, p0, nxv4s32, 32},
+ {nxv8s32, p0, nxv8s32, 32},
+ {nxv16s32, p0, nxv16s32, 32}});
+ StoreActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
+ {nxv4s8, p0, nxv4s8, 8},
+ {nxv8s8, p0, nxv8s8, 8},
+ {nxv16s8, p0, nxv16s8, 8},
+ {nxv32s8, p0, nxv32s8, 8},
+ {nxv64s8, p0, nxv64s8, 8},
+ {nxv2s16, p0, nxv2s16, 16},
+ {nxv4s16, p0, nxv4s16, 16},
+ {nxv8s16, p0, nxv8s16, 16},
+ {nxv16s16, p0, nxv16s16, 16},
+ {nxv32s16, p0, nxv32s16, 16},
+ {nxv2s32, p0, nxv2s32, 32},
+ {nxv4s32, p0, nxv4s32, 32},
+ {nxv8s32, p0, nxv8s32, 32},
+ {nxv16s32, p0, nxv16s32, 32}});
+
+ if (ST.getELen() == 64) {
+ LoadActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
+ {nxv1s16, p0, nxv1s16, 16},
+ {nxv1s32, p0, nxv1s32, 32}});
+ StoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
+ {nxv1s16, p0, nxv1s16, 16},
+ {nxv1s32, p0, nxv1s32, 32}});
+ }
+
+ if (ST.hasVInstructionsI64()) {
+ LoadActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
+ {nxv2s64, p0, nxv2s64, 64},
+ {nxv4s64, p0, nxv4s64, 64},
+ {nxv8s64, p0, nxv8s64, 64}});
+ StoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
+ {nxv2s64, p0, nxv2s64, 64},
+ {nxv4s64, p0, nxv4s64, 64},
+ {nxv8s64, p0, nxv8s64, 64}});
+ }
    // We use the custom lowering logic if we have scalable vector types
    // with non-standard alignments.
- LoadStoreActions.customIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST));
+ LoadActions.customIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST));
+ StoreActions.customIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST));
    // Pointers require that XLen-sized elements are legal.
- if (XLen <= ST.getELen())
- LoadStoreActions.customIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
+ if (XLen <= ST.getELen()) {
+ LoadActions.customIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
+ StoreActions.customIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
+ }
}
- LoadStoreActions.widenScalarToNextPow2(0, /* MinSize = */ 8)
+ LoadActions.widenScalarToNextPow2(0, /* MinSize = */ 8)
.lowerIfMemSizeNotByteSizePow2()
.clampScalar(0, s32, sXLen)
.lower();
+ StoreActions
+ .clampScalar(0, s32, sXLen)
+ .lowerIfMemSizeNotByteSizePow2()
+ .lower();
ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen).lower();
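For context, the point of this change is that a shared {G_LOAD, G_STORE}
builder cannot express diverging rule chains: after the split, loads widen
the scalar to the next power of two before clamping, while stores clamp the
scalar before lowering. Below is a minimal sketch of the same pattern
against LLVM's GlobalISel API; the free function buildLoadStoreRules and its
parameters are illustrative, not part of the upstream file. Each
legalForTypesWithMemDesc entry is a {Type0, Type1, MemTy, MinAlign} tuple:
the register type, the pointer type, the in-memory type, and the minimum
alignment in bits the rule accepts.

    #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
    #include "llvm/CodeGen/TargetOpcodes.h"

    using namespace llvm;

    // Illustrative helper (not upstream code); XLen would be 32 or 64
    // depending on the subtarget.
    static void buildLoadStoreRules(LegalizerInfo &LI, unsigned XLen) {
      const LLT s8 = LLT::scalar(8);
      const LLT s16 = LLT::scalar(16);
      const LLT s32 = LLT::scalar(32);
      const LLT p0 = LLT::pointer(/*AddressSpace=*/0, XLen);
      const LLT sXLen = LLT::scalar(XLen);

      // Two independent rule sets, as in the diff, so the tails below
      // can differ per opcode.
      auto &LoadActions = LI.getActionDefinitionsBuilder(TargetOpcode::G_LOAD);
      auto &StoreActions = LI.getActionDefinitionsBuilder(TargetOpcode::G_STORE);

      // Scalar loads/stores that are legal on any RV32/RV64 core.
      LoadActions.legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                            {s32, p0, s16, 16},
                                            {s32, p0, s32, 32},
                                            {p0, p0, sXLen, XLen}});
      StoreActions.legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                             {s32, p0, s16, 16},
                                             {s32, p0, s32, 32},
                                             {p0, p0, sXLen, XLen}});

      // Diverging tails, mirroring the end of the diff: loads widen
      // first, stores clamp first.
      LoadActions.widenScalarToNextPow2(0, /*MinSize=*/8)
          .lowerIfMemSizeNotByteSizePow2()
          .clampScalar(0, s32, sXLen)
          .lower();
      StoreActions.clampScalar(0, s32, sXLen)
          .lowerIfMemSizeNotByteSizePow2()
          .lower();
    }

In the upstream file these calls live in the RISCVLegalizerInfo constructor,
which finishes by calling getLegacyLegalizerInfo().computeTables(); the
sketch reproduces only the scalar load/store portion of the change.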