author     Eli Friedman <efriedma@quicinc.com>    2024-04-04 11:25:44 -0700
committer  GitHub <noreply@github.com>            2024-04-04 11:25:44 -0700
commit     c83f23d6abb6f8d693c643bc1b43f9b9e06bc537 (patch)
tree       b7586fbbbf5e468c160261a670a7f385e6397316 /llvm
parent     53fe94a0ce262c6e38117429a30814f54ea55b0f (diff)
[AArch64] Fix heuristics for folding "lsl" into load/store ops. (#86894)
The existing heuristics were assuming that every core behaves like an Apple A7, where any extend/shift costs an extra micro-op... but in reality, nothing else behaves like that. On some older Cortex designs, shifts by 1 or 4 cost extra, but all other shifts/extensions are free. On all other cores, as far as I can tell, all shifts/extensions for integer loads are free (i.e. the same cost as an unshifted load).

To reflect this, this patch:

- Enables aggressive folding of shifts into loads by default.
- Removes the old AddrLSLFast feature, since it applies to everything except A7 (and even if you are explicitly targeting A7, we want to assume extensions are free because the code will almost always run on a newer core).
- Adds a new feature AddrLSLSlow14 that applies specifically to the Cortex cores where shifts by 1 or 4 cost extra.

I didn't add support for AddrLSLSlow14 on the GlobalISel side because it would require a bunch of refactoring to work correctly. Someone can pick this up as a followup.
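For a concrete picture of the fold in question, the sketch below mirrors the gep3 test updated in aarch64-fold-lslfast.ll: a shifted index that feeds both a load and a store. The function name is invented here, and the CHECK lines are copied from that updated test, so treat the exact register choices as indicative rather than guaranteed.

; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s
define i64 @fold_lsl_into_ldr_str(ptr %p, i64 %b) {
; With the new default heuristic, the "lsl #3" implied by the i64 GEP is
; folded into both addressing modes instead of being materialized once
; into a scratch register.
; CHECK-LABEL: fold_lsl_into_ldr_str:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, x0
; CHECK-NEXT:    ldr x0, [x0, x1, lsl #3]
; CHECK-NEXT:    str x1, [x8, x1, lsl #3]
; CHECK-NEXT:    ret
  %g = getelementptr inbounds i64, ptr %p, i64 %b
  %l = load i64, ptr %g
  store i64 %b, ptr %g
  ret i64 %l
}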
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64.td                                |  53
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp                   |  29
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp                      |   2
-rw-r--r--  llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp      |   6
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir    |  12
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll                 | 112
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll  |   5
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll              |   2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll                    |  20
-rw-r--r--  llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll             |  11
-rw-r--r--  llvm/test/CodeGen/AArch64/cheap-as-a-move.ll                      |  30
-rw-r--r--  llvm/test/CodeGen/AArch64/extract-bits.ll                         |   5
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-licm-hoist-load.ll              |   5
-rw-r--r--  llvm/test/CodeGen/AArch64/sink-and-fold.ll                        |   4
14 files changed, 119 insertions, 177 deletions
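The new addr-lsl-slow-14 tuning can be exercised the same way the updated RUN lines in aarch64-fold-lslfast.ll do. The sketch below is illustrative only: the function name and FileCheck prefixes are invented, and the expected instruction shapes follow the heuristic described above rather than verified llc output. By default a shift of 1 folds into the halfword accesses; with +addr-lsl-slow-14 the shift is expected to stay as a separate lsl.

; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefix=FAST
; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+addr-lsl-slow-14 | FileCheck %s --check-prefix=SLOW14
define i16 @halfword_ldst(ptr %p, i64 %b, i16 %v) {
; Default tuning: the shift-by-1 is folded into the register-offset forms.
; FAST:   ldrh {{w[0-9]+}}, [x0, x1, lsl #1]
; FAST:   strh {{w[0-9]+}}, [x0, x1, lsl #1]
; Slow-14 tuning: shifts of 1 (and 4) cost an extra micro-op, so the address
; arithmetic is expected to be computed once instead.
; SLOW14: lsl {{x[0-9]+}}, x1, #1
; SLOW14: ldrh {{w[0-9]+}}, [x0, {{x[0-9]+}}]
  %g = getelementptr inbounds i16, ptr %p, i64 %b
  %l = load i16, ptr %g
  store i16 %v, ptr %g
  ret i16 %l
}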
diff --git a/llvm/lib/Target/AArch64/AArch64.td b/llvm/lib/Target/AArch64/AArch64.td
index 6425aa9..3af427d 100644
--- a/llvm/lib/Target/AArch64/AArch64.td
+++ b/llvm/lib/Target/AArch64/AArch64.td
@@ -391,9 +391,18 @@ def FeatureNoNegativeImmediates : SubtargetFeature<"no-neg-immediates",
"equivalent when the immediate does "
"not fit in the encoding.">;
-def FeatureAddrLSLFast : SubtargetFeature<
- "addr-lsl-fast", "HasAddrLSLFast", "true",
- "Address operands with logical shift of up to 3 places are cheap">;
+// Address operands with shift amount 2 or 3 are fast on all Arm chips except
+// some old Apple cores (A7-A10?) which handle all shifts slowly. Cortex-A57
+// and derived designs through Cortex-X1 take an extra micro-op for shifts
+// of 1 or 4. Other Arm chips handle all shifted operands at the same speed
+// as unshifted operands.
+//
+// We don't try to model the behavior of the old Apple cores because new code
+// targeting A7 is very unlikely to actually run on an A7. The Cortex cores
+// are modeled by FeatureAddrLSLSlow14.
+def FeatureAddrLSLSlow14 : SubtargetFeature<
+ "addr-lsl-slow-14", "HasAddrLSLSlow14", "true",
+ "Address operands with shift amount of 1 or 4 are slow">;
def FeatureALULSLFast : SubtargetFeature<
"alu-lsl-fast", "HasALULSLFast", "true",
@@ -885,6 +894,7 @@ def TuneA57 : SubtargetFeature<"a57", "ARMProcFamily", "CortexA57",
FeatureBalanceFPOps,
FeatureFuseAdrpAdd,
FeatureFuseLiterals,
+ FeatureAddrLSLSlow14,
FeaturePostRAScheduler,
FeatureEnableSelectOptimize,
FeaturePredictableSelectIsExpensive]>;
@@ -903,6 +913,7 @@ def TuneA72 : SubtargetFeature<"a72", "ARMProcFamily", "CortexA72",
FeatureFuseAES,
FeatureFuseAdrpAdd,
FeatureFuseLiterals,
+ FeatureAddrLSLSlow14,
FeatureEnableSelectOptimize,
FeaturePredictableSelectIsExpensive]>;
@@ -910,6 +921,7 @@ def TuneA73 : SubtargetFeature<"a73", "ARMProcFamily", "CortexA73",
"Cortex-A73 ARM processors", [
FeatureFuseAES,
FeatureFuseAdrpAdd,
+ FeatureAddrLSLSlow14,
FeatureEnableSelectOptimize,
FeaturePredictableSelectIsExpensive]>;
@@ -917,6 +929,7 @@ def TuneA75 : SubtargetFeature<"a75", "ARMProcFamily", "CortexA75",
"Cortex-A75 ARM processors", [
FeatureFuseAES,
FeatureFuseAdrpAdd,
+ FeatureAddrLSLSlow14,
FeatureEnableSelectOptimize,
FeaturePredictableSelectIsExpensive]>;
@@ -924,7 +937,7 @@ def TuneA76 : SubtargetFeature<"a76", "ARMProcFamily", "CortexA76",
"Cortex-A76 ARM processors", [
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
+ FeatureAddrLSLSlow14,
FeatureALULSLFast,
FeatureEnableSelectOptimize,
FeaturePredictableSelectIsExpensive]>;
@@ -934,7 +947,7 @@ def TuneA77 : SubtargetFeature<"a77", "ARMProcFamily", "CortexA77",
FeatureCmpBccFusion,
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
+ FeatureAddrLSLSlow14,
FeatureALULSLFast,
FeatureEnableSelectOptimize,
FeaturePredictableSelectIsExpensive]>;
@@ -944,7 +957,7 @@ def TuneA78 : SubtargetFeature<"a78", "ARMProcFamily", "CortexA78",
FeatureCmpBccFusion,
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
+ FeatureAddrLSLSlow14,
FeatureALULSLFast,
FeaturePostRAScheduler,
FeatureEnableSelectOptimize,
@@ -956,7 +969,7 @@ def TuneA78AE : SubtargetFeature<"a78ae", "ARMProcFamily",
FeatureCmpBccFusion,
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
+ FeatureAddrLSLSlow14,
FeatureALULSLFast,
FeaturePostRAScheduler,
FeatureEnableSelectOptimize,
@@ -968,7 +981,7 @@ def TuneA78C : SubtargetFeature<"a78c", "ARMProcFamily",
FeatureCmpBccFusion,
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
+ FeatureAddrLSLSlow14,
FeatureALULSLFast,
FeaturePostRAScheduler,
FeatureEnableSelectOptimize,
@@ -979,7 +992,6 @@ def TuneA710 : SubtargetFeature<"a710", "ARMProcFamily", "CortexA710",
FeatureCmpBccFusion,
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeaturePostRAScheduler,
FeatureEnableSelectOptimize,
@@ -990,7 +1002,6 @@ def TuneA715 : SubtargetFeature<"a715", "ARMProcFamily", "CortexA715",
FeatureFuseAES,
FeaturePostRAScheduler,
FeatureCmpBccFusion,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeatureFuseAdrpAdd,
FeatureEnableSelectOptimize,
@@ -1001,7 +1012,6 @@ def TuneA720 : SubtargetFeature<"a720", "ARMProcFamily", "CortexA720",
FeatureFuseAES,
FeaturePostRAScheduler,
FeatureCmpBccFusion,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeatureFuseAdrpAdd,
FeatureEnableSelectOptimize,
@@ -1012,7 +1022,6 @@ def TuneA720AE : SubtargetFeature<"a720ae", "ARMProcFamily", "CortexA720",
FeatureFuseAES,
FeaturePostRAScheduler,
FeatureCmpBccFusion,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeatureFuseAdrpAdd,
FeatureEnableSelectOptimize,
@@ -1028,7 +1037,7 @@ def TuneX1 : SubtargetFeature<"cortex-x1", "ARMProcFamily", "CortexX1",
FeatureCmpBccFusion,
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
+ FeatureAddrLSLSlow14,
FeatureALULSLFast,
FeaturePostRAScheduler,
FeatureEnableSelectOptimize,
@@ -1039,7 +1048,6 @@ def TuneX2 : SubtargetFeature<"cortex-x2", "ARMProcFamily", "CortexX2",
FeatureCmpBccFusion,
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeaturePostRAScheduler,
FeatureEnableSelectOptimize,
@@ -1047,7 +1055,6 @@ def TuneX2 : SubtargetFeature<"cortex-x2", "ARMProcFamily", "CortexX2",
def TuneX3 : SubtargetFeature<"cortex-x3", "ARMProcFamily", "CortexX3",
"Cortex-X3 ARM processors", [
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeatureFuseAdrpAdd,
FeatureFuseAES,
@@ -1057,7 +1064,6 @@ def TuneX3 : SubtargetFeature<"cortex-x3", "ARMProcFamily", "CortexX3",
def TuneX4 : SubtargetFeature<"cortex-x4", "ARMProcFamily", "CortexX4",
"Cortex-X4 ARM processors", [
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeatureFuseAdrpAdd,
FeatureFuseAES,
@@ -1215,7 +1221,6 @@ def TuneExynosM3 : SubtargetFeature<"exynosm3", "ARMProcFamily", "ExynosM3",
FeatureFuseAdrpAdd,
FeatureFuseLiterals,
FeatureStorePairSuppress,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeaturePostRAScheduler,
FeaturePredictableSelectIsExpensive]>;
@@ -1234,7 +1239,6 @@ def TuneExynosM4 : SubtargetFeature<"exynosm4", "ARMProcFamily", "ExynosM3",
FeatureFuseAdrpAdd,
FeatureFuseLiterals,
FeatureStorePairSuppress,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeaturePostRAScheduler,
FeatureZCZeroing]>;
@@ -1244,7 +1248,6 @@ def TuneKryo : SubtargetFeature<"kryo", "ARMProcFamily", "Kryo",
FeaturePostRAScheduler,
FeaturePredictableSelectIsExpensive,
FeatureZCZeroing,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeatureStorePairSuppress]>;
@@ -1254,7 +1257,6 @@ def TuneFalkor : SubtargetFeature<"falkor", "ARMProcFamily", "Falkor",
FeaturePredictableSelectIsExpensive,
FeatureZCZeroing,
FeatureStorePairSuppress,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeatureSlowSTRQro]>;
@@ -1268,7 +1270,7 @@ def TuneNeoverseN1 : SubtargetFeature<"neoversen1", "ARMProcFamily", "NeoverseN1
"Neoverse N1 ARM processors", [
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
+ FeatureAddrLSLSlow14,
FeatureALULSLFast,
FeaturePostRAScheduler,
FeatureEnableSelectOptimize,
@@ -1278,7 +1280,6 @@ def TuneNeoverseN2 : SubtargetFeature<"neoversen2", "ARMProcFamily", "NeoverseN2
"Neoverse N2 ARM processors", [
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeaturePostRAScheduler,
FeatureEnableSelectOptimize,
@@ -1288,7 +1289,6 @@ def TuneNeoverse512TVB : SubtargetFeature<"neoverse512tvb", "ARMProcFamily", "Ne
"Neoverse 512-TVB ARM processors", [
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeaturePostRAScheduler,
FeatureEnableSelectOptimize,
@@ -1298,7 +1298,7 @@ def TuneNeoverseV1 : SubtargetFeature<"neoversev1", "ARMProcFamily", "NeoverseV1
"Neoverse V1 ARM processors", [
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
+ FeatureAddrLSLSlow14,
FeatureALULSLFast,
FeaturePostRAScheduler,
FeatureEnableSelectOptimize,
@@ -1309,7 +1309,6 @@ def TuneNeoverseV2 : SubtargetFeature<"neoversev2", "ARMProcFamily", "NeoverseV2
"Neoverse V2 ARM processors", [
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeaturePostRAScheduler,
FeatureEnableSelectOptimize,
@@ -1321,7 +1320,6 @@ def TuneSaphira : SubtargetFeature<"saphira", "ARMProcFamily", "Saphira",
FeaturePredictableSelectIsExpensive,
FeatureZCZeroing,
FeatureStorePairSuppress,
- FeatureAddrLSLFast,
FeatureALULSLFast]>;
def TuneThunderX2T99 : SubtargetFeature<"thunderx2t99", "ARMProcFamily", "ThunderX2T99",
@@ -1381,7 +1379,6 @@ def TuneAmpere1 : SubtargetFeature<"ampere1", "ARMProcFamily", "Ampere1",
FeaturePostRAScheduler,
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeatureAggressiveFMA,
FeatureArithmeticBccFusion,
@@ -1397,7 +1394,6 @@ def TuneAmpere1A : SubtargetFeature<"ampere1a", "ARMProcFamily", "Ampere1A",
FeaturePostRAScheduler,
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeatureAggressiveFMA,
FeatureArithmeticBccFusion,
@@ -1414,7 +1410,6 @@ def TuneAmpere1B : SubtargetFeature<"ampere1b", "ARMProcFamily", "Ampere1B",
FeaturePostRAScheduler,
FeatureFuseAES,
FeatureFuseAdrpAdd,
- FeatureAddrLSLFast,
FeatureALULSLFast,
FeatureAggressiveFMA,
FeatureArithmeticBccFusion,
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 163ed52..51bec36 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -462,7 +462,7 @@ private:
SDValue &Offset, SDValue &SignExtend,
SDValue &DoShift);
bool isWorthFoldingALU(SDValue V, bool LSL = false) const;
- bool isWorthFoldingAddr(SDValue V) const;
+ bool isWorthFoldingAddr(SDValue V, unsigned Size) const;
bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
SDValue &Offset, SDValue &SignExtend);
@@ -674,17 +674,22 @@ static bool isWorthFoldingSHL(SDValue V) {
/// Determine whether it is worth to fold V into an extended register addressing
/// mode.
-bool AArch64DAGToDAGISel::isWorthFoldingAddr(SDValue V) const {
+bool AArch64DAGToDAGISel::isWorthFoldingAddr(SDValue V, unsigned Size) const {
// Trivial if we are optimizing for code size or if there is only
// one use of the value.
if (CurDAG->shouldOptForSize() || V.hasOneUse())
return true;
- // If a subtarget has a fastpath LSL we can fold a logical shift into
- // the addressing mode and save a cycle.
- if (Subtarget->hasAddrLSLFast() && V.getOpcode() == ISD::SHL &&
- isWorthFoldingSHL(V))
+
+ // If a subtarget has a slow shift, folding a shift into multiple loads
+ // costs additional micro-ops.
+ if (Subtarget->hasAddrLSLSlow14() && (Size == 2 || Size == 16))
+ return false;
+
+ // Check whether we're going to emit the address arithmetic anyway because
+ // it's used by a non-address operation.
+ if (V.getOpcode() == ISD::SHL && isWorthFoldingSHL(V))
return true;
- if (Subtarget->hasAddrLSLFast() && V.getOpcode() == ISD::ADD) {
+ if (V.getOpcode() == ISD::ADD) {
const SDValue LHS = V.getOperand(0);
const SDValue RHS = V.getOperand(1);
if (LHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(LHS))
@@ -1203,7 +1208,7 @@ bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
return false;
- return isWorthFoldingAddr(N);
+ return isWorthFoldingAddr(N, Size);
}
bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
@@ -1231,7 +1236,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
}
// Remember if it is worth folding N when it produces extended register.
- bool IsExtendedRegisterWorthFolding = isWorthFoldingAddr(N);
+ bool IsExtendedRegisterWorthFolding = isWorthFoldingAddr(N, Size);
// Try to match a shifted extend on the RHS.
if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
@@ -1261,7 +1266,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
MVT::i32);
- if (isWorthFoldingAddr(LHS))
+ if (isWorthFoldingAddr(LHS, Size))
return true;
}
@@ -1273,7 +1278,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
MVT::i32);
- if (isWorthFoldingAddr(RHS))
+ if (isWorthFoldingAddr(RHS, Size))
return true;
}
@@ -1343,7 +1348,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
}
// Remember if it is worth folding N when it produces extended register.
- bool IsExtendedRegisterWorthFolding = isWorthFoldingAddr(N);
+ bool IsExtendedRegisterWorthFolding = isWorthFoldingAddr(N, Size);
// Try to match a shifted extend on the RHS.
if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index d0c5e6b..22687b0 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2993,7 +2993,7 @@ bool AArch64InstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI,
return false;
Shift = AArch64_AM::getShiftValue(Shift);
if (!OptSize) {
- if ((Shift != 2 && Shift != 3) || !Subtarget.hasAddrLSLFast())
+ if (Shift != 2 && Shift != 3 && Subtarget.hasAddrLSLSlow14())
return false;
if (avoidSlowSTRQ(MemI))
return false;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index a8f2c45..d4daf17 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -6907,10 +6907,8 @@ bool AArch64InstructionSelector::isWorthFoldingIntoExtendedReg(
MI.getParent()->getParent()->getFunction().hasOptSize())
return true;
- // It's better to avoid folding and recomputing shifts when we don't have a
- // fastpath.
- if (!STI.hasAddrLSLFast())
- return false;
+ // FIXME: Consider checking HasAddrLSLSlow14 and HasALULSLFast as
+ // appropriate.
// We have a fastpath, so folding a shift in and potentially computing it
// many times may be beneficial. Check if this is only used in memory ops.
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
index 499c08f..7921de6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
@@ -15,7 +15,7 @@
define void @mul_wrong_pow_2(ptr %addr) { ret void }
define void @more_than_one_use_shl_1(ptr %addr) { ret void }
define void @more_than_one_use_shl_2(ptr %addr) { ret void }
- define void @more_than_one_use_shl_lsl_fast(ptr %addr) #1 { ret void }
+ define void @more_than_one_use_shl_lsl_fast(ptr %addr) { ret void }
define void @more_than_one_use_shl_lsl_slow(ptr %addr) { ret void }
define void @more_than_one_use_shl_minsize(ptr %addr) #0 { ret void }
define void @ldrwrox(ptr %addr) { ret void }
@@ -24,7 +24,6 @@
define void @ldbbrox(ptr %addr) { ret void }
define void @ldrqrox(ptr %addr) { ret void }
attributes #0 = { optsize }
- attributes #1 = { "target-features"="+addr-lsl-fast" }
...
---
@@ -478,11 +477,10 @@ body: |
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK-NEXT: [[ADDXrs:%[0-9]+]]:gpr64common = ADDXrs [[COPY1]], [[COPY]], 3
- ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
- ; CHECK-NEXT: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
- ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
index 59cd87f..022aaea 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefixes=CHECK,CHECK0
-; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+addr-lsl-fast | FileCheck %s --check-prefixes=CHECK,CHECK3
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+addr-lsl-slow-14 | FileCheck %s --check-prefixes=CHECK,CHECK0
+; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefixes=CHECK,CHECK3
%struct.a = type [256 x i16]
%struct.b = type [256 x i32]
@@ -49,36 +49,20 @@ define i16 @halfword(ptr %ctx, i32 %xor72) nounwind {
}
define i32 @word(ptr %ctx, i32 %xor72) nounwind {
-; CHECK0-LABEL: word:
-; CHECK0: // %bb.0:
-; CHECK0-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
-; CHECK0-NEXT: // kill: def $w1 killed $w1 def $x1
-; CHECK0-NEXT: ubfx x8, x1, #9, #8
-; CHECK0-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
-; CHECK0-NEXT: mov x19, x0
-; CHECK0-NEXT: lsl x21, x8, #2
-; CHECK0-NEXT: ldr w20, [x0, x21]
-; CHECK0-NEXT: bl foo
-; CHECK0-NEXT: mov w0, w20
-; CHECK0-NEXT: str w20, [x19, x21]
-; CHECK0-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK0-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
-; CHECK0-NEXT: ret
-;
-; CHECK3-LABEL: word:
-; CHECK3: // %bb.0:
-; CHECK3-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
-; CHECK3-NEXT: // kill: def $w1 killed $w1 def $x1
-; CHECK3-NEXT: ubfx x21, x1, #9, #8
-; CHECK3-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
-; CHECK3-NEXT: mov x19, x0
-; CHECK3-NEXT: ldr w20, [x0, x21, lsl #2]
-; CHECK3-NEXT: bl foo
-; CHECK3-NEXT: mov w0, w20
-; CHECK3-NEXT: str w20, [x19, x21, lsl #2]
-; CHECK3-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK3-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
-; CHECK3-NEXT: ret
+; CHECK-LABEL: word:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT: ubfx x21, x1, #9, #8
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: mov x19, x0
+; CHECK-NEXT: ldr w20, [x0, x21, lsl #2]
+; CHECK-NEXT: bl foo
+; CHECK-NEXT: mov w0, w20
+; CHECK-NEXT: str w20, [x19, x21, lsl #2]
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
@@ -90,36 +74,20 @@ define i32 @word(ptr %ctx, i32 %xor72) nounwind {
}
define i64 @doubleword(ptr %ctx, i32 %xor72) nounwind {
-; CHECK0-LABEL: doubleword:
-; CHECK0: // %bb.0:
-; CHECK0-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
-; CHECK0-NEXT: // kill: def $w1 killed $w1 def $x1
-; CHECK0-NEXT: ubfx x8, x1, #9, #8
-; CHECK0-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
-; CHECK0-NEXT: mov x19, x0
-; CHECK0-NEXT: lsl x21, x8, #3
-; CHECK0-NEXT: ldr x20, [x0, x21]
-; CHECK0-NEXT: bl foo
-; CHECK0-NEXT: mov x0, x20
-; CHECK0-NEXT: str x20, [x19, x21]
-; CHECK0-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK0-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
-; CHECK0-NEXT: ret
-;
-; CHECK3-LABEL: doubleword:
-; CHECK3: // %bb.0:
-; CHECK3-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
-; CHECK3-NEXT: // kill: def $w1 killed $w1 def $x1
-; CHECK3-NEXT: ubfx x21, x1, #9, #8
-; CHECK3-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
-; CHECK3-NEXT: mov x19, x0
-; CHECK3-NEXT: ldr x20, [x0, x21, lsl #3]
-; CHECK3-NEXT: bl foo
-; CHECK3-NEXT: mov x0, x20
-; CHECK3-NEXT: str x20, [x19, x21, lsl #3]
-; CHECK3-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK3-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
-; CHECK3-NEXT: ret
+; CHECK-LABEL: doubleword:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT: ubfx x21, x1, #9, #8
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: mov x19, x0
+; CHECK-NEXT: ldr x20, [x0, x21, lsl #3]
+; CHECK-NEXT: bl foo
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: str x20, [x19, x21, lsl #3]
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
@@ -163,20 +131,12 @@ endbb:
}
define i64 @gep3(ptr %p, i64 %b) {
-; CHECK0-LABEL: gep3:
-; CHECK0: // %bb.0:
-; CHECK0-NEXT: lsl x9, x1, #3
-; CHECK0-NEXT: mov x8, x0
-; CHECK0-NEXT: ldr x0, [x0, x9]
-; CHECK0-NEXT: str x1, [x8, x9]
-; CHECK0-NEXT: ret
-;
-; CHECK3-LABEL: gep3:
-; CHECK3: // %bb.0:
-; CHECK3-NEXT: mov x8, x0
-; CHECK3-NEXT: ldr x0, [x0, x1, lsl #3]
-; CHECK3-NEXT: str x1, [x8, x1, lsl #3]
-; CHECK3-NEXT: ret
+; CHECK-LABEL: gep3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x8, x0
+; CHECK-NEXT: ldr x0, [x0, x1, lsl #3]
+; CHECK-NEXT: str x1, [x8, x1, lsl #3]
+; CHECK-NEXT: ret
%g = getelementptr inbounds i64, ptr %p, i64 %b
%l = load i64, ptr %g
store i64 %b, ptr %g
diff --git a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
index 573f921..e31c9a0 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
@@ -134,9 +134,8 @@ define void @test8(i64 %a, ptr noalias %src, ptr noalias %dst, i64 %n) {
; CHECK-NEXT: b.hs .LBB7_1
; CHECK-NEXT: // %bb.3: // %if.then
; CHECK-NEXT: // in Loop: Header=BB7_2 Depth=1
-; CHECK-NEXT: lsl x10, x8, #3
-; CHECK-NEXT: ldr x11, [x1, x10]
-; CHECK-NEXT: str x11, [x2, x10]
+; CHECK-NEXT: ldr x10, [x1, x8, lsl #3]
+; CHECK-NEXT: str x10, [x2, x8, lsl #3]
; CHECK-NEXT: b .LBB7_1
; CHECK-NEXT: .LBB7_4: // %exit
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
index d593272..6bcd2f0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
@@ -125,7 +125,7 @@ return: ; preds = %if.end23, %if.then3
}
; CHECK: @test
-; CHECK-NOT: , uxtw #2]
+; CHECK: , uxtw #2]
define i32 @test(ptr %array, i8 zeroext %c, i32 %arg) {
entry:
%conv = zext i8 %c to i32
diff --git a/llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll b/llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll
index 3542b26..5b055a4 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll
@@ -201,11 +201,10 @@ define void @fct1_64x1(ptr nocapture %array, i64 %offset) nounwind ssp {
; CHECK-LABEL: fct1_64x1:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: adrp x8, :got:globalArray64x1
-; CHECK-NEXT: lsl x9, x1, #3
; CHECK-NEXT: ldr x8, [x8, :got_lo12:globalArray64x1]
-; CHECK-NEXT: ldr d0, [x0, x9]
+; CHECK-NEXT: ldr d0, [x0, x1, lsl #3]
; CHECK-NEXT: ldr x8, [x8]
-; CHECK-NEXT: str d0, [x8, x9]
+; CHECK-NEXT: str d0, [x8, x1, lsl #3]
; CHECK-NEXT: ret
entry:
%arrayidx = getelementptr inbounds <1 x i64>, ptr %array, i64 %offset
@@ -238,11 +237,10 @@ define void @fct1_32x2(ptr nocapture %array, i64 %offset) nounwind ssp {
; CHECK-LABEL: fct1_32x2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: adrp x8, :got:globalArray32x2
-; CHECK-NEXT: lsl x9, x1, #3
; CHECK-NEXT: ldr x8, [x8, :got_lo12:globalArray32x2]
-; CHECK-NEXT: ldr d0, [x0, x9]
+; CHECK-NEXT: ldr d0, [x0, x1, lsl #3]
; CHECK-NEXT: ldr x8, [x8]
-; CHECK-NEXT: str d0, [x8, x9]
+; CHECK-NEXT: str d0, [x8, x1, lsl #3]
; CHECK-NEXT: ret
entry:
%arrayidx = getelementptr inbounds <2 x i32>, ptr %array, i64 %offset
@@ -275,11 +273,10 @@ define void @fct1_16x4(ptr nocapture %array, i64 %offset) nounwind ssp {
; CHECK-LABEL: fct1_16x4:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: adrp x8, :got:globalArray16x4
-; CHECK-NEXT: lsl x9, x1, #3
; CHECK-NEXT: ldr x8, [x8, :got_lo12:globalArray16x4]
-; CHECK-NEXT: ldr d0, [x0, x9]
+; CHECK-NEXT: ldr d0, [x0, x1, lsl #3]
; CHECK-NEXT: ldr x8, [x8]
-; CHECK-NEXT: str d0, [x8, x9]
+; CHECK-NEXT: str d0, [x8, x1, lsl #3]
; CHECK-NEXT: ret
entry:
%arrayidx = getelementptr inbounds <4 x i16>, ptr %array, i64 %offset
@@ -312,11 +309,10 @@ define void @fct1_8x8(ptr nocapture %array, i64 %offset) nounwind ssp {
; CHECK-LABEL: fct1_8x8:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: adrp x8, :got:globalArray8x8
-; CHECK-NEXT: lsl x9, x1, #3
; CHECK-NEXT: ldr x8, [x8, :got_lo12:globalArray8x8]
-; CHECK-NEXT: ldr d0, [x0, x9]
+; CHECK-NEXT: ldr d0, [x0, x1, lsl #3]
; CHECK-NEXT: ldr x8, [x8]
-; CHECK-NEXT: str d0, [x8, x9]
+; CHECK-NEXT: str d0, [x8, x1, lsl #3]
; CHECK-NEXT: ret
entry:
%arrayidx = getelementptr inbounds <8 x i8>, ptr %array, i64 %offset
diff --git a/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll b/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
index 8f19553..634d1b9 100644
--- a/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
+++ b/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
@@ -82,13 +82,12 @@ define void @avoid_promotion_2_and(ptr nocapture noundef %arg) {
; CHECK-NEXT: eor w10, w10, w11
; CHECK-NEXT: ldur w11, [x8, #-24]
; CHECK-NEXT: and w10, w10, w14
-; CHECK-NEXT: ldp x15, x14, [x8, #-16]
-; CHECK-NEXT: ubfiz x13, x10, #1, #32
+; CHECK-NEXT: ldp x14, x13, [x8, #-16]
; CHECK-NEXT: str w10, [x8]
-; CHECK-NEXT: and w10, w11, w12
-; CHECK-NEXT: ldrh w11, [x14, x13]
-; CHECK-NEXT: strh w11, [x15, w10, uxtw #1]
-; CHECK-NEXT: strh w12, [x14, x13]
+; CHECK-NEXT: and w11, w11, w12
+; CHECK-NEXT: ldrh w15, [x13, w10, uxtw #1]
+; CHECK-NEXT: strh w15, [x14, w11, uxtw #1]
+; CHECK-NEXT: strh w12, [x13, w10, uxtw #1]
; CHECK-NEXT: b LBB1_1
; CHECK-NEXT: LBB1_4: ; %exit
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/cheap-as-a-move.ll b/llvm/test/CodeGen/AArch64/cheap-as-a-move.ll
index b5c2104..50c70c5 100644
--- a/llvm/test/CodeGen/AArch64/cheap-as-a-move.ll
+++ b/llvm/test/CodeGen/AArch64/cheap-as-a-move.ll
@@ -7,7 +7,7 @@ target triple = "aarch64-unknown-linux"
define void @f0(ptr %a, i64 %n) {
; CHECK-LABEL: f0:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: stp x30, x23, [sp, #-48]! // 16-byte Folded Spill
+; CHECK-NEXT: str x30, [sp, #-48]! // 8-byte Folded Spill
; CHECK-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
@@ -15,7 +15,6 @@ define void @f0(ptr %a, i64 %n) {
; CHECK-NEXT: .cfi_offset w20, -16
; CHECK-NEXT: .cfi_offset w21, -24
; CHECK-NEXT: .cfi_offset w22, -32
-; CHECK-NEXT: .cfi_offset w23, -40
; CHECK-NEXT: .cfi_offset w30, -48
; CHECK-NEXT: mov x21, #1 // =0x1
; CHECK-NEXT: mov x19, x1
@@ -27,18 +26,17 @@ define void @f0(ptr %a, i64 %n) {
; CHECK-NEXT: b.ge .LBB0_2
; CHECK-NEXT: .LBB0_1: // %loop.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: lsl x23, x22, #2
+; CHECK-NEXT: ldr w0, [x20, x22, lsl #2]
; CHECK-NEXT: mov x1, x21
-; CHECK-NEXT: ldr w0, [x20, x23]
; CHECK-NEXT: bl g
-; CHECK-NEXT: str w0, [x20, x23]
+; CHECK-NEXT: str w0, [x20, x22, lsl #2]
; CHECK-NEXT: add x22, x22, #1
; CHECK-NEXT: cmp x22, x19
; CHECK-NEXT: b.lt .LBB0_1
; CHECK-NEXT: .LBB0_2: // %exit
; CHECK-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; CHECK-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldp x30, x23, [sp], #48 // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp], #48 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
br label %loop
@@ -64,15 +62,13 @@ exit:
define void @f1(ptr %a, i64 %n) {
; CHECK-LABEL: f1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: str x30, [sp, #-48]! // 8-byte Folded Spill
-; CHECK-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w19, -8
; CHECK-NEXT: .cfi_offset w20, -16
; CHECK-NEXT: .cfi_offset w21, -24
-; CHECK-NEXT: .cfi_offset w22, -32
-; CHECK-NEXT: .cfi_offset w30, -48
+; CHECK-NEXT: .cfi_offset w30, -32
; CHECK-NEXT: mov x19, x1
; CHECK-NEXT: mov x20, x0
; CHECK-NEXT: mov x21, xzr
@@ -80,19 +76,17 @@ define void @f1(ptr %a, i64 %n) {
; CHECK-NEXT: b.ge .LBB1_2
; CHECK-NEXT: .LBB1_1: // %loop.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: lsl x22, x21, #2
+; CHECK-NEXT: ldr w0, [x20, x21, lsl #2]
; CHECK-NEXT: mov x1, #1450704896 // =0x56780000
; CHECK-NEXT: movk x1, #4660, lsl #48
-; CHECK-NEXT: ldr w0, [x20, x22]
; CHECK-NEXT: bl g
-; CHECK-NEXT: str w0, [x20, x22]
+; CHECK-NEXT: str w0, [x20, x21, lsl #2]
; CHECK-NEXT: add x21, x21, #1
; CHECK-NEXT: cmp x21, x19
; CHECK-NEXT: b.lt .LBB1_1
; CHECK-NEXT: .LBB1_2: // %exit
-; CHECK-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x30, [sp], #48 // 8-byte Folded Reload
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: ret
entry:
br label %loop
diff --git a/llvm/test/CodeGen/AArch64/extract-bits.ll b/llvm/test/CodeGen/AArch64/extract-bits.ll
index d4ea143..b87157a 100644
--- a/llvm/test/CodeGen/AArch64/extract-bits.ll
+++ b/llvm/test/CodeGen/AArch64/extract-bits.ll
@@ -972,10 +972,9 @@ define void @pr38938(ptr %a0, ptr %a1) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr x8, [x1]
; CHECK-NEXT: ubfx x8, x8, #21, #10
-; CHECK-NEXT: lsl x8, x8, #2
-; CHECK-NEXT: ldr w9, [x0, x8]
+; CHECK-NEXT: ldr w9, [x0, x8, lsl #2]
; CHECK-NEXT: add w9, w9, #1
-; CHECK-NEXT: str w9, [x0, x8]
+; CHECK-NEXT: str w9, [x0, x8, lsl #2]
; CHECK-NEXT: ret
%tmp = load i64, ptr %a1, align 8
%tmp1 = lshr i64 %tmp, 21
diff --git a/llvm/test/CodeGen/AArch64/machine-licm-hoist-load.ll b/llvm/test/CodeGen/AArch64/machine-licm-hoist-load.ll
index 30123a3..e8dafd5 100644
--- a/llvm/test/CodeGen/AArch64/machine-licm-hoist-load.ll
+++ b/llvm/test/CodeGen/AArch64/machine-licm-hoist-load.ll
@@ -223,10 +223,9 @@ define i64 @three_dimensional_middle(ptr %a, ptr %b, i64 %N, i64 %M, i64 %K) {
; CHECK-NEXT: // Parent Loop BB3_1 Depth=1
; CHECK-NEXT: // => This Loop Header: Depth=2
; CHECK-NEXT: // Child Loop BB3_3 Depth 3
-; CHECK-NEXT: lsl x12, x11, #3
+; CHECK-NEXT: ldr x13, [x1, x11, lsl #3]
+; CHECK-NEXT: ldr x12, [x10, x11, lsl #3]
; CHECK-NEXT: mov x14, x4
-; CHECK-NEXT: ldr x13, [x1, x12]
-; CHECK-NEXT: ldr x12, [x10, x12]
; CHECK-NEXT: ldr w13, [x13]
; CHECK-NEXT: .LBB3_3: // %for.body8
; CHECK-NEXT: // Parent Loop BB3_1 Depth=1
diff --git a/llvm/test/CodeGen/AArch64/sink-and-fold.ll b/llvm/test/CodeGen/AArch64/sink-and-fold.ll
index 5200722..f65a08a 100644
--- a/llvm/test/CodeGen/AArch64/sink-and-fold.ll
+++ b/llvm/test/CodeGen/AArch64/sink-and-fold.ll
@@ -100,7 +100,7 @@ exit:
}
; Address calculation cheap enough on some cores.
-define i32 @f3(i1 %c1, ptr %p, i64 %i) nounwind "target-features"="+alu-lsl-fast,+addr-lsl-fast" {
+define i32 @f3(i1 %c1, ptr %p, i64 %i) nounwind "target-features"="+alu-lsl-fast" {
; CHECK-LABEL: f3:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: tbz w0, #0, .LBB3_2
@@ -130,7 +130,7 @@ exit:
ret i32 %v
}
-define void @f4(ptr %a, i64 %n) nounwind "target-features"="+alu-lsl-fast,+addr-lsl-fast" {
+define void @f4(ptr %a, i64 %n) nounwind "target-features"="+alu-lsl-fast" {
; CHECK-LABEL: f4:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: cmp x1, #1