author     Eli Friedman <efriedma@quicinc.com>   2024-04-04 11:25:44 -0700
committer  GitHub <noreply@github.com>           2024-04-04 11:25:44 -0700
commit     c83f23d6abb6f8d693c643bc1b43f9b9e06bc537
tree       b7586fbbbf5e468c160261a670a7f385e6397316
parent     53fe94a0ce262c6e38117429a30814f54ea55b0f
[AArch64] Fix heuristics for folding "lsl" into load/store ops. (#86894)
The existing heuristics assumed that every core behaves like an Apple A7, where any extend/shift costs an extra micro-op. In reality, nothing else behaves that way: on some older Cortex designs, shifts by 1 or 4 cost extra, but all other shifts/extensions are free, and on all other cores, as far as I can tell, all shifts/extensions for integer loads are free (i.e. the same cost as an unshifted load). To reflect this, this patch:

- Enables aggressive folding of shifts into loads by default.
- Removes the old AddrLSLFast feature, since it applies to everything except the A7 (and even when explicitly targeting the A7, we want to assume extensions are free, because the code will almost always run on a newer core).
- Adds a new feature, AddrLSLSlow14, that applies specifically to the Cortex cores where shifts by 1 or 4 cost extra.

I didn't add support for AddrLSLSlow14 on the GlobalISel side because it would require a bunch of refactoring to work correctly. Someone can pick this up as a followup.
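For illustration, a minimal sketch of the pattern this changes (hypothetical function name; the before/after assembly is taken from the gep3 test update below, where the shifted index feeds both a load and a store):

    define i64 @gep3_like(ptr %p, i64 %b) {
      %g = getelementptr inbounds i64, ptr %p, i64 %b
      %l = load i64, ptr %g
      store i64 %b, ptr %g
      ret i64 %l
    }
    ; New default (shift folded into both addressing modes):
    ;   mov x8, x0
    ;   ldr x0, [x0, x1, lsl #3]
    ;   str x1, [x8, x1, lsl #3]
    ; Old default (shift materialized once and reused):
    ;   lsl x9, x1, #3
    ;   mov x8, x0
    ;   ldr x0, [x0, x9]
    ;   str x1, [x8, x9]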
Diffstat (limited to 'llvm/test/CodeGen/AArch64')
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir   |  12
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll                | 112
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll |   5
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll             |   2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll                   |  20
-rw-r--r--  llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll            |  11
-rw-r--r--  llvm/test/CodeGen/AArch64/cheap-as-a-move.ll                     |  30
-rw-r--r--  llvm/test/CodeGen/AArch64/extract-bits.ll                        |   5
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-licm-hoist-load.ll             |   5
-rw-r--r--  llvm/test/CodeGen/AArch64/sink-and-fold.ll                       |   4
10 files changed, 75 insertions, 131 deletions
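The diffs below also exercise the new -mattr=+addr-lsl-slow-14 tuning against the default. A hedged sketch of where the two should still diverge (function name hypothetical; per the commit message, shifts by 1 or 4 keep a separate lsl under slow-14 when the index has multiple uses, while shifts by 2 or 3 — as in the word/doubleword tests — now fold under both tunings; register choices are illustrative, not verified output):

    define i16 @halfword_like(ptr %p, i64 %i) {
      %g = getelementptr inbounds i16, ptr %p, i64 %i
      %v = load i16, ptr %g
      store i16 0, ptr %g
      ret i16 %v
    }
    ; Default (fold the shift-by-1 into both accesses):
    ;   ldrh w8, [x0, x1, lsl #1]
    ;   strh wzr, [x0, x1, lsl #1]
    ; +addr-lsl-slow-14 (a folded shift by 1 costs an extra micro-op,
    ; so materialize it once instead):
    ;   lsl  x8, x1, #1
    ;   ldrh w9, [x0, x8]
    ;   strh wzr, [x0, x8]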
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
index 499c08f..7921de6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
@@ -15,7 +15,7 @@
define void @mul_wrong_pow_2(ptr %addr) { ret void }
define void @more_than_one_use_shl_1(ptr %addr) { ret void }
define void @more_than_one_use_shl_2(ptr %addr) { ret void }
- define void @more_than_one_use_shl_lsl_fast(ptr %addr) #1 { ret void }
+ define void @more_than_one_use_shl_lsl_fast(ptr %addr) { ret void }
define void @more_than_one_use_shl_lsl_slow(ptr %addr) { ret void }
define void @more_than_one_use_shl_minsize(ptr %addr) #0 { ret void }
define void @ldrwrox(ptr %addr) { ret void }
@@ -24,7 +24,6 @@
define void @ldbbrox(ptr %addr) { ret void }
define void @ldrqrox(ptr %addr) { ret void }
attributes #0 = { optsize }
- attributes #1 = { "target-features"="+addr-lsl-fast" }
...
---
@@ -478,11 +477,10 @@ body: |
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK-NEXT: [[ADDXrs:%[0-9]+]]:gpr64common = ADDXrs [[COPY1]], [[COPY]], 3
- ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
- ; CHECK-NEXT: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
- ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
index 59cd87f..022aaea 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefixes=CHECK,CHECK0
-; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+addr-lsl-fast | FileCheck %s --check-prefixes=CHECK,CHECK3
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+addr-lsl-slow-14 | FileCheck %s --check-prefixes=CHECK,CHECK0
+; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefixes=CHECK,CHECK3
%struct.a = type [256 x i16]
%struct.b = type [256 x i32]
@@ -49,36 +49,20 @@ define i16 @halfword(ptr %ctx, i32 %xor72) nounwind {
}
define i32 @word(ptr %ctx, i32 %xor72) nounwind {
-; CHECK0-LABEL: word:
-; CHECK0: // %bb.0:
-; CHECK0-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
-; CHECK0-NEXT: // kill: def $w1 killed $w1 def $x1
-; CHECK0-NEXT: ubfx x8, x1, #9, #8
-; CHECK0-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
-; CHECK0-NEXT: mov x19, x0
-; CHECK0-NEXT: lsl x21, x8, #2
-; CHECK0-NEXT: ldr w20, [x0, x21]
-; CHECK0-NEXT: bl foo
-; CHECK0-NEXT: mov w0, w20
-; CHECK0-NEXT: str w20, [x19, x21]
-; CHECK0-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK0-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
-; CHECK0-NEXT: ret
-;
-; CHECK3-LABEL: word:
-; CHECK3: // %bb.0:
-; CHECK3-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
-; CHECK3-NEXT: // kill: def $w1 killed $w1 def $x1
-; CHECK3-NEXT: ubfx x21, x1, #9, #8
-; CHECK3-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
-; CHECK3-NEXT: mov x19, x0
-; CHECK3-NEXT: ldr w20, [x0, x21, lsl #2]
-; CHECK3-NEXT: bl foo
-; CHECK3-NEXT: mov w0, w20
-; CHECK3-NEXT: str w20, [x19, x21, lsl #2]
-; CHECK3-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK3-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
-; CHECK3-NEXT: ret
+; CHECK-LABEL: word:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT: ubfx x21, x1, #9, #8
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: mov x19, x0
+; CHECK-NEXT: ldr w20, [x0, x21, lsl #2]
+; CHECK-NEXT: bl foo
+; CHECK-NEXT: mov w0, w20
+; CHECK-NEXT: str w20, [x19, x21, lsl #2]
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
@@ -90,36 +74,20 @@ define i32 @word(ptr %ctx, i32 %xor72) nounwind {
}
define i64 @doubleword(ptr %ctx, i32 %xor72) nounwind {
-; CHECK0-LABEL: doubleword:
-; CHECK0: // %bb.0:
-; CHECK0-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
-; CHECK0-NEXT: // kill: def $w1 killed $w1 def $x1
-; CHECK0-NEXT: ubfx x8, x1, #9, #8
-; CHECK0-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
-; CHECK0-NEXT: mov x19, x0
-; CHECK0-NEXT: lsl x21, x8, #3
-; CHECK0-NEXT: ldr x20, [x0, x21]
-; CHECK0-NEXT: bl foo
-; CHECK0-NEXT: mov x0, x20
-; CHECK0-NEXT: str x20, [x19, x21]
-; CHECK0-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK0-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
-; CHECK0-NEXT: ret
-;
-; CHECK3-LABEL: doubleword:
-; CHECK3: // %bb.0:
-; CHECK3-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
-; CHECK3-NEXT: // kill: def $w1 killed $w1 def $x1
-; CHECK3-NEXT: ubfx x21, x1, #9, #8
-; CHECK3-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
-; CHECK3-NEXT: mov x19, x0
-; CHECK3-NEXT: ldr x20, [x0, x21, lsl #3]
-; CHECK3-NEXT: bl foo
-; CHECK3-NEXT: mov x0, x20
-; CHECK3-NEXT: str x20, [x19, x21, lsl #3]
-; CHECK3-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK3-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
-; CHECK3-NEXT: ret
+; CHECK-LABEL: doubleword:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT: ubfx x21, x1, #9, #8
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: mov x19, x0
+; CHECK-NEXT: ldr x20, [x0, x21, lsl #3]
+; CHECK-NEXT: bl foo
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: str x20, [x19, x21, lsl #3]
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
@@ -163,20 +131,12 @@ endbb:
}
define i64 @gep3(ptr %p, i64 %b) {
-; CHECK0-LABEL: gep3:
-; CHECK0: // %bb.0:
-; CHECK0-NEXT: lsl x9, x1, #3
-; CHECK0-NEXT: mov x8, x0
-; CHECK0-NEXT: ldr x0, [x0, x9]
-; CHECK0-NEXT: str x1, [x8, x9]
-; CHECK0-NEXT: ret
-;
-; CHECK3-LABEL: gep3:
-; CHECK3: // %bb.0:
-; CHECK3-NEXT: mov x8, x0
-; CHECK3-NEXT: ldr x0, [x0, x1, lsl #3]
-; CHECK3-NEXT: str x1, [x8, x1, lsl #3]
-; CHECK3-NEXT: ret
+; CHECK-LABEL: gep3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x8, x0
+; CHECK-NEXT: ldr x0, [x0, x1, lsl #3]
+; CHECK-NEXT: str x1, [x8, x1, lsl #3]
+; CHECK-NEXT: ret
%g = getelementptr inbounds i64, ptr %p, i64 %b
%l = load i64, ptr %g
store i64 %b, ptr %g
diff --git a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
index 573f921..e31c9a0 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
@@ -134,9 +134,8 @@ define void @test8(i64 %a, ptr noalias %src, ptr noalias %dst, i64 %n) {
; CHECK-NEXT: b.hs .LBB7_1
; CHECK-NEXT: // %bb.3: // %if.then
; CHECK-NEXT: // in Loop: Header=BB7_2 Depth=1
-; CHECK-NEXT: lsl x10, x8, #3
-; CHECK-NEXT: ldr x11, [x1, x10]
-; CHECK-NEXT: str x11, [x2, x10]
+; CHECK-NEXT: ldr x10, [x1, x8, lsl #3]
+; CHECK-NEXT: str x10, [x2, x8, lsl #3]
; CHECK-NEXT: b .LBB7_1
; CHECK-NEXT: .LBB7_4: // %exit
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
index d593272..6bcd2f0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
@@ -125,7 +125,7 @@ return: ; preds = %if.end23, %if.then3
}
; CHECK: @test
-; CHECK-NOT: , uxtw #2]
+; CHECK: , uxtw #2]
define i32 @test(ptr %array, i8 zeroext %c, i32 %arg) {
entry:
%conv = zext i8 %c to i32
diff --git a/llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll b/llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll
index 3542b26..5b055a4 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll
@@ -201,11 +201,10 @@ define void @fct1_64x1(ptr nocapture %array, i64 %offset) nounwind ssp {
; CHECK-LABEL: fct1_64x1:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: adrp x8, :got:globalArray64x1
-; CHECK-NEXT: lsl x9, x1, #3
; CHECK-NEXT: ldr x8, [x8, :got_lo12:globalArray64x1]
-; CHECK-NEXT: ldr d0, [x0, x9]
+; CHECK-NEXT: ldr d0, [x0, x1, lsl #3]
; CHECK-NEXT: ldr x8, [x8]
-; CHECK-NEXT: str d0, [x8, x9]
+; CHECK-NEXT: str d0, [x8, x1, lsl #3]
; CHECK-NEXT: ret
entry:
%arrayidx = getelementptr inbounds <1 x i64>, ptr %array, i64 %offset
@@ -238,11 +237,10 @@ define void @fct1_32x2(ptr nocapture %array, i64 %offset) nounwind ssp {
; CHECK-LABEL: fct1_32x2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: adrp x8, :got:globalArray32x2
-; CHECK-NEXT: lsl x9, x1, #3
; CHECK-NEXT: ldr x8, [x8, :got_lo12:globalArray32x2]
-; CHECK-NEXT: ldr d0, [x0, x9]
+; CHECK-NEXT: ldr d0, [x0, x1, lsl #3]
; CHECK-NEXT: ldr x8, [x8]
-; CHECK-NEXT: str d0, [x8, x9]
+; CHECK-NEXT: str d0, [x8, x1, lsl #3]
; CHECK-NEXT: ret
entry:
%arrayidx = getelementptr inbounds <2 x i32>, ptr %array, i64 %offset
@@ -275,11 +273,10 @@ define void @fct1_16x4(ptr nocapture %array, i64 %offset) nounwind ssp {
; CHECK-LABEL: fct1_16x4:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: adrp x8, :got:globalArray16x4
-; CHECK-NEXT: lsl x9, x1, #3
; CHECK-NEXT: ldr x8, [x8, :got_lo12:globalArray16x4]
-; CHECK-NEXT: ldr d0, [x0, x9]
+; CHECK-NEXT: ldr d0, [x0, x1, lsl #3]
; CHECK-NEXT: ldr x8, [x8]
-; CHECK-NEXT: str d0, [x8, x9]
+; CHECK-NEXT: str d0, [x8, x1, lsl #3]
; CHECK-NEXT: ret
entry:
%arrayidx = getelementptr inbounds <4 x i16>, ptr %array, i64 %offset
@@ -312,11 +309,10 @@ define void @fct1_8x8(ptr nocapture %array, i64 %offset) nounwind ssp {
; CHECK-LABEL: fct1_8x8:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: adrp x8, :got:globalArray8x8
-; CHECK-NEXT: lsl x9, x1, #3
; CHECK-NEXT: ldr x8, [x8, :got_lo12:globalArray8x8]
-; CHECK-NEXT: ldr d0, [x0, x9]
+; CHECK-NEXT: ldr d0, [x0, x1, lsl #3]
; CHECK-NEXT: ldr x8, [x8]
-; CHECK-NEXT: str d0, [x8, x9]
+; CHECK-NEXT: str d0, [x8, x1, lsl #3]
; CHECK-NEXT: ret
entry:
%arrayidx = getelementptr inbounds <8 x i8>, ptr %array, i64 %offset
diff --git a/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll b/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
index 8f19553..634d1b9 100644
--- a/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
+++ b/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
@@ -82,13 +82,12 @@ define void @avoid_promotion_2_and(ptr nocapture noundef %arg) {
; CHECK-NEXT: eor w10, w10, w11
; CHECK-NEXT: ldur w11, [x8, #-24]
; CHECK-NEXT: and w10, w10, w14
-; CHECK-NEXT: ldp x15, x14, [x8, #-16]
-; CHECK-NEXT: ubfiz x13, x10, #1, #32
+; CHECK-NEXT: ldp x14, x13, [x8, #-16]
; CHECK-NEXT: str w10, [x8]
-; CHECK-NEXT: and w10, w11, w12
-; CHECK-NEXT: ldrh w11, [x14, x13]
-; CHECK-NEXT: strh w11, [x15, w10, uxtw #1]
-; CHECK-NEXT: strh w12, [x14, x13]
+; CHECK-NEXT: and w11, w11, w12
+; CHECK-NEXT: ldrh w15, [x13, w10, uxtw #1]
+; CHECK-NEXT: strh w15, [x14, w11, uxtw #1]
+; CHECK-NEXT: strh w12, [x13, w10, uxtw #1]
; CHECK-NEXT: b LBB1_1
; CHECK-NEXT: LBB1_4: ; %exit
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/cheap-as-a-move.ll b/llvm/test/CodeGen/AArch64/cheap-as-a-move.ll
index b5c2104..50c70c5 100644
--- a/llvm/test/CodeGen/AArch64/cheap-as-a-move.ll
+++ b/llvm/test/CodeGen/AArch64/cheap-as-a-move.ll
@@ -7,7 +7,7 @@ target triple = "aarch64-unknown-linux"
define void @f0(ptr %a, i64 %n) {
; CHECK-LABEL: f0:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: stp x30, x23, [sp, #-48]! // 16-byte Folded Spill
+; CHECK-NEXT: str x30, [sp, #-48]! // 8-byte Folded Spill
; CHECK-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
@@ -15,7 +15,6 @@ define void @f0(ptr %a, i64 %n) {
; CHECK-NEXT: .cfi_offset w20, -16
; CHECK-NEXT: .cfi_offset w21, -24
; CHECK-NEXT: .cfi_offset w22, -32
-; CHECK-NEXT: .cfi_offset w23, -40
; CHECK-NEXT: .cfi_offset w30, -48
; CHECK-NEXT: mov x21, #1 // =0x1
; CHECK-NEXT: mov x19, x1
@@ -27,18 +26,17 @@ define void @f0(ptr %a, i64 %n) {
; CHECK-NEXT: b.ge .LBB0_2
; CHECK-NEXT: .LBB0_1: // %loop.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: lsl x23, x22, #2
+; CHECK-NEXT: ldr w0, [x20, x22, lsl #2]
; CHECK-NEXT: mov x1, x21
-; CHECK-NEXT: ldr w0, [x20, x23]
; CHECK-NEXT: bl g
-; CHECK-NEXT: str w0, [x20, x23]
+; CHECK-NEXT: str w0, [x20, x22, lsl #2]
; CHECK-NEXT: add x22, x22, #1
; CHECK-NEXT: cmp x22, x19
; CHECK-NEXT: b.lt .LBB0_1
; CHECK-NEXT: .LBB0_2: // %exit
; CHECK-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; CHECK-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldp x30, x23, [sp], #48 // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp], #48 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
br label %loop
@@ -64,15 +62,13 @@ exit:
define void @f1(ptr %a, i64 %n) {
; CHECK-LABEL: f1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: str x30, [sp, #-48]! // 8-byte Folded Spill
-; CHECK-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w19, -8
; CHECK-NEXT: .cfi_offset w20, -16
; CHECK-NEXT: .cfi_offset w21, -24
-; CHECK-NEXT: .cfi_offset w22, -32
-; CHECK-NEXT: .cfi_offset w30, -48
+; CHECK-NEXT: .cfi_offset w30, -32
; CHECK-NEXT: mov x19, x1
; CHECK-NEXT: mov x20, x0
; CHECK-NEXT: mov x21, xzr
@@ -80,19 +76,17 @@ define void @f1(ptr %a, i64 %n) {
; CHECK-NEXT: b.ge .LBB1_2
; CHECK-NEXT: .LBB1_1: // %loop.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: lsl x22, x21, #2
+; CHECK-NEXT: ldr w0, [x20, x21, lsl #2]
; CHECK-NEXT: mov x1, #1450704896 // =0x56780000
; CHECK-NEXT: movk x1, #4660, lsl #48
-; CHECK-NEXT: ldr w0, [x20, x22]
; CHECK-NEXT: bl g
-; CHECK-NEXT: str w0, [x20, x22]
+; CHECK-NEXT: str w0, [x20, x21, lsl #2]
; CHECK-NEXT: add x21, x21, #1
; CHECK-NEXT: cmp x21, x19
; CHECK-NEXT: b.lt .LBB1_1
; CHECK-NEXT: .LBB1_2: // %exit
-; CHECK-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x30, [sp], #48 // 8-byte Folded Reload
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: ret
entry:
br label %loop
diff --git a/llvm/test/CodeGen/AArch64/extract-bits.ll b/llvm/test/CodeGen/AArch64/extract-bits.ll
index d4ea143..b87157a 100644
--- a/llvm/test/CodeGen/AArch64/extract-bits.ll
+++ b/llvm/test/CodeGen/AArch64/extract-bits.ll
@@ -972,10 +972,9 @@ define void @pr38938(ptr %a0, ptr %a1) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr x8, [x1]
; CHECK-NEXT: ubfx x8, x8, #21, #10
-; CHECK-NEXT: lsl x8, x8, #2
-; CHECK-NEXT: ldr w9, [x0, x8]
+; CHECK-NEXT: ldr w9, [x0, x8, lsl #2]
; CHECK-NEXT: add w9, w9, #1
-; CHECK-NEXT: str w9, [x0, x8]
+; CHECK-NEXT: str w9, [x0, x8, lsl #2]
; CHECK-NEXT: ret
%tmp = load i64, ptr %a1, align 8
%tmp1 = lshr i64 %tmp, 21
diff --git a/llvm/test/CodeGen/AArch64/machine-licm-hoist-load.ll b/llvm/test/CodeGen/AArch64/machine-licm-hoist-load.ll
index 30123a3..e8dafd5 100644
--- a/llvm/test/CodeGen/AArch64/machine-licm-hoist-load.ll
+++ b/llvm/test/CodeGen/AArch64/machine-licm-hoist-load.ll
@@ -223,10 +223,9 @@ define i64 @three_dimensional_middle(ptr %a, ptr %b, i64 %N, i64 %M, i64 %K) {
; CHECK-NEXT: // Parent Loop BB3_1 Depth=1
; CHECK-NEXT: // => This Loop Header: Depth=2
; CHECK-NEXT: // Child Loop BB3_3 Depth 3
-; CHECK-NEXT: lsl x12, x11, #3
+; CHECK-NEXT: ldr x13, [x1, x11, lsl #3]
+; CHECK-NEXT: ldr x12, [x10, x11, lsl #3]
; CHECK-NEXT: mov x14, x4
-; CHECK-NEXT: ldr x13, [x1, x12]
-; CHECK-NEXT: ldr x12, [x10, x12]
; CHECK-NEXT: ldr w13, [x13]
; CHECK-NEXT: .LBB3_3: // %for.body8
; CHECK-NEXT: // Parent Loop BB3_1 Depth=1
diff --git a/llvm/test/CodeGen/AArch64/sink-and-fold.ll b/llvm/test/CodeGen/AArch64/sink-and-fold.ll
index 5200722..f65a08a 100644
--- a/llvm/test/CodeGen/AArch64/sink-and-fold.ll
+++ b/llvm/test/CodeGen/AArch64/sink-and-fold.ll
@@ -100,7 +100,7 @@ exit:
}
; Address calculation cheap enough on some cores.
-define i32 @f3(i1 %c1, ptr %p, i64 %i) nounwind "target-features"="+alu-lsl-fast,+addr-lsl-fast" {
+define i32 @f3(i1 %c1, ptr %p, i64 %i) nounwind "target-features"="+alu-lsl-fast" {
; CHECK-LABEL: f3:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: tbz w0, #0, .LBB3_2
@@ -130,7 +130,7 @@ exit:
ret i32 %v
}
-define void @f4(ptr %a, i64 %n) nounwind "target-features"="+alu-lsl-fast,+addr-lsl-fast" {
+define void @f4(ptr %a, i64 %n) nounwind "target-features"="+alu-lsl-fast" {
; CHECK-LABEL: f4:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: cmp x1, #1