Diffstat (limited to 'llvm/test/CodeGen/AArch64')
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/knownbits-add.mir              278
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-compress.mir     6
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-matmul.ll                           87
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-post-coalescer.mir                  16
-rw-r--r--  llvm/test/CodeGen/AArch64/combine-sdiv.ll                             85
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-sme-abi-find-insert-pt.mir         227
-rw-r--r--  llvm/test/CodeGen/AArch64/mir-yaml-has-streaming-mode-changes.ll      13
-rw-r--r--  llvm/test/CodeGen/AArch64/sme-agnostic-za.ll                           4
-rw-r--r--  llvm/test/CodeGen/AArch64/sme-lazy-sve-nzcv-live.mir                  12
-rw-r--r--  llvm/test/CodeGen/AArch64/sme-za-exceptions.ll                       242
10 files changed, 929 insertions, 41 deletions
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-add.mir b/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-add.mir
new file mode 100644
index 0000000..824ada1
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-add.mir
@@ -0,0 +1,278 @@
+# NOTE: Assertions have been autogenerated by utils/update_givaluetracking_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=aarch64 -passes="print<gisel-value-tracking>" -filetype=null %s 2>&1 | FileCheck %s
+
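+# In the KnownBits strings checked below, '0' and '1' are bits the analysis has
+# proven and '?' is an unknown bit; SignBits counts the leading bits known to
+# equal the sign bit (e.g. 00000010 has six known sign bits).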
+---
+name: Cst
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @Cst
+ ; CHECK-NEXT: %0:_ KnownBits:00000010 SignBits:6
+ ; CHECK-NEXT: %1:_ KnownBits:00011000 SignBits:3
+ ; CHECK-NEXT: %2:_ KnownBits:00011010 SignBits:3
+ %0:_(s8) = G_CONSTANT i8 2
+ %1:_(s8) = G_CONSTANT i8 24
+ %2:_(s8) = G_ADD %0, %1
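+  ; Both addends are fully known, so the sum is fully known too:
+  ; 2 + 24 = 26 = 0b00011010, which has three leading (zero) sign bits.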
+...
+---
+name: CstZero
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @CstZero
+ ; CHECK-NEXT: %0:_ KnownBits:00000001 SignBits:7
+ ; CHECK-NEXT: %1:_ KnownBits:11111111 SignBits:8
+ ; CHECK-NEXT: %2:_ KnownBits:00000000 SignBits:8
+ %0:_(s8) = G_CONSTANT i8 1
+ %1:_(s8) = G_CONSTANT i8 255
+ %2:_(s8) = G_ADD %0, %1
+...
+---
+name: CstNegOne
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @CstNegOne
+ ; CHECK-NEXT: %0:_ KnownBits:00000000 SignBits:8
+ ; CHECK-NEXT: %1:_ KnownBits:11111111 SignBits:8
+ ; CHECK-NEXT: %2:_ KnownBits:11111111 SignBits:8
+ %0:_(s8) = G_CONSTANT i8 0
+ %1:_(s8) = G_CONSTANT i8 255
+ %2:_(s8) = G_ADD %0, %1
+...
+---
+name: CstSeven
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @CstSeven
+ ; CHECK-NEXT: %0:_ KnownBits:00001000 SignBits:4
+ ; CHECK-NEXT: %1:_ KnownBits:11111111 SignBits:8
+ ; CHECK-NEXT: %2:_ KnownBits:00000111 SignBits:5
+ %0:_(s8) = G_CONSTANT i8 8
+ %1:_(s8) = G_CONSTANT i8 255
+ %2:_(s8) = G_ADD %0, %1
+...
+---
+name: CstNeg
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @CstNeg
+ ; CHECK-NEXT: %0:_ KnownBits:11100000 SignBits:3
+ ; CHECK-NEXT: %1:_ KnownBits:00000010 SignBits:6
+ ; CHECK-NEXT: %2:_ KnownBits:11100010 SignBits:3
+ %0:_(s8) = G_CONSTANT i8 224
+ %1:_(s8) = G_CONSTANT i8 2
+ %2:_(s8) = G_ADD %0, %1
+...
+---
+name: ScalarVar
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @ScalarVar
+ ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:???????? SignBits:1
+ ; CHECK-NEXT: %2:_ KnownBits:???????? SignBits:1
+ %0:_(s8) = COPY $b0
+ %1:_(s8) = COPY $b1
+ %2:_(s8) = G_ADD %0, %1
+...
+---
+name: ScalarRhsEarlyOut
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @ScalarRhsEarlyOut
+ ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:00000011 SignBits:6
+ ; CHECK-NEXT: %2:_ KnownBits:???????? SignBits:1
+ %0:_(s8) = COPY $b0
+ %1:_(s8) = G_CONSTANT i8 3
+ %2:_(s8) = G_ADD %0, %1
+...
+---
+name: ScalarNonNegative
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @ScalarNonNegative
+ ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:00001111 SignBits:4
+ ; CHECK-NEXT: %2:_ KnownBits:0000???? SignBits:4
+ ; CHECK-NEXT: %3:_ KnownBits:11111111 SignBits:8
+ ; CHECK-NEXT: %4:_ KnownBits:???????? SignBits:4
+ %0:_(s8) = COPY $b0
+ %1:_(s8) = G_CONSTANT i8 15
+ %2:_(s8) = G_AND %0, %1
+ %3:_(s8) = G_CONSTANT i8 255
+ %4:_(s8) = G_ADD %2, %3
+...
+---
+name: ScalarLhsEarlyOut
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @ScalarLhsEarlyOut
+ ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:00000011 SignBits:6
+ ; CHECK-NEXT: %2:_ KnownBits:???????? SignBits:1
+ %0:_(s8) = COPY $b0
+ %1:_(s8) = G_CONSTANT i8 3
+ %2:_(s8) = G_ADD %1, %0
+...
+---
+name: ScalarPartKnown
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @ScalarPartKnown
+ ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:00001111 SignBits:4
+ ; CHECK-NEXT: %2:_ KnownBits:0000???? SignBits:4
+ ; CHECK-NEXT: %3:_ KnownBits:00000101 SignBits:5
+ ; CHECK-NEXT: %4:_ KnownBits:000????? SignBits:3
+ %0:_(s8) = COPY $b0
+ %1:_(s8) = G_CONSTANT i8 15
+ %2:_(s8) = G_AND %0, %1
+ %3:_(s8) = G_CONSTANT i8 5
+ %4:_(s8) = G_ADD %2, %3
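+  ; %2 is known to be in [0, 15], so %2 + 5 is in [5, 20]: the top three bits
+  ; stay zero while carries leave the low five bits unknown (000?????).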
+...
+---
+name: VectorCstZero
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorCstZero
+ ; CHECK-NEXT: %0:_ KnownBits:0000000000000001 SignBits:15
+ ; CHECK-NEXT: %1:_ KnownBits:1111111111111111 SignBits:16
+ ; CHECK-NEXT: %2:_ KnownBits:0000000000000001 SignBits:15
+ ; CHECK-NEXT: %3:_ KnownBits:1111111111111111 SignBits:16
+ ; CHECK-NEXT: %4:_ KnownBits:0000000000000000 SignBits:16
+ %0:_(s16) = G_CONSTANT i16 1
+ %1:_(s16) = G_CONSTANT i16 65535
+ %2:_(<4 x s16>) = G_BUILD_VECTOR %0, %0, %0, %0
+ %3:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1
+ %4:_(<4 x s16>) = G_ADD %2, %3
+...
+---
+name: VectorCstNegOne
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorCstNegOne
+ ; CHECK-NEXT: %0:_ KnownBits:0000000000000000 SignBits:16
+ ; CHECK-NEXT: %1:_ KnownBits:1111111111111111 SignBits:16
+ ; CHECK-NEXT: %2:_ KnownBits:0000000000000000 SignBits:16
+ ; CHECK-NEXT: %3:_ KnownBits:1111111111111111 SignBits:16
+ ; CHECK-NEXT: %4:_ KnownBits:1111111111111111 SignBits:16
+ %0:_(s16) = G_CONSTANT i16 0
+ %1:_(s16) = G_CONSTANT i16 65535
+ %2:_(<4 x s16>) = G_BUILD_VECTOR %0, %0, %0, %0
+ %3:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1
+ %4:_(<4 x s16>) = G_ADD %2, %3
+...
+---
+name: VectorVar
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorVar
+ ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %2:_ KnownBits:???????????????? SignBits:1
+ %0:_(<4 x s16>) = COPY $d0
+ %1:_(<4 x s16>) = COPY $d1
+ %2:_(<4 x s16>) = G_ADD %0, %1
+...
+---
+name: VectorRhsEarlyOut
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorRhsEarlyOut
+ ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:0000000000000011 SignBits:14
+ ; CHECK-NEXT: %2:_ KnownBits:0000000000000011 SignBits:14
+ ; CHECK-NEXT: %3:_ KnownBits:???????????????? SignBits:1
+ %0:_(<4 x s16>) = COPY $d0
+ %1:_(s16) = G_CONSTANT i16 3
+ %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1
+ %3:_(<4 x s16>) = G_ADD %2, %0
+...
+---
+name: VectorNonNegative
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorNonNegative
+ ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:0000000011111111 SignBits:8
+ ; CHECK-NEXT: %2:_ KnownBits:0000000011111111 SignBits:8
+ ; CHECK-NEXT: %3:_ KnownBits:00000000???????? SignBits:8
+ ; CHECK-NEXT: %4:_ KnownBits:1111111111111111 SignBits:16
+ ; CHECK-NEXT: %5:_ KnownBits:1111111111111111 SignBits:16
+ ; CHECK-NEXT: %6:_ KnownBits:???????????????? SignBits:8
+ %0:_(<4 x s16>) = COPY $d0
+ %1:_(s16) = G_CONSTANT i16 255
+ %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1
+ %3:_(<4 x s16>) = G_AND %0, %2
+ %4:_(s16) = G_CONSTANT i16 65535
+ %5:_(<4 x s16>) = G_BUILD_VECTOR %4, %4, %4, %4
+ %6:_(<4 x s16>) = G_ADD %3, %5
+...
+---
+name: VectorLhsEarlyOut
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorLhsEarlyOut
+ ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:0000000000000011 SignBits:14
+ ; CHECK-NEXT: %2:_ KnownBits:0000000000000011 SignBits:14
+ ; CHECK-NEXT: %3:_ KnownBits:???????????????? SignBits:1
+ %0:_(<4 x s16>) = COPY $d0
+ %1:_(s16) = G_CONSTANT i16 3
+ %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1
+ %3:_(<4 x s16>) = G_ADD %0, %2
+...
+---
+name: VectorPartKnown
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorPartKnown
+ ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:0000000011111111 SignBits:8
+ ; CHECK-NEXT: %2:_ KnownBits:0000000011111111 SignBits:8
+ ; CHECK-NEXT: %3:_ KnownBits:00000000???????? SignBits:8
+ ; CHECK-NEXT: %4:_ KnownBits:0000000000101010 SignBits:10
+ ; CHECK-NEXT: %5:_ KnownBits:0000000001001010 SignBits:9
+ ; CHECK-NEXT: %6:_ KnownBits:000000000??01010 SignBits:9
+ ; CHECK-NEXT: %7:_ KnownBits:0000000????????? SignBits:7
+ %0:_(<4 x s16>) = COPY $d0
+ %1:_(s16) = G_CONSTANT i16 255
+ %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1
+ %3:_(<4 x s16>) = G_AND %0, %2
+ %4:_(s16) = G_CONSTANT i16 42
+ %5:_(s16) = G_CONSTANT i16 74
+ %6:_(<4 x s16>) = G_BUILD_VECTOR %4, %5, %5, %4
+ %7:_(<4 x s16>) = G_ADD %6, %3
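+  ; The known bits of a G_BUILD_VECTOR are the intersection across its lanes:
+  ; 42 = 0b00101010 and 74 = 0b01001010 agree on 0??01010 (%6). Adding the
+  ; unknown low byte %3 can carry at most into bit 8, leaving 7 sign bits (%7).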
+...
+---
+name: VectorCst36
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorCst36
+ ; CHECK-NEXT: %0:_ KnownBits:0000000000000011 SignBits:14
+ ; CHECK-NEXT: %1:_ KnownBits:0000000000000110 SignBits:13
+ ; CHECK-NEXT: %2:_ KnownBits:0000000000000?1? SignBits:13
+ ; CHECK-NEXT: %3:_ KnownBits:0000000000000?1? SignBits:13
+ ; CHECK-NEXT: %4:_ KnownBits:000000000000???? SignBits:12
+ %0:_(s16) = G_CONSTANT i16 3
+ %1:_(s16) = G_CONSTANT i16 6
+ %2:_(<4 x s16>) = G_BUILD_VECTOR %0, %1, %1, %0
+ %3:_(<4 x s16>) = G_BUILD_VECTOR %0, %1, %1, %0
+ %4:_(<4 x s16>) = G_ADD %2, %3
+...
+
+---
+name: VectorCst3unknown
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorCst3unknown
+ ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %2:_ KnownBits:0000000000000011 SignBits:14
+ ; CHECK-NEXT: %3:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %4:_ KnownBits:???????????????? SignBits:1
+ %0:_(<4 x s16>) = COPY $d0
+ %1:_(s16) = COPY $h0
+ %2:_(s16) = G_CONSTANT i16 3
+ %3:_(<4 x s16>) = G_BUILD_VECTOR %1, %2, %2, %1
+ %4:_(<4 x s16>) = G_ADD %0, %3
+...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-compress.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-compress.mir
index cc75774..c2bf95c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-compress.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-compress.mir
@@ -15,8 +15,9 @@ body: |
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<4 x s32>), [[C1]](s64)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[C1]](s64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C1]], [[C2]]
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY2]], [[C2]]
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[MUL]](s64)
; CHECK-NEXT: G_STORE [[EVEC]](s32), [[PTR_ADD]](p0) :: (store (s32))
; CHECK-NEXT: [[EVEC1:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY1]](<4 x s16>), [[C1]](s64)
@@ -91,7 +92,8 @@ body: |
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32))
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<4 x s32>), [[C3]](s64)
- ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[C3]], [[C2]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[C3]](s64)
+ ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[COPY3]], [[C2]]
; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[MUL1]](s64)
; CHECK-NEXT: G_STORE [[EVEC]](s32), [[PTR_ADD1]](p0) :: (store (s32))
; CHECK-NEXT: [[EVEC1:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY1]](<4 x s16>), [[C3]](s64)
diff --git a/llvm/test/CodeGen/AArch64/aarch64-matmul.ll b/llvm/test/CodeGen/AArch64/aarch64-matmul.ll
index 649d0a9..e7e9ee7 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-matmul.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-matmul.ll
@@ -1,41 +1,54 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon,+i8mm < %s -o -| FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+neon,+i8mm < %s | FileCheck %s
+; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+neon,+i8mm -global-isel < %s | FileCheck %s
define <4 x i32> @smmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: smmla.v4i32.v16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: smmla v0.4s, v1.16b, v2.16b
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: smmla.v4i32.v16i8
-; CHECK: smmla v0.4s, v1.16b, v2.16b
%vmmla1.i = tail call <4 x i32> @llvm.aarch64.neon.smmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b)
ret <4 x i32> %vmmla1.i
}
define <4 x i32> @ummla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: ummla.v4i32.v16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ummla v0.4s, v1.16b, v2.16b
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: ummla.v4i32.v16i8
-; CHECK: ummla v0.4s, v1.16b, v2.16b
%vmmla1.i = tail call <4 x i32> @llvm.aarch64.neon.ummla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b)
ret <4 x i32> %vmmla1.i
}
define <4 x i32> @usmmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: usmmla.v4i32.v16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: usmmla v0.4s, v1.16b, v2.16b
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: usmmla.v4i32.v16i8
-; CHECK: usmmla v0.4s, v1.16b, v2.16b
%vusmmla1.i = tail call <4 x i32> @llvm.aarch64.neon.usmmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) #3
ret <4 x i32> %vusmmla1.i
}
define <2 x i32> @usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: usdot.v2i32.v8i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: usdot v0.2s, v1.8b, v2.8b
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: usdot.v2i32.v8i8
-; CHECK: usdot v0.2s, v1.8b, v2.8b
%vusdot1.i = tail call <2 x i32> @llvm.aarch64.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b)
ret <2 x i32> %vusdot1.i
}
define <2 x i32> @usdot_lane.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: usdot_lane.v2i32.v8i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT: usdot v0.2s, v1.8b, v2.4b[0]
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: usdot_lane.v2i32.v8i8
-; CHECK: usdot v0.2s, v1.8b, v2.4b[0]
%0 = bitcast <8 x i8> %b to <2 x i32>
%shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer
%1 = bitcast <2 x i32> %shuffle to <8 x i8>
@@ -44,9 +57,12 @@ entry:
}
define <2 x i32> @sudot_lane.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: sudot_lane.v2i32.v8i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT: sudot v0.2s, v1.8b, v2.4b[0]
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: sudot_lane.v2i32.v8i8
-; CHECK: sudot v0.2s, v1.8b, v2.4b[0]
%0 = bitcast <8 x i8> %b to <2 x i32>
%shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer
%1 = bitcast <2 x i32> %shuffle to <8 x i8>
@@ -55,9 +71,11 @@ entry:
}
define <2 x i32> @usdot_lane.v2i32.v16i8(<2 x i32> %r, <8 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: usdot_lane.v2i32.v16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: usdot v0.2s, v1.8b, v2.4b[0]
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: usdot_lane.v2i32.v16i8
-; CHECK: usdot v0.2s, v1.8b, v2.4b[0]
%0 = bitcast <16 x i8> %b to <4 x i32>
%shuffle = shufflevector <4 x i32> %0, <4 x i32> undef, <2 x i32> zeroinitializer
%1 = bitcast <2 x i32> %shuffle to <8 x i8>
@@ -66,9 +84,11 @@ entry:
}
define <2 x i32> @sudot_lane.v2i32.v16i8(<2 x i32> %r, <8 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: sudot_lane.v2i32.v16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sudot v0.2s, v1.8b, v2.4b[0]
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: sudot_lane.v2i32.v16i8
-; CHECK: sudot v0.2s, v1.8b, v2.4b[0]
%0 = bitcast <16 x i8> %b to <4 x i32>
%shuffle = shufflevector <4 x i32> %0, <4 x i32> undef, <2 x i32> zeroinitializer
%1 = bitcast <2 x i32> %shuffle to <8 x i8>
@@ -77,17 +97,22 @@ entry:
}
define <4 x i32> @usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: usdot.v4i32.v16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: usdot v0.4s, v1.16b, v2.16b
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: usdot.v4i32.v16i8
-; CHECK: usdot v0.4s, v1.16b, v2.16b
%vusdot1.i = tail call <4 x i32> @llvm.aarch64.neon.usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) #3
ret <4 x i32> %vusdot1.i
}
define <4 x i32> @usdot_lane.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: usdot_lane.v4i32.v16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT: usdot v0.4s, v1.16b, v2.4b[0]
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: usdot_lane.v4i32.v16i8
-; CHECK: usdot v0.4s, v1.16b, v2.4b[0]
%0 = bitcast <8 x i8> %b to <2 x i32>
%shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <4 x i32> zeroinitializer
%1 = bitcast <4 x i32> %shuffle to <16 x i8>
@@ -96,9 +121,12 @@ entry:
}
define <4 x i32> @sudot_lane.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: sudot_lane.v4i32.v16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT: sudot v0.4s, v1.16b, v2.4b[0]
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: sudot_lane.v4i32.v16i8
-; CHECK: sudot v0.4s, v1.16b, v2.4b[0]
%0 = bitcast <8 x i8> %b to <2 x i32>
%shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <4 x i32> zeroinitializer
%1 = bitcast <4 x i32> %shuffle to <16 x i8>
@@ -107,9 +135,11 @@ entry:
}
define <4 x i32> @usdot_laneq.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: usdot_laneq.v4i32.v16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: usdot v0.4s, v1.16b, v2.4b[0]
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: usdot_laneq.v4i32.v16i8
-; CHECK: usdot v0.4s, v1.16b, v2.4b[0]
%0 = bitcast <16 x i8> %b to <4 x i32>
%shuffle = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> zeroinitializer
%1 = bitcast <4 x i32> %shuffle to <16 x i8>
@@ -118,9 +148,11 @@ entry:
}
define <4 x i32> @sudot_laneq.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: sudot_laneq.v4i32.v16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sudot v0.4s, v1.16b, v2.4b[0]
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: sudot_laneq.v4i32.v16i8
-; CHECK: sudot v0.4s, v1.16b, v2.4b[0]
%0 = bitcast <16 x i8> %b to <4 x i32>
%shuffle = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> zeroinitializer
%1 = bitcast <4 x i32> %shuffle to <16 x i8>
@@ -133,4 +165,3 @@ declare <4 x i32> @llvm.aarch64.neon.ummla.v4i32.v16i8(<4 x i32>, <16 x i8>, <16
declare <4 x i32> @llvm.aarch64.neon.usmmla.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>) #2
declare <2 x i32> @llvm.aarch64.neon.usdot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>) #2
declare <4 x i32> @llvm.aarch64.neon.usdot.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>) #2
-
diff --git a/llvm/test/CodeGen/AArch64/aarch64-post-coalescer.mir b/llvm/test/CodeGen/AArch64/aarch64-post-coalescer.mir
new file mode 100644
index 0000000..6540160
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-post-coalescer.mir
@@ -0,0 +1,16 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -mtriple=aarch64 -mattr=+sme -run-pass=aarch64-post-coalescer-pass -o - %s | FileCheck %s
+
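+# The post-coalescer pass should strip the COALESCER_BARRIER pseudo, leaving
+# just the plain copy of the undef virtual register checked below.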
+---
+name: foo
+machineFunctionInfo:
+ hasStreamingModeChanges: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: foo
+ ; CHECK: $d0 = COPY undef %0:fpr64
+ ; CHECK-NEXT: FAKE_USE implicit $d0
+ %1:fpr64 = COALESCER_BARRIER_FPR64 undef %1
+ $d0 = COPY %1
+ FAKE_USE implicit $d0
+...
diff --git a/llvm/test/CodeGen/AArch64/combine-sdiv.ll b/llvm/test/CodeGen/AArch64/combine-sdiv.ll
index dc88f94..cca190f 100644
--- a/llvm/test/CodeGen/AArch64/combine-sdiv.ll
+++ b/llvm/test/CodeGen/AArch64/combine-sdiv.ll
@@ -1774,3 +1774,88 @@ define i128 @combine_i128_sdiv_const100(i128 %x) {
%1 = sdiv i128 %x, 100
ret i128 %1
}
+
+; The following only becomes an sdiv_by_one after type legalisation, after which
+; the splatted scalar constant has a different type to the splat vector. This
+; test verifies DAGCombiner does not care about this type difference.
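+; (The shuffle produces the divisor <0, 1, 1, ..., 1>, so a genuine splat of
+; one only appears once type legalisation splits the vector into halves.)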
+define <16 x i16> @combine_vec_sdiv_by_one_obfuscated(<16 x i16> %x) "target-features"="+sve" {
+; CHECK-SD-LABEL: combine_vec_sdiv_by_one_obfuscated:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: combine_vec_sdiv_by_one_obfuscated:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: movi v2.2d, #0000000000000000
+; CHECK-GI-NEXT: movi v3.8h, #1
+; CHECK-GI-NEXT: smov w8, v0.h[0]
+; CHECK-GI-NEXT: mov v3.h[0], v2.h[0]
+; CHECK-GI-NEXT: smov w9, v3.h[0]
+; CHECK-GI-NEXT: smov w16, v3.h[7]
+; CHECK-GI-NEXT: sdiv w14, w8, w9
+; CHECK-GI-NEXT: smov w8, v0.h[1]
+; CHECK-GI-NEXT: smov w9, v3.h[1]
+; CHECK-GI-NEXT: sdiv w15, w8, w9
+; CHECK-GI-NEXT: smov w8, v0.h[2]
+; CHECK-GI-NEXT: smov w9, v3.h[2]
+; CHECK-GI-NEXT: sdiv w13, w8, w9
+; CHECK-GI-NEXT: smov w8, v0.h[3]
+; CHECK-GI-NEXT: smov w9, v3.h[3]
+; CHECK-GI-NEXT: sdiv w12, w8, w9
+; CHECK-GI-NEXT: smov w8, v0.h[4]
+; CHECK-GI-NEXT: smov w9, v3.h[4]
+; CHECK-GI-NEXT: sdiv w11, w8, w9
+; CHECK-GI-NEXT: smov w8, v0.h[5]
+; CHECK-GI-NEXT: smov w9, v3.h[5]
+; CHECK-GI-NEXT: sdiv w10, w8, w9
+; CHECK-GI-NEXT: smov w8, v0.h[6]
+; CHECK-GI-NEXT: smov w9, v3.h[6]
+; CHECK-GI-NEXT: movi v3.8h, #1
+; CHECK-GI-NEXT: smov w17, v3.h[0]
+; CHECK-GI-NEXT: smov w18, v3.h[1]
+; CHECK-GI-NEXT: smov w0, v3.h[2]
+; CHECK-GI-NEXT: smov w1, v3.h[3]
+; CHECK-GI-NEXT: smov w2, v3.h[4]
+; CHECK-GI-NEXT: smov w3, v3.h[5]
+; CHECK-GI-NEXT: sdiv w8, w8, w9
+; CHECK-GI-NEXT: smov w9, v0.h[7]
+; CHECK-GI-NEXT: fmov s0, w14
+; CHECK-GI-NEXT: mov v0.h[1], w15
+; CHECK-GI-NEXT: smov w15, v1.h[6]
+; CHECK-GI-NEXT: mov v0.h[2], w13
+; CHECK-GI-NEXT: sdiv w9, w9, w16
+; CHECK-GI-NEXT: smov w16, v1.h[0]
+; CHECK-GI-NEXT: mov v0.h[3], w12
+; CHECK-GI-NEXT: smov w12, v1.h[7]
+; CHECK-GI-NEXT: mov v0.h[4], w11
+; CHECK-GI-NEXT: sdiv w16, w16, w17
+; CHECK-GI-NEXT: smov w17, v1.h[1]
+; CHECK-GI-NEXT: mov v0.h[5], w10
+; CHECK-GI-NEXT: mov v0.h[6], w8
+; CHECK-GI-NEXT: sdiv w17, w17, w18
+; CHECK-GI-NEXT: smov w18, v1.h[2]
+; CHECK-GI-NEXT: fmov s2, w16
+; CHECK-GI-NEXT: smov w16, v3.h[6]
+; CHECK-GI-NEXT: mov v0.h[7], w9
+; CHECK-GI-NEXT: sdiv w18, w18, w0
+; CHECK-GI-NEXT: smov w0, v1.h[3]
+; CHECK-GI-NEXT: mov v2.h[1], w17
+; CHECK-GI-NEXT: sdiv w0, w0, w1
+; CHECK-GI-NEXT: smov w1, v1.h[4]
+; CHECK-GI-NEXT: mov v2.h[2], w18
+; CHECK-GI-NEXT: sdiv w1, w1, w2
+; CHECK-GI-NEXT: smov w2, v1.h[5]
+; CHECK-GI-NEXT: mov v2.h[3], w0
+; CHECK-GI-NEXT: sdiv w14, w2, w3
+; CHECK-GI-NEXT: mov v2.h[4], w1
+; CHECK-GI-NEXT: sdiv w13, w15, w16
+; CHECK-GI-NEXT: smov w15, v3.h[7]
+; CHECK-GI-NEXT: mov v2.h[5], w14
+; CHECK-GI-NEXT: sdiv w10, w12, w15
+; CHECK-GI-NEXT: mov v2.h[6], w13
+; CHECK-GI-NEXT: mov v2.h[7], w10
+; CHECK-GI-NEXT: mov v1.16b, v2.16b
+; CHECK-GI-NEXT: ret
+ %zero_and_ones = shufflevector <16 x i16> zeroinitializer, <16 x i16> splat (i16 1), <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %div = sdiv <16 x i16> %x, %zero_and_ones
+ ret <16 x i16> %div
+}
diff --git a/llvm/test/CodeGen/AArch64/machine-sme-abi-find-insert-pt.mir b/llvm/test/CodeGen/AArch64/machine-sme-abi-find-insert-pt.mir
new file mode 100644
index 0000000..3f174a6
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/machine-sme-abi-find-insert-pt.mir
@@ -0,0 +1,227 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -mtriple=aarch64 -mattr=+sve -mattr=+sme -run-pass=aarch64-machine-sme-abi -verify-machineinstrs %s -o - | FileCheck %s
+
+--- |
+ ; Test moving a state change to be before a $nzcv def
+ define void @move_before_nzcv_def() "aarch64_inout_za" { ret void }
+
+ ; Test moving a state change to a point where $x0 is live
+ define void @move_to_x0_live() "aarch64_inout_za" { ret void }
+
+ ; Test we don't move before a previous state change.
+ define void @do_not_move_before_prior_state_change() "aarch64_za_state_agnostic" { ret void }
+
+ ; Test we don't move into a call sequence.
+ define void @do_not_move_into_call() "aarch64_inout_za" { ret void }
+
+ declare void @clobber()
+ declare void @inout_call() "aarch64_inout_za"
+...
+---
+name: move_before_nzcv_def
+tracksRegLiveness: true
+isSSA: true
+noVRegs: false
+body: |
+ bb.0:
+
+ ; CHECK-LABEL: name: move_before_nzcv_def
+ ; CHECK: [[RDSVLI_XI:%[0-9]+]]:gpr64 = RDSVLI_XI 1, implicit $vg
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $sp
+ ; CHECK-NEXT: [[MSUBXrrr:%[0-9]+]]:gpr64 = MSUBXrrr [[RDSVLI_XI]], [[RDSVLI_XI]], [[COPY]]
+ ; CHECK-NEXT: $sp = COPY [[MSUBXrrr]]
+ ; CHECK-NEXT: STPXi [[MSUBXrrr]], [[RDSVLI_XI]], %stack.0, 0
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY [[ADDXri]]
+ ; CHECK-NEXT: MSR 56965, [[COPY1]]
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: RequiresZASavePseudo
+ ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: MSRpstatesvcrImm1 2, 1, implicit-def $nzcv
+ ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 56965, implicit-def $nzcv
+ ; CHECK-NEXT: $x0 = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: RestoreZAPseudo [[MRS]], $x0, &__arm_tpidr2_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x0
+ ; CHECK-NEXT: MSR 56965, $xzr
+ ; CHECK-NEXT: $nzcv = IMPLICIT_DEF
+ ; CHECK-NEXT: $zab0 = IMPLICIT_DEF
+ ; CHECK-NEXT: FAKE_USE $nzcv
+ ; CHECK-NEXT: RET_ReallyLR
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ RequiresZASavePseudo
+ BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+
+ $nzcv = IMPLICIT_DEF
+ $zab0 = IMPLICIT_DEF
+ FAKE_USE $nzcv
+
+ RET_ReallyLR
+...
+---
+name: move_to_x0_live
+tracksRegLiveness: true
+isSSA: true
+noVRegs: false
+body: |
+ bb.0:
+
+ ; CHECK-LABEL: name: move_to_x0_live
+ ; CHECK: [[RDSVLI_XI:%[0-9]+]]:gpr64 = RDSVLI_XI 1, implicit $vg
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $sp
+ ; CHECK-NEXT: [[MSUBXrrr:%[0-9]+]]:gpr64 = MSUBXrrr [[RDSVLI_XI]], [[RDSVLI_XI]], [[COPY]]
+ ; CHECK-NEXT: $sp = COPY [[MSUBXrrr]]
+ ; CHECK-NEXT: STPXi [[MSUBXrrr]], [[RDSVLI_XI]], %stack.0, 0
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY [[ADDXri]]
+ ; CHECK-NEXT: MSR 56965, [[COPY1]]
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: RequiresZASavePseudo
+ ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: $x0 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: MSRpstatesvcrImm1 2, 1, implicit-def $nzcv
+ ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 56965, implicit-def $nzcv
+ ; CHECK-NEXT: $x0 = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: RestoreZAPseudo [[MRS]], $x0, &__arm_tpidr2_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x0
+ ; CHECK-NEXT: MSR 56965, $xzr
+ ; CHECK-NEXT: $x0 = COPY [[COPY2]]
+ ; CHECK-NEXT: $nzcv = IMPLICIT_DEF
+ ; CHECK-NEXT: FAKE_USE $x0
+ ; CHECK-NEXT: $zab0 = IMPLICIT_DEF
+ ; CHECK-NEXT: FAKE_USE $nzcv
+ ; CHECK-NEXT: RET_ReallyLR
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ RequiresZASavePseudo
+ BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+
+ $x0 = IMPLICIT_DEF
+
+ $nzcv = IMPLICIT_DEF
+ FAKE_USE $x0
+
+ $zab0 = IMPLICIT_DEF
+ FAKE_USE $nzcv
+
+ RET_ReallyLR
+...
+---
+name: do_not_move_before_prior_state_change
+tracksRegLiveness: true
+isSSA: true
+noVRegs: false
+body: |
+ ; CHECK-LABEL: name: do_not_move_before_prior_state_change
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: BL &__arm_sme_state_size, csr_aarch64_sme_abi_support_routines_preservemost_from_x1, implicit-def $lr, implicit $sp, implicit-def $x0
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: $sp = SUBXrx64 $sp, [[COPY]], 24
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $sp
+ ; CHECK-NEXT: $nzcv = IMPLICIT_DEF
+ ; CHECK-NEXT: $zab0 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 55824, implicit-def $nzcv, implicit $nzcv
+ ; CHECK-NEXT: $x0 = COPY [[COPY1]]
+ ; CHECK-NEXT: BL &__arm_sme_save, csr_aarch64_sme_abi_support_routines_preservemost_from_x1, implicit-def $lr, implicit $sp, implicit $x0
+ ; CHECK-NEXT: MSR 55824, [[MRS]], implicit-def $nzcv
+ ; CHECK-NEXT: Bcc 2, %bb.1, implicit $nzcv
+ ; CHECK-NEXT: B %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: liveins: $nzcv
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: FAKE_USE $nzcv
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: RequiresZASavePseudo
+ ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: $x0 = COPY [[COPY1]]
+ ; CHECK-NEXT: BL &__arm_sme_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x1, implicit-def $lr, implicit $sp, implicit $x0
+ ; CHECK-NEXT: RET_ReallyLR
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: RequiresZASavePseudo
+ ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: $x0 = COPY [[COPY1]]
+ ; CHECK-NEXT: BL &__arm_sme_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x1, implicit-def $lr, implicit $sp, implicit $x0
+ ; CHECK-NEXT: RET_ReallyLR
+ bb.0:
+ successors: %bb.1, %bb.2
+
+  ; The insertion point can't move before the $nzcv def (as that would require
+  ; moving before a $zab0 def -- which requires the ACTIVE state).
+ $nzcv = IMPLICIT_DEF
+ $zab0 = IMPLICIT_DEF
+ Bcc 2, %bb.1, implicit $nzcv
+ B %bb.2
+ ; bb.1 and bb.2 both require ZA saved on entry (to force bb.0's exit bundle to
+ ; pick the LOCAL_SAVED state).
+ bb.1:
+ liveins: $nzcv
+ FAKE_USE $nzcv
+
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ RequiresZASavePseudo
+ BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+
+ RET_ReallyLR
+ bb.2:
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ RequiresZASavePseudo
+ BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+
+ RET_ReallyLR
+...
+---
+name: do_not_move_into_call
+tracksRegLiveness: true
+isSSA: true
+noVRegs: false
+body: |
+ bb.0:
+
+ ; CHECK-LABEL: name: do_not_move_into_call
+ ; CHECK: [[RDSVLI_XI:%[0-9]+]]:gpr64 = RDSVLI_XI 1, implicit $vg
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $sp
+ ; CHECK-NEXT: [[MSUBXrrr:%[0-9]+]]:gpr64 = MSUBXrrr [[RDSVLI_XI]], [[RDSVLI_XI]], [[COPY]]
+ ; CHECK-NEXT: $sp = COPY [[MSUBXrrr]]
+ ; CHECK-NEXT: STPXi [[MSUBXrrr]], [[RDSVLI_XI]], %stack.0, 0
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY [[ADDXri]]
+ ; CHECK-NEXT: MSR 56965, [[COPY1]]
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: RequiresZASavePseudo
+ ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ; CHECK-NEXT: $nzcv = IMPLICIT_DEF
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 55824, implicit-def $nzcv, implicit $nzcv
+ ; CHECK-NEXT: MSRpstatesvcrImm1 2, 1, implicit-def $nzcv
+ ; CHECK-NEXT: [[MRS1:%[0-9]+]]:gpr64 = MRS 56965, implicit-def $nzcv
+ ; CHECK-NEXT: $x0 = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: RestoreZAPseudo [[MRS1]], $x0, &__arm_tpidr2_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x0
+ ; CHECK-NEXT: MSR 56965, $xzr
+ ; CHECK-NEXT: MSR 55824, [[MRS]], implicit-def $nzcv
+ ; CHECK-NEXT: $zab0 = IMPLICIT_DEF
+ ; CHECK-NEXT: FAKE_USE $nzcv
+ ; CHECK-NEXT: RET_ReallyLR
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ RequiresZASavePseudo
+ BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+
+  ; This is an artificial test where NZCV is def'd inside a call sequence, so
+  ; we can't move the insert point before its definition.
+ $nzcv = IMPLICIT_DEF
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+
+ $zab0 = IMPLICIT_DEF
+ FAKE_USE $nzcv
+
+ RET_ReallyLR
+...
diff --git a/llvm/test/CodeGen/AArch64/mir-yaml-has-streaming-mode-changes.ll b/llvm/test/CodeGen/AArch64/mir-yaml-has-streaming-mode-changes.ll
new file mode 100644
index 0000000..8f1fe5c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/mir-yaml-has-streaming-mode-changes.ll
@@ -0,0 +1,13 @@
+; RUN: llc -mtriple=aarch64 -mattr=+sme -stop-after=aarch64-isel < %s | FileCheck %s
+
+target triple = "aarch64"
+
+declare void @foo() "aarch64_pstate_sm_enabled"
+
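+; @bar is a non-streaming function calling a streaming-enabled callee, so the
+; call site needs a streaming mode change, which should be recorded in the MIR
+; YAML as hasStreamingModeChanges.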
+define dso_local void @bar() local_unnamed_addr {
+; CHECK-LABEL: name: bar
+; CHECK: hasStreamingModeChanges: true
+entry:
+ tail call void @foo() "aarch64_pstate_sm_enabled"
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll b/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll
index e3007a3..e4f9efa 100644
--- a/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll
+++ b/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll
@@ -391,11 +391,9 @@ define void @agnostic_za_buffer_alloc_with_stack_probes() nounwind "aarch64_za_s
; CHECK-NEWLOWERING-NEXT: sub x19, x8, x0
; CHECK-NEWLOWERING-NEXT: .LBB7_1: // =>This Inner Loop Header: Depth=1
; CHECK-NEWLOWERING-NEXT: sub sp, sp, #16, lsl #12 // =65536
-; CHECK-NEWLOWERING-NEXT: cmp sp, x19
; CHECK-NEWLOWERING-NEXT: mov x0, x19
-; CHECK-NEWLOWERING-NEXT: mrs x8, NZCV
; CHECK-NEWLOWERING-NEXT: bl __arm_sme_save
-; CHECK-NEWLOWERING-NEXT: msr NZCV, x8
+; CHECK-NEWLOWERING-NEXT: cmp sp, x19
; CHECK-NEWLOWERING-NEXT: b.le .LBB7_3
; CHECK-NEWLOWERING-NEXT: // %bb.2: // in Loop: Header=BB7_1 Depth=1
; CHECK-NEWLOWERING-NEXT: mov x0, x19
diff --git a/llvm/test/CodeGen/AArch64/sme-lazy-sve-nzcv-live.mir b/llvm/test/CodeGen/AArch64/sme-lazy-sve-nzcv-live.mir
index 18764d5..9f33c06 100644
--- a/llvm/test/CodeGen/AArch64/sme-lazy-sve-nzcv-live.mir
+++ b/llvm/test/CodeGen/AArch64/sme-lazy-sve-nzcv-live.mir
@@ -62,14 +62,12 @@ body: |
; CHECK-NEXT: RequiresZASavePseudo
; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
- ; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY1]], 101, 0, implicit-def $nzcv
- ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 55824, implicit-def $nzcv, implicit $nzcv
; CHECK-NEXT: MSRpstatesvcrImm1 2, 1, implicit-def $nzcv
; CHECK-NEXT: [[MRS1:%[0-9]+]]:gpr64 = MRS 56965, implicit-def $nzcv
; CHECK-NEXT: $x0 = ADDXri %stack.0, 0, 0
; CHECK-NEXT: RestoreZAPseudo [[MRS1]], $x0, &__arm_tpidr2_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x0
; CHECK-NEXT: MSR 56965, $xzr
- ; CHECK-NEXT: MSR 55824, [[MRS]], implicit-def $nzcv
+ ; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY1]], 101, 0, implicit-def $nzcv
; CHECK-NEXT: Bcc 11, %bb.2, implicit $nzcv
; CHECK-NEXT: B %bb.1
; CHECK-NEXT: {{ $}}
@@ -116,16 +114,14 @@ body: |
# CHECK-ASM-LABEL: cmp_branch
# CHECK-ASM: msr TPIDR2_EL0, x10
# CHECK-ASM-NEXT: bl clobber
-# CHECK-ASM-NEXT: cmp w20, #101
-# CHECK-ASM-NEXT: mrs x8, NZCV
# CHECK-ASM-NEXT: smstart za
-# CHECK-ASM-NEXT: mrs x9, TPIDR2_EL0
+# CHECK-ASM-NEXT: mrs x8, TPIDR2_EL0
# CHECK-ASM-NEXT: sub x0, x29, #16
-# CHECK-ASM-NEXT: cbnz x9, .LBB0_2
+# CHECK-ASM-NEXT: cbnz x8, .LBB0_2
# CHECK-ASM: bl __arm_tpidr2_restore
# CHECK-ASM-NEXT: .LBB0_2:
+# CHECK-ASM-NEXT: cmp w20, #101
# CHECK-ASM-NEXT: msr TPIDR2_EL0, xzr
-# CHECK-ASM-NEXT: msr NZCV, x8
# CHECK-ASM-NEXT: b.lt .LBB0_4
# CHECK-ASM: bl inout_call
# CHECK-ASM-NEXT: .LBB0_4:
diff --git a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll
index b6dee97e..b8d6c88 100644
--- a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll
+++ b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll
@@ -732,6 +732,247 @@ exit:
ret void
}
+; This example corresponds to:
+;
+; __arm_agnostic("sme_za_state") void try_catch_agnostic_za_invoke()
+; {
+; try {
+; agnostic_za_call();
+; } catch(...) {
+; }
+; }
+;
+; In this example we preserve all SME state enabled by PSTATE.ZA using
+; `__arm_sme_save` before agnostic_za_call(), since ZA state must be preserved
+; on all normal returns from an agnostic-ZA function. That means we need to
+; make sure ZA state is saved in case agnostic_za_call() throws, and we need to
+; restore ZA state after unwinding to the catch block.
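+;
+; The mechanics are visible in the checks below: __arm_sme_state_size returns
+; the required buffer size in x0, the buffer is allocated on the stack, and its
+; address is passed in x0 to both __arm_sme_save and __arm_sme_restore.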
+
+define void @try_catch_agnostic_za_invoke() "aarch64_za_state_agnostic" personality ptr @__gxx_personality_v0 {
+; CHECK-LABEL: try_catch_agnostic_za_invoke:
+; CHECK: .Lfunc_begin5:
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-NEXT: .cfi_lsda 28, .Lexception5
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: .cfi_def_cfa w29, 32
+; CHECK-NEXT: .cfi_offset w19, -16
+; CHECK-NEXT: .cfi_offset w30, -24
+; CHECK-NEXT: .cfi_offset w29, -32
+; CHECK-NEXT: bl __arm_sme_state_size
+; CHECK-NEXT: sub sp, sp, x0
+; CHECK-NEXT: mov x19, sp
+; CHECK-NEXT: .Ltmp15: // EH_LABEL
+; CHECK-NEXT: mov x0, x19
+; CHECK-NEXT: bl __arm_sme_save
+; CHECK-NEXT: bl agnostic_za_call
+; CHECK-NEXT: .Ltmp16: // EH_LABEL
+; CHECK-NEXT: .LBB5_1: // %exit
+; CHECK-NEXT: mov x0, x19
+; CHECK-NEXT: bl __arm_sme_restore
+; CHECK-NEXT: mov sp, x29
+; CHECK-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB5_2: // %catch
+; CHECK-NEXT: .Ltmp17: // EH_LABEL
+; CHECK-NEXT: bl __cxa_begin_catch
+; CHECK-NEXT: bl __cxa_end_catch
+; CHECK-NEXT: b .LBB5_1
+;
+; CHECK-SDAG-LABEL: try_catch_agnostic_za_invoke:
+; CHECK-SDAG: .Lfunc_begin5:
+; CHECK-SDAG-NEXT: .cfi_startproc
+; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception5
+; CHECK-SDAG-NEXT: // %bb.0: // %entry
+; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
+; CHECK-SDAG-NEXT: mov x29, sp
+; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32
+; CHECK-SDAG-NEXT: .cfi_offset w19, -16
+; CHECK-SDAG-NEXT: .cfi_offset w30, -24
+; CHECK-SDAG-NEXT: .cfi_offset w29, -32
+; CHECK-SDAG-NEXT: bl __arm_sme_state_size
+; CHECK-SDAG-NEXT: sub sp, sp, x0
+; CHECK-SDAG-NEXT: mov x19, sp
+; CHECK-SDAG-NEXT: .Ltmp15: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_save
+; CHECK-SDAG-NEXT: bl agnostic_za_call
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: .Ltmp16: // EH_LABEL
+; CHECK-SDAG-NEXT: .LBB5_1: // %exit
+; CHECK-SDAG-NEXT: mov sp, x29
+; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
+; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: ret
+; CHECK-SDAG-NEXT: .LBB5_2: // %catch
+; CHECK-SDAG-NEXT: .Ltmp17: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x1, x0
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_save
+; CHECK-SDAG-NEXT: mov x0, x1
+; CHECK-SDAG-NEXT: bl __cxa_begin_catch
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_save
+; CHECK-SDAG-NEXT: bl __cxa_end_catch
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: b .LBB5_1
+entry:
+ invoke void @agnostic_za_call()
+ to label %exit unwind label %catch
+
+catch:
+ %eh_info = landingpad { ptr, i32 }
+ catch ptr null
+ %exception_ptr = extractvalue { ptr, i32 } %eh_info, 0
+ tail call ptr @__cxa_begin_catch(ptr %exception_ptr)
+ tail call void @__cxa_end_catch()
+ br label %exit
+
+exit:
+ ret void
+}
+
+; This is the same as `try_catch_agnostic_za_invoke`, but shows that a lazy
+; save would also need to be committed in a shared-ZA function calling an
+; agnostic-ZA function.
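+; The lazy-save protocol is visible in the checks: TPIDR2_EL0 is pointed at a
+; TPIDR2 block before the call, and afterwards, if TPIDR2_EL0 reads back as
+; zero, a callee committed the save and __arm_tpidr2_restore must reload ZA.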
+define void @try_catch_inout_za_agnostic_za_callee() "aarch64_inout_za" personality ptr @__gxx_personality_v0 {
+; CHECK-LABEL: try_catch_inout_za_agnostic_za_callee:
+; CHECK: .Lfunc_begin6:
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-NEXT: .cfi_lsda 28, .Lexception6
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa w29, 16
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdsvl x8, #1
+; CHECK-NEXT: mov x9, sp
+; CHECK-NEXT: msub x9, x8, x8, x9
+; CHECK-NEXT: mov sp, x9
+; CHECK-NEXT: stp x9, x8, [x29, #-16]
+; CHECK-NEXT: .Ltmp18: // EH_LABEL
+; CHECK-NEXT: sub x8, x29, #16
+; CHECK-NEXT: msr TPIDR2_EL0, x8
+; CHECK-NEXT: bl agnostic_za_call
+; CHECK-NEXT: .Ltmp19: // EH_LABEL
+; CHECK-NEXT: .LBB6_1: // %exit
+; CHECK-NEXT: smstart za
+; CHECK-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-NEXT: sub x0, x29, #16
+; CHECK-NEXT: cbnz x8, .LBB6_3
+; CHECK-NEXT: // %bb.2: // %exit
+; CHECK-NEXT: bl __arm_tpidr2_restore
+; CHECK-NEXT: .LBB6_3: // %exit
+; CHECK-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-NEXT: mov sp, x29
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB6_4: // %catch
+; CHECK-NEXT: .Ltmp20: // EH_LABEL
+; CHECK-NEXT: bl __cxa_begin_catch
+; CHECK-NEXT: bl __cxa_end_catch
+; CHECK-NEXT: b .LBB6_1
+;
+; CHECK-SDAG-LABEL: try_catch_inout_za_agnostic_za_callee:
+; CHECK-SDAG: .Lfunc_begin6:
+; CHECK-SDAG-NEXT: .cfi_startproc
+; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception6
+; CHECK-SDAG-NEXT: // %bb.0: // %entry
+; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
+; CHECK-SDAG-NEXT: mov x29, sp
+; CHECK-SDAG-NEXT: sub sp, sp, #16
+; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32
+; CHECK-SDAG-NEXT: .cfi_offset w19, -16
+; CHECK-SDAG-NEXT: .cfi_offset w30, -24
+; CHECK-SDAG-NEXT: .cfi_offset w29, -32
+; CHECK-SDAG-NEXT: rdsvl x8, #1
+; CHECK-SDAG-NEXT: mov x9, sp
+; CHECK-SDAG-NEXT: msub x9, x8, x8, x9
+; CHECK-SDAG-NEXT: mov sp, x9
+; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16]
+; CHECK-SDAG-NEXT: .Ltmp18: // EH_LABEL
+; CHECK-SDAG-NEXT: sub x19, x29, #16
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl agnostic_za_call
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB6_2
+; CHECK-SDAG-NEXT: // %bb.1: // %entry
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB6_2: // %entry
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: .Ltmp19: // EH_LABEL
+; CHECK-SDAG-NEXT: .LBB6_3: // %exit
+; CHECK-SDAG-NEXT: mov sp, x29
+; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
+; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: ret
+; CHECK-SDAG-NEXT: .LBB6_4: // %catch
+; CHECK-SDAG-NEXT: .Ltmp20: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x1, x0
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB6_6
+; CHECK-SDAG-NEXT: // %bb.5: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB6_6: // %catch
+; CHECK-SDAG-NEXT: mov x0, x1
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl __cxa_begin_catch
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB6_8
+; CHECK-SDAG-NEXT: // %bb.7: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB6_8: // %catch
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl __cxa_end_catch
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB6_10
+; CHECK-SDAG-NEXT: // %bb.9: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB6_10: // %catch
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: b .LBB6_3
+entry:
+ invoke void @agnostic_za_call()
+ to label %exit unwind label %catch
+
+catch:
+ %eh_info = landingpad { ptr, i32 }
+ catch ptr null
+ %exception_ptr = extractvalue { ptr, i32 } %eh_info, 0
+ tail call ptr @__cxa_begin_catch(ptr %exception_ptr)
+ tail call void @__cxa_end_catch()
+ br label %exit
+
+exit:
+ ret void
+}
+
declare ptr @__cxa_allocate_exception(i64)
declare void @__cxa_throw(ptr, ptr, ptr)
declare ptr @__cxa_begin_catch(ptr)
@@ -742,3 +983,4 @@ declare void @may_throw()
declare void @shared_za_call() "aarch64_inout_za"
declare void @noexcept_shared_za_call() "aarch64_inout_za"
declare void @shared_zt0_call() "aarch64_inout_zt0"
+declare void @agnostic_za_call() "aarch64_za_state_agnostic"