; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt < %s -passes=interleaved-access -mtriple=aarch64-linux-gnu -mattr=+sve -S | FileCheck %s

define void @interleave4(ptr %dst, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d) {
; CHECK-LABEL: define void @interleave4
; CHECK-SAME: (ptr [[DST:%.*]], <vscale x 4 x i32> [[A:%.*]], <vscale x 4 x i32> [[B:%.*]], <vscale x 4 x i32> [[C:%.*]], <vscale x 4 x i32> [[D:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:    call void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32> [[A]], <vscale x 4 x i32> [[B]], <vscale x 4 x i32> [[C]], <vscale x 4 x i32> [[D]], <vscale x 4 x i1> splat (i1 true), ptr [[DST]])
; CHECK-NEXT:    ret void
;
  %interleaved.vec = tail call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d)
  store <vscale x 16 x i32> %interleaved.vec, ptr %dst, align 4
  ret void
}

; The wide interleave does not fit a single st4, so it is split into two st4
; stores of the low and high halves of each operand.
define void @wide_interleave4(ptr %dst, <vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i32> %d) {
; CHECK-LABEL: define void @wide_interleave4
; CHECK-SAME: (ptr [[DST:%.*]], <vscale x 8 x i32> [[A:%.*]], <vscale x 8 x i32> [[B:%.*]], <vscale x 8 x i32> [[C:%.*]], <vscale x 8 x i32> [[D:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[DST]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[A]], i64 0)
; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[B]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[C]], i64 0)
; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[D]], i64 0)
; CHECK-NEXT:    call void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> [[TMP5]], <vscale x 4 x i1> splat (i1 true), ptr [[TMP1]])
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[DST]], i64 4
; CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[A]], i64 4)
; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[B]], i64 4)
; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[C]], i64 4)
; CHECK-NEXT:    [[TMP10:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[D]], i64 4)
; CHECK-NEXT:    call void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32> [[TMP8]], <vscale x 4 x i32> [[TMP9]], <vscale x 4 x i32> [[TMP10]], <vscale x 4 x i1> splat (i1 true), ptr [[TMP6]])
; CHECK-NEXT:    ret void
;
  %interleaved.vec = tail call <vscale x 32 x i32> @llvm.vector.interleave4.nxv32i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i32> %d)
  store <vscale x 32 x i32> %interleaved.vec, ptr %dst, align 4
  ret void
}

; A factor-4 and a factor-2 interleave of the same operands are lowered
; independently to st4 and st2.
define void @mix_interleave4_interleave2(ptr %dst1, ptr %dst2, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d) {
; CHECK-LABEL: define void @mix_interleave4_interleave2
; CHECK-SAME: (ptr [[DST1:%.*]], ptr [[DST2:%.*]], <vscale x 4 x i32> [[A:%.*]], <vscale x 4 x i32> [[B:%.*]], <vscale x 4 x i32> [[C:%.*]], <vscale x 4 x i32> [[D:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    call void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32> [[A]], <vscale x 4 x i32> [[B]], <vscale x 4 x i32> [[C]], <vscale x 4 x i32> [[D]], <vscale x 4 x i1> splat (i1 true), ptr [[DST1]])
; CHECK-NEXT:    call void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32> [[A]], <vscale x 4 x i32> [[C]], <vscale x 4 x i1> splat (i1 true), ptr [[DST2]])
; CHECK-NEXT:    ret void
;
  %interleaved.vec = tail call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d)
  store <vscale x 16 x i32> %interleaved.vec, ptr %dst1, align 4
  %interleaved = tail call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c)
  store <vscale x 8 x i32> %interleaved, ptr %dst2, align 4
  ret void
}

; This case tests that, when the interleave uses the same parameter twice,
; the dead parameter does not get deleted twice.
define void @duplicate_by_interleave(<vscale x 4 x i32> %A, <vscale x 4 x i32> %B, ptr writeonly %AB_duplicate) {
; CHECK-LABEL: define void @duplicate_by_interleave
; CHECK-SAME: (<vscale x 4 x i32> [[A:%.*]], <vscale x 4 x i32> [[B:%.*]], ptr writeonly [[AB_DUPLICATE:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    call void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32> [[A]], <vscale x 4 x i32> [[A]], <vscale x 4 x i32> [[B]], <vscale x 4 x i32> [[B]], <vscale x 4 x i1> splat (i1 true), ptr [[AB_DUPLICATE]])
; CHECK-NEXT:    ret void
;
  %interleave = tail call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> %A, <vscale x 4 x i32> %A, <vscale x 4 x i32> %B, <vscale x 4 x i32> %B)
  store <vscale x 16 x i32> %interleave, ptr %AB_duplicate, align 4
  ret void
}