; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -passes=instcombine < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
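
; The tests below exercise the InstCombine fold for the SVE "DUPQ + CMPNE
; against zero" idiom: an all-active PTRUE, a DUPQ.LANE broadcast of a
; constant 0/1 pattern, and a CMPNE(.WIDE) against DUP.X(0) collapse into a
; constant predicate, either zeroinitializer, an all-true splat, or a
; coarser-granularity PTRUE materialised through convert.to.svbool /
; convert.from.svbool, as the CHECK lines show.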

; DUPQ b8

define <vscale x 16 x i1> @dupq_b_0() #0 {
; CHECK-LABEL: define <vscale x 16 x i1> @dupq_b_0(
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:    ret <vscale x 16 x i1> zeroinitializer
;
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, i64 0)
  %3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %1, <vscale x 16 x i8> %3, <vscale x 2 x i64> %4)
  ret <vscale x 16 x i1> %5
}

define <vscale x 16 x i1> @dupq_b_d() #0 {
; CHECK-LABEL: define <vscale x 16 x i1> @dupq_b_d(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP2]]
;
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> <i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, i64 0)
  %3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %1, <vscale x 16 x i8> %3, <vscale x 2 x i64> %4)
  ret <vscale x 16 x i1> %5
}

define <vscale x 16 x i1> @dupq_b_w() #0 {
; CHECK-LABEL: define <vscale x 16 x i1> @dupq_b_w(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP2]]
;
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> <i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0>, i64 0)
  %3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %1, <vscale x 16 x i8> %3, <vscale x 2 x i64> %4)
  ret <vscale x 16 x i1> %5
}

define <vscale x 16 x i1> @dupq_b_h() #0 {
; CHECK-LABEL: define <vscale x 16 x i1> @dupq_b_h(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP2]]
;
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> <i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0>, i64 0)
  %3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %1, <vscale x 16 x i8> %3, <vscale x 2 x i64> %4)
  ret <vscale x 16 x i1> %5
}

define <vscale x 16 x i1> @dupq_b_b() #0 {
; CHECK-LABEL: define <vscale x 16 x i1> @dupq_b_b(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 16 x i1> splat (i1 true)
;
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, i64 0)
  %3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %1, <vscale x 16 x i8> %3, <vscale x 2 x i64> %4)
  ret <vscale x 16 x i1> %5
}

; DUPQ b16

define <vscale x 8 x i1> @dupq_h_0() #0 {
; CHECK-LABEL: define <vscale x 8 x i1> @dupq_h_0(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 8 x i1> zeroinitializer
;
  %1 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> poison, <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, i64 0)
  %3 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> %1, <vscale x 8 x i16> %3, <vscale x 2 x i64> %4)
  ret <vscale x 8 x i1> %5
}

define <vscale x 8 x i1> @dupq_h_d() #0 {
; CHECK-LABEL: define <vscale x 8 x i1> @dupq_h_d(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[TMP1]])
; CHECK-NEXT:    ret <vscale x 8 x i1> [[TMP3]]
;
  %1 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> poison, <8 x i16> <i16 1, i16 0, i16 0, i16 0, i16 1, i16 0, i16 0, i16 0>, i64 0)
  %3 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> %1, <vscale x 8 x i16> %3, <vscale x 2 x i64> %4)
  ret <vscale x 8 x i1> %5
}

define <vscale x 8 x i1> @dupq_h_w() #0 {
; CHECK-LABEL: define <vscale x 8 x i1> @dupq_h_w(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[TMP1]])
; CHECK-NEXT:    ret <vscale x 8 x i1> [[TMP3]]
;
  %1 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> poison, <8 x i16> <i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0>, i64 0)
  %3 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> %1, <vscale x 8 x i16> %3, <vscale x 2 x i64> %4)
  ret <vscale x 8 x i1> %5
}

define <vscale x 8 x i1> @dupq_h_h() #0 {
; CHECK-LABEL: define <vscale x 8 x i1> @dupq_h_h(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 8 x i1> splat (i1 true)
;
  %1 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> poison, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, i64 0)
  %3 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> %1, <vscale x 8 x i16> %3, <vscale x 2 x i64> %4)
  ret <vscale x 8 x i1> %5
}

; DUPQ b32

define <vscale x 4 x i1> @dupq_w_0() #0 {
; CHECK-LABEL: define <vscale x 4 x i1> @dupq_w_0(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 4 x i1> zeroinitializer
;
  %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0)
  %3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %1, <vscale x 4 x i32> %3, <vscale x 2 x i64> %4)
  ret <vscale x 4 x i1> %5
}

define <vscale x 4 x i1> @dupq_w_d() #0 {
; CHECK-LABEL: define <vscale x 4 x i1> @dupq_w_d(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP1]])
; CHECK-NEXT:    ret <vscale x 4 x i1> [[TMP3]]
;
  %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 1, i32 0>, i64 0)
  %3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %1, <vscale x 4 x i32> %3, <vscale x 2 x i64> %4)
  ret <vscale x 4 x i1> %5
}

define <vscale x 4 x i1> @dupq_w_w() #0 {
; CHECK-LABEL: define <vscale x 4 x i1> @dupq_w_w(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 4 x i1> splat (i1 true)
;
  %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, i64 0)
  %3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %1, <vscale x 4 x i32> %3, <vscale x 2 x i64> %4)
  ret <vscale x 4 x i1> %5
}

; DUPQ b64

define <vscale x 2 x i1> @dupq_d_0() #0 {
; CHECK-LABEL: define <vscale x 2 x i1> @dupq_d_0(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 2 x i1> zeroinitializer
;
  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> <i64 0, i64 0>, i64 0)
  %3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %1, <vscale x 2 x i64> %3, <vscale x 2 x i64> %4)
  ret <vscale x 2 x i1> %5
}

define <vscale x 2 x i1> @dupq_d_d() #0 {
; CHECK-LABEL: define <vscale x 2 x i1> @dupq_d_d(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 2 x i1> splat (i1 true)
;
  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> <i64 1, i64 1>, i64 0)
  %3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %1, <vscale x 2 x i64> %3, <vscale x 2 x i64> %4)
  ret <vscale x 2 x i1> %5
}

; Cases that cannot be converted
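
; NOTE: The <2 x i64> and <4 x i32> pattern constants in dupq_neg1 through
; dupq_neg6 below are representative stand-ins (assumptions), chosen only so
; that each case remains non-foldable; they are not canonical test vectors.
; Why each case cannot be converted:
;   dupq_neg1-dupq_neg5: the 128-bit 0/1 pattern matches no predicate
;                        element granularity (b, h, w or d).
;   dupq_neg6:  one pattern element is not a compile-time constant.
;   dupq_neg7:  the vector.insert index is not 0.
;   dupq_neg8:  the dupq lane index is not 0.
;   dupq_neg9:  the pattern is inserted into a non-poison vector.
;   dupq_neg10: the comparison is against a non-zero value.
;   dupq_neg11: the governing predicate is not a ptrue.
;   dupq_neg12: the ptrue uses pattern 15 rather than 31 (SV_ALL).
;   dupq_neg13: the compare operand is not a dup.x broadcast.
;   dupq_b_idx: the dupq lane index is not a constant.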

define <vscale x 2 x i1> @dupq_neg1() #0 {
; CHECK-LABEL: define <vscale x 2 x i1> @dupq_neg1(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> <i64 1, i64 0>, i64 0)
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> zeroinitializer)
; CHECK-NEXT:    ret <vscale x 2 x i1> [[TMP4]]
;
  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> <i64 1, i64 0>, i64 0)
  %3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %1, <vscale x 2 x i64> %3, <vscale x 2 x i64> %4)
  ret <vscale x 2 x i1> %5
}

define <vscale x 4 x i1> @dupq_neg2() #0 {
; CHECK-LABEL: define <vscale x 4 x i1> @dupq_neg2(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 0, i32 1>, i64 0)
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP3]], <vscale x 2 x i64> zeroinitializer)
; CHECK-NEXT:    ret <vscale x 4 x i1> [[TMP4]]
;
  %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 0, i32 1>, i64 0)
  %3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %1, <vscale x 4 x i32> %3, <vscale x 2 x i64> %4)
  ret <vscale x 4 x i1> %5
}

define <vscale x 4 x i1> @dupq_neg3() #0 {
; CHECK-LABEL: define <vscale x 4 x i1> @dupq_neg3(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 1>, i64 0)
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP3]], <vscale x 2 x i64> zeroinitializer)
; CHECK-NEXT:    ret <vscale x 4 x i1> [[TMP4]]
;
  %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 1>, i64 0)
  %3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %1, <vscale x 4 x i32> %3, <vscale x 2 x i64> %4)
  ret <vscale x 4 x i1> %5
}

define <vscale x 4 x i1> @dupq_neg4() #0 {
; CHECK-LABEL: define <vscale x 4 x i1> @dupq_neg4(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 1, i32 1, i32 0, i32 1>, i64 0)
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP3]], <vscale x 2 x i64> zeroinitializer)
; CHECK-NEXT:    ret <vscale x 4 x i1> [[TMP4]]
;
  %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 1, i32 1, i32 0, i32 1>, i64 0)
  %3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %1, <vscale x 4 x i32> %3, <vscale x 2 x i64> %4)
  ret <vscale x 4 x i1> %5
}

define <vscale x 4 x i1> @dupq_neg5() #0 {
; CHECK-LABEL: define <vscale x 4 x i1> @dupq_neg5(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 0, i32 0, i32 0, i32 1>, i64 0)
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP3]], <vscale x 2 x i64> zeroinitializer)
; CHECK-NEXT:    ret <vscale x 4 x i1> [[TMP4]]
;
  %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 0, i32 0, i32 0, i32 1>, i64 0)
  %3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %1, <vscale x 4 x i32> %3, <vscale x 2 x i64> %4)
  ret <vscale x 4 x i1> %5
}

define <vscale x 4 x i1> @dupq_neg6(i1 %a) #0 {
; CHECK-LABEL: define <vscale x 4 x i1> @dupq_neg6(
; CHECK-SAME: i1 [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[A]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i32> <i32 1, i32 1, i32 1, i32 0>, i32 [[TMP2]], i64 3
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> [[TMP3]], i64 0)
; CHECK-NEXT:    [[TMP5:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
; CHECK-NEXT:    [[TMP6:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP5]], <vscale x 2 x i64> zeroinitializer)
; CHECK-NEXT:    ret <vscale x 4 x i1> [[TMP6]]
;
  %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %2 = zext i1 %a to i32
  %3 = insertelement <4 x i32> <i32 1, i32 1, i32 1, i32 0>, i32 %2, i32 3
  %4 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> %3, i64 0)
  %5 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %4 , i64 0)
  %6 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %7 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %1, <vscale x 4 x i32> %5, <vscale x 2 x i64> %6)
  ret <vscale x 4 x i1> %7
}

define <vscale x 2 x i1> @dupq_neg7() #0 {
; CHECK-LABEL: define <vscale x 2 x i1> @dupq_neg7(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> splat (i64 1), i64 2)
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> zeroinitializer)
; CHECK-NEXT:    ret <vscale x 2 x i1> [[TMP4]]
;
  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> <i64 1, i64 1>, i64 2)
  %3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %1, <vscale x 2 x i64> %3, <vscale x 2 x i64> %4)
  ret <vscale x 2 x i1> %5
}

define <vscale x 2 x i1> @dupq_neg8() #0 {
; CHECK-LABEL: define <vscale x 2 x i1> @dupq_neg8(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> splat (i64 1), i64 0)
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 1)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> zeroinitializer)
; CHECK-NEXT:    ret <vscale x 2 x i1> [[TMP4]]
;
  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> <i64 1, i64 1>, i64 0)
  %3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 1)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %1, <vscale x 2 x i64> %3, <vscale x 2 x i64> %4)
  ret <vscale x 2 x i1> %5
}

define <vscale x 2 x i1> @dupq_neg9(<vscale x 2 x i64> %x) #0 {
; CHECK-LABEL: define <vscale x 2 x i1> @dupq_neg9(
; CHECK-SAME: <vscale x 2 x i64> [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> [[X]], <2 x i64> splat (i64 1), i64 0)
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> zeroinitializer)
; CHECK-NEXT:    ret <vscale x 2 x i1> [[TMP4]]
;
  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> %x, <2 x i64> <i64 1, i64 1>, i64 0)
  %3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %1, <vscale x 2 x i64> %3, <vscale x 2 x i64> %4)
  ret <vscale x 2 x i1> %5
}

define <vscale x 2 x i1> @dupq_neg10() #0 {
; CHECK-LABEL: define <vscale x 2 x i1> @dupq_neg10(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> splat (i64 1), i64 0)
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> splat (i64 1))
; CHECK-NEXT:    ret <vscale x 2 x i1> [[TMP4]]
;
  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> <i64 1, i64 1>, i64 0)
  %3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 1)
  %5 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %1, <vscale x 2 x i64> %3, <vscale x 2 x i64> %4)
  ret <vscale x 2 x i1> %5
}

define <vscale x 2 x i1> @dupq_neg11(<vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: define <vscale x 2 x i1> @dupq_neg11(
; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> splat (i64 1), i64 0)
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP1]], i64 0)
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> [[PG]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> zeroinitializer)
; CHECK-NEXT:    ret <vscale x 2 x i1> [[TMP3]]
;
  %1 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> <i64 1, i64 1>, i64 0)
  %2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %1 , i64 0)
  %3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %4 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3)
  ret <vscale x 2 x i1> %4
}

define <vscale x 2 x i1> @dupq_neg12() #0 {
; CHECK-LABEL: define <vscale x 2 x i1> @dupq_neg12(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 15)
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> splat (i64 1), i64 0)
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> zeroinitializer)
; CHECK-NEXT:    ret <vscale x 2 x i1> [[TMP4]]
;
  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 15)
  %2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> <i64 1, i64 1>, i64 0)
  %3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %1, <vscale x 2 x i64> %3, <vscale x 2 x i64> %4)
  ret <vscale x 2 x i1> %5
}

define <vscale x 2 x i1> @dupq_neg13(<vscale x 2 x i64> %x) #0 {
; CHECK-LABEL: define <vscale x 2 x i1> @dupq_neg13(
; CHECK-SAME: <vscale x 2 x i64> [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> splat (i64 1), i64 0)
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[X]])
; CHECK-NEXT:    ret <vscale x 2 x i1> [[TMP4]]
;
  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> <i64 1, i64 1>, i64 0)
  %3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
  %4 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %1, <vscale x 2 x i64> %3, <vscale x 2 x i64> %x)
  ret <vscale x 2 x i1> %4
}

define <vscale x 16 x i1> @dupq_b_idx(i64 %idx) #0 {
; CHECK-LABEL: define <vscale x 16 x i1> @dupq_b_idx(
; CHECK-SAME: i64 [[IDX:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> zeroinitializer, i64 0)
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP2]], i64 [[IDX]])
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> splat (i1 true), <vscale x 16 x i8> [[TMP3]], <vscale x 2 x i64> zeroinitializer)
; CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP4]]
;
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, i64 0)
  %3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2 , i64 %idx)
  %4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %1, <vscale x 16 x i8> %3, <vscale x 2 x i64> %4)
  ret <vscale x 16 x i1> %5
}

declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32)

declare <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8>, <16 x i8>, i64)
declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16>, <8 x i16>, i64)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)

declare <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8>, i64)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64>, i64)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64)

attributes #0 = { "target-features"="+sve" }