; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=instcombine %s | FileCheck %s
; RUN: opt -S -passes=instcombine %s \
; RUN:   -use-constant-int-for-fixed-length-splat \
; RUN:   -use-constant-fp-for-fixed-length-splat \
; RUN:   -use-constant-int-for-scalable-splat \
; RUN:   -use-constant-fp-for-scalable-splat | FileCheck %s

; InstCombine should fold a binary op whose operand is a constant
; llvm.vector.insert into the insert itself, applying the op to both the
; inserted subvector constant and the destination vector constant.

define <vscale x 4 x i32> @insert_div() {
; CHECK-LABEL: @insert_div(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[DIV:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 3), i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 9), i64 0)
  %div = udiv <vscale x 4 x i32> %0, splat (i32 3)
  ret <vscale x 4 x i32> %div
}

; Same fold with the insert on the RHS of the udiv (10/2 = 5, 10/5 = 2...
; note the fold divides the constant 10 by each operand of the insert).
define <vscale x 4 x i32> @insert_div_splat_lhs() {
; CHECK-LABEL: @insert_div_splat_lhs(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[DIV:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 5), <4 x i32> splat (i32 2), i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 2), <4 x i32> splat (i32 5), i64 0)
  %div = udiv <vscale x 4 x i32> splat (i32 10), %0
  ret <vscale x 4 x i32> %div
}

; Both insert operands are splat constants; the op folds into each.
define <vscale x 4 x i32> @insert_div_mixed_splat() {
; CHECK-LABEL: @insert_div_mixed_splat(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[DIV:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 6), <4 x i32> splat (i32 3), i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 18), <4 x i32> splat (i32 9), i64 0)
  %div = udiv <vscale x 4 x i32> %0, splat (i32 3)
  ret <vscale x 4 x i32> %div
}

; Fold still applies with a non-zero insert index.
define <vscale x 4 x i32> @insert_mul() {
; CHECK-LABEL: @insert_mul(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[MUL:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 7), i64 4)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[MUL]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 1), i64 4)
  %mul = mul <vscale x 4 x i32> %0, splat (i32 7)
  ret <vscale x 4 x i32> %mul
}

define <vscale x 4 x i32> @insert_add() {
; CHECK-LABEL: @insert_add(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 16), i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 5), i64 0)
  %add = add <vscale x 4 x i32> %0, splat (i32 11)
  ret <vscale x 4 x i32> %add
}

; The inserted subvector need not be a splat for the fold to apply.
; NOTE(review): the original lane literals were lost when this file was
; mangled (letter-initial <...> tokens stripped); the values below are a
; reconstruction consistent with the `add ... splat (i32 100)` fold — confirm
; against the upstream test before relying on the exact lanes.
define <vscale x 4 x i32> @insert_add_non_splat_subvector() {
; CHECK-LABEL: @insert_add_non_splat_subvector(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 101, i32 102, i32 103, i32 104>, i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i64 0)
  %add = add <vscale x 4 x i32> %0, splat (i32 100)
  ret <vscale x 4 x i32> %add
}

; Floating-point variant: fadd folds into both insert operands.
define <vscale x 4 x float> @insert_add_fp() {
; CHECK-LABEL: @insert_add_fp(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> splat (float 6.250000e+00), <4 x float> splat (float 5.500000e+00), i64 0)
; CHECK-NEXT:    ret <vscale x 4 x float> [[ADD]]
;
entry:
  %0 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> splat (float 1.25), <4 x float> splat (float 0.5), i64 0)
  %add = fadd <vscale x 4 x float> %0, splat (float 5.0)
  ret <vscale x 4 x float> %add
}

; Fold also works when the inserted subvector is itself scalable.
define <vscale x 8 x i32> @insert_add_scalable_subvector() {
; CHECK-LABEL: @insert_add_scalable_subvector(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> splat (i32 20), <vscale x 4 x i32> splat (i32 -4), i64 0)
; CHECK-NEXT:    ret <vscale x 8 x i32> [[ADD]]
;
entry:
  %0 = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> splat (i32 16), <vscale x 4 x i32> splat (i32 -8), i64 0)
  %add = add <vscale x 8 x i32> %0, splat (i32 4)
  ret <vscale x 8 x i32> %add
}

; Subtraction expressed as add-of-negative; subvector folds to zero.
define <vscale x 4 x i32> @insert_sub() {
; CHECK-LABEL: @insert_sub(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SUB:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> zeroinitializer, i64 8)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[SUB]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 11), i64 8)
  %sub = add <vscale x 4 x i32> %0, splat (i32 -11)
  ret <vscale x 4 x i32> %sub
}

; `and undef, C` may fold the undef destination lanes to zero.
define <vscale x 4 x i32> @insert_and_partially_undef() {
; CHECK-LABEL: @insert_and_partially_undef(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[AND:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> zeroinitializer, <4 x i32> splat (i32 4), i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[AND]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> splat (i32 6), i64 0)
  %and = and <vscale x 4 x i32> %0, splat (i32 4)
  ret <vscale x 4 x i32> %and
}

; Two chained ops both fold into the insert (21/3+4 = 11, 12/3+4 = 8).
define <vscale x 4 x i32> @insert_fold_chain() {
; CHECK-LABEL: @insert_fold_chain(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 11), <4 x i32> splat (i32 8), i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 21), <4 x i32> splat (i32 12), i64 0)
  %div = udiv <vscale x 4 x i32> %0, splat (i32 3)
  %add = add <vscale x 4 x i32> %div, splat (i32 4)
  ret <vscale x 4 x i32> %add
}

; TODO: This could be folded more.
define <vscale x 4 x i32> @insert_add_both_insert_vector() {
; CHECK-LABEL: @insert_add_both_insert_vector(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 10), <4 x i32> splat (i32 5), i64 0)
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 -1), <4 x i32> splat (i32 2), i64 0)
; CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 4 x i32> [[TMP0]], [[TMP1]]
; CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 10), <4 x i32> splat (i32 5), i64 0)
  %1 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 -1), <4 x i32> splat (i32 2), i64 0)
  %add = add <vscale x 4 x i32> %0, %1
  ret <vscale x 4 x i32> %add
}