author    | ZhaoQi <zhaoqi01@loongson.cn> | 2025-07-26 14:24:39 +0800
committer | GitHub <noreply@github.com>   | 2025-07-26 14:24:39 +0800
commit    | f2a4cc1dd0c43ffe1756a158150eeeacb75daf28 (patch)
tree      | 5ebdc9871995339685834aaa1c8fc9ead6908e4c
parent    | dd4ebe6514a9250d10004cdf8876fca7394997d2 (diff)
[LoongArch] Avoid expanding build_vector containing insertion of undef elements (#150377)
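The affected pattern, in IR terms, is a build_vector assembled through an insertelement chain where some lanes stay undef. Below is a reduced sketch (hypothetical function name, modeled on the buildvector_*_partial tests updated in this commit; compile with llc --mtriple=loongarch64 --mattr=+lsx). Before this change, any undef lane made the old isConstantOrUndefBUILD_VECTOR predicate steer such nodes away from the INSERT_VECTOR_ELT path, so they fell through to a store-based expansion; with this change, undef lanes are simply skipped and each defined lane becomes one element insertion.

; Reduced sketch (hypothetical name, modeled on the lsx/build-vector.ll
; tests below): only lanes 1 and 3 are defined.
define void @buildvector_v4i32_undef_lanes(ptr %dst, i32 %a1, i32 %a3) nounwind {
entry:
  ; Lanes 0 and 2 stay undef; with this patch they are skipped, so no
  ; stack slot or constant-pool shuffle is needed.
  %ins0 = insertelement <4 x i32> undef, i32 undef, i32 0
  %ins1 = insertelement <4 x i32> %ins0, i32 %a1, i32 1
  %ins2 = insertelement <4 x i32> %ins1, i32 undef, i32 2
  %ins3 = insertelement <4 x i32> %ins2, i32 %a3, i32 3
  store <4 x i32> %ins3, ptr %dst
  ret void
}

As the lsx/build-vector.ll diff below shows for the equivalent test, this now lowers to two vinsgr2vr.w instructions plus a vst instead of scalar stores and a vector reload.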
-rw-r--r-- | llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp |  31
-rw-r--r-- | llvm/test/CodeGen/LoongArch/lasx/build-vector.ll    | 151
-rw-r--r-- | llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll       |  45
-rw-r--r-- | llvm/test/CodeGen/LoongArch/llvm.exp10.ll           |   6
-rw-r--r-- | llvm/test/CodeGen/LoongArch/llvm.sincos.ll          |  54
-rw-r--r-- | llvm/test/CodeGen/LoongArch/lsx/build-vector.ll     |  41
6 files changed, 146 insertions(+), 182 deletions(-)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index e915a3c4..613cfb5 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -2385,19 +2385,9 @@ SDValue LoongArchTargetLowering::lowerBF16_TO_FP(SDValue Op,
   return Res;
 }
 
-static bool isConstantOrUndef(const SDValue Op) {
-  if (Op->isUndef())
-    return true;
-  if (isa<ConstantSDNode>(Op))
-    return true;
-  if (isa<ConstantFPSDNode>(Op))
-    return true;
-  return false;
-}
-
-static bool isConstantOrUndefBUILD_VECTOR(const BuildVectorSDNode *Op) {
+static bool isConstantBUILD_VECTOR(const BuildVectorSDNode *Op) {
   for (unsigned i = 0; i < Op->getNumOperands(); ++i)
-    if (isConstantOrUndef(Op->getOperand(i)))
+    if (isIntOrFPConstant(Op->getOperand(i)))
       return true;
   return false;
 }
@@ -2505,20 +2495,23 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
   if (DAG.isSplatValue(Op, /*AllowUndefs=*/false))
     return Op;
 
-  if (!isConstantOrUndefBUILD_VECTOR(Node)) {
+  if (!isConstantBUILD_VECTOR(Node)) {
     // Use INSERT_VECTOR_ELT operations rather than expand to stores.
     // The resulting code is the same length as the expansion, but it doesn't
     // use memory operations.
-    EVT ResTy = Node->getValueType(0);
-    assert(ResTy.isVector());
     unsigned NumElts = ResTy.getVectorNumElements();
-    SDValue Vector =
-        DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ResTy, Node->getOperand(0));
+    SDValue Op0 = Node->getOperand(0);
+    SDValue Vector = DAG.getUNDEF(ResTy);
+
+    if (!Op0.isUndef())
+      Vector = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ResTy, Op0);
     for (unsigned i = 1; i < NumElts; ++i) {
-      Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector,
-                           Node->getOperand(i),
+      SDValue Opi = Node->getOperand(i);
+      if (Opi.isUndef())
+        continue;
+      Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector, Opi,
                            DAG.getConstant(i, DL, Subtarget.getGRLenVT()));
     }
     return Vector;
   }
 
diff --git a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
index 8aa38f0..5130865 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
@@ -355,40 +355,46 @@ entry:
 define void @buildvector_v32i8_partial(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a5, i8 %a7, i8 %a8, i8 %a15, i8 %a17, i8 %a18, i8 %a20, i8 %a22, i8 %a23, i8 %a27, i8 %a28, i8 %a31) nounwind {
 ; CHECK-LABEL: buildvector_v32i8_partial:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi.d $sp, $sp, -96
-; CHECK-NEXT:    st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT:    st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT:    addi.d $fp, $sp, 96
-; CHECK-NEXT:    bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT:    ld.b $t0, $fp, 0
-; CHECK-NEXT:    ld.b $t1, $fp, 8
-; CHECK-NEXT:    ld.b $t2, $fp, 16
-; CHECK-NEXT:    ld.b $t3, $fp, 24
-; CHECK-NEXT:    ld.b $t4, $fp, 56
-; CHECK-NEXT:    ld.b $t5, $fp, 48
-; CHECK-NEXT:    ld.b $t6, $fp, 40
-; CHECK-NEXT:    ld.b $t7, $fp, 32
-; CHECK-NEXT:    st.b $t4, $sp, 63
-; CHECK-NEXT:    st.b $t5, $sp, 60
-; CHECK-NEXT:    st.b $t6, $sp, 59
-; CHECK-NEXT:    st.b $t7, $sp, 55
-; CHECK-NEXT:    st.b $t3, $sp, 54
-; CHECK-NEXT:    st.b $t2, $sp, 52
-; CHECK-NEXT:    st.b $t1, $sp, 50
-; CHECK-NEXT:    st.b $t0, $sp, 49
-; CHECK-NEXT:    st.b $a7, $sp, 47
-; CHECK-NEXT:    st.b $a6, $sp, 40
-; CHECK-NEXT:    st.b $a5, $sp, 39
-; CHECK-NEXT:    st.b $a4, $sp, 37
-; CHECK-NEXT:    st.b $a3, $sp, 34
-; CHECK-NEXT:    st.b $a2, $sp, 33
-; CHECK-NEXT:    st.b $a1, $sp, 32
-; CHECK-NEXT:    xvld $xr0, $sp, 32
+; CHECK-NEXT:    ld.b $t0, $sp, 56
+; CHECK-NEXT:    ld.b $t1, $sp, 48
+; CHECK-NEXT:    ld.b $t2, $sp, 40
+; CHECK-NEXT:    ld.b $t3, $sp, 32
+; CHECK-NEXT:    ld.b $t4, $sp, 24
+; CHECK-NEXT:    ld.b $t5, $sp, 16
+; CHECK-NEXT:    ld.b $t6, $sp, 8
+; CHECK-NEXT:    ld.b $t7, $sp, 0
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 0
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 1
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a3, 2
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a4, 5
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a5, 7
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a6, 8
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a7, 15
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT:    vinsgr2vr.b $vr1, $t7, 1
+; CHECK-NEXT:    xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT:    vinsgr2vr.b $vr1, $t6, 2
+; CHECK-NEXT:    xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT:    vinsgr2vr.b $vr1, $t5, 4
+; CHECK-NEXT:    xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT:    vinsgr2vr.b $vr1, $t4, 6
+; CHECK-NEXT:    xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT:    vinsgr2vr.b $vr1, $t3, 7
+; CHECK-NEXT:    xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT:    vinsgr2vr.b $vr1, $t2, 11
+; CHECK-NEXT:    xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT:    vinsgr2vr.b $vr1, $t1, 12
+; CHECK-NEXT:    xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT:    vinsgr2vr.b $vr1, $t0, 15
+; CHECK-NEXT:    xvpermi.q $xr0, $xr1, 2
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
-; CHECK-NEXT:    addi.d $sp, $fp, -96
-; CHECK-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT:    addi.d $sp, $sp, 96
 ; CHECK-NEXT:    ret
 entry:
   %ins0 = insertelement <32 x i8> undef, i8 %a0, i32 0
@@ -581,24 +587,18 @@ entry:
 define void @buildvector_v16i16_partial(ptr %dst, i16 %a0, i16 %a2, i16 %a5, i16 %a6, i16 %a7, i16 %a12, i16 %a13) nounwind {
 ; CHECK-LABEL: buildvector_v16i16_partial:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi.d $sp, $sp, -96
-; CHECK-NEXT:    st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT:    st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT:    addi.d $fp, $sp, 96
-; CHECK-NEXT:    bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT:    st.h $a7, $sp, 58
-; CHECK-NEXT:    st.h $a6, $sp, 56
-; CHECK-NEXT:    st.h $a5, $sp, 46
-; CHECK-NEXT:    st.h $a4, $sp, 44
-; CHECK-NEXT:    st.h $a3, $sp, 42
-; CHECK-NEXT:    st.h $a2, $sp, 36
-; CHECK-NEXT:    st.h $a1, $sp, 32
-; CHECK-NEXT:    xvld $xr0, $sp, 32
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a1, 0
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a2, 2
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a3, 5
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a4, 6
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a5, 7
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT:    vinsgr2vr.h $vr1, $a6, 4
+; CHECK-NEXT:    xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT:    vinsgr2vr.h $vr1, $a7, 5
+; CHECK-NEXT:    xvpermi.q $xr0, $xr1, 2
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
-; CHECK-NEXT:    addi.d $sp, $fp, -96
-; CHECK-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT:    addi.d $sp, $sp, 96
 ; CHECK-NEXT:    ret
 entry:
   %ins0 = insertelement <16 x i16> undef, i16 %a0, i32 0
@@ -702,21 +702,11 @@ entry:
 define void @buildvector_v8i32_partial(ptr %dst, i32 %a2, i32 %a4, i32 %a5, i32 %a6) nounwind {
 ; CHECK-LABEL: buildvector_v8i32_partial:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi.d $sp, $sp, -96
-; CHECK-NEXT:    st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT:    st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT:    addi.d $fp, $sp, 96
-; CHECK-NEXT:    bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT:    st.w $a4, $sp, 56
-; CHECK-NEXT:    st.w $a3, $sp, 52
-; CHECK-NEXT:    st.w $a2, $sp, 48
-; CHECK-NEXT:    st.w $a1, $sp, 40
-; CHECK-NEXT:    xvld $xr0, $sp, 32
+; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 2
+; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a2, 4
+; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a3, 5
+; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a4, 6
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
-; CHECK-NEXT:    addi.d $sp, $fp, -96
-; CHECK-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT:    addi.d $sp, $sp, 96
 ; CHECK-NEXT:    ret
 entry:
   %ins0 = insertelement <8 x i32> undef, i32 undef, i32 0
@@ -787,11 +777,8 @@ entry:
 define void @buildvector_v4i64_partial(ptr %dst, i64 %a1, i64 %a2) nounwind {
 ; CHECK-LABEL: buildvector_v4i64_partial:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xvinsgr2vr.d $xr0, $a2, 0
-; CHECK-NEXT:    xvpermi.d $xr0, $xr0, 68
-; CHECK-NEXT:    xvinsgr2vr.d $xr1, $a1, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr1, 68
-; CHECK-NEXT:    xvpackev.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvinsgr2vr.d $xr0, $a1, 1
+; CHECK-NEXT:    xvinsgr2vr.d $xr0, $a2, 2
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -867,21 +854,15 @@ entry:
 define void @buildvector_v8f32_partial(ptr %dst, float %a1, float %a2, float %a5, float %a7) nounwind {
 ; CHECK-LABEL: buildvector_v8f32_partial:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi.d $sp, $sp, -96
-; CHECK-NEXT:    st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT:    st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT:    addi.d $fp, $sp, 96
-; CHECK-NEXT:    bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT:    fst.s $fa3, $sp, 60
-; CHECK-NEXT:    fst.s $fa2, $sp, 52
-; CHECK-NEXT:    fst.s $fa1, $sp, 40
-; CHECK-NEXT:    fst.s $fa0, $sp, 36
-; CHECK-NEXT:    xvld $xr0, $sp, 32
+; CHECK-NEXT:    # kill: def $f3 killed $f3 def $xr3
+; CHECK-NEXT:    # kill: def $f2 killed $f2 def $xr2
+; CHECK-NEXT:    # kill: def $f1 killed $f1 def $xr1
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $xr0
+; CHECK-NEXT:    xvinsve0.w $xr0, $xr0, 1
+; CHECK-NEXT:    xvinsve0.w $xr0, $xr1, 2
+; CHECK-NEXT:    xvinsve0.w $xr0, $xr2, 5
+; CHECK-NEXT:    xvinsve0.w $xr0, $xr3, 7
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
-; CHECK-NEXT:    addi.d $sp, $fp, -96
-; CHECK-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT:    addi.d $sp, $sp, 96
 ; CHECK-NEXT:    ret
 entry:
   %ins0 = insertelement <8 x float> undef, float undef, i32 0
@@ -960,9 +941,7 @@ define void @buildvector_v4f64_partial(ptr %dst, double %a0, double %a3) nounwin
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    # kill: def $f1_64 killed $f1_64 def $xr1
 ; CHECK-NEXT:    # kill: def $f0_64 killed $f0_64 def $xr0
-; CHECK-NEXT:    xvpermi.d $xr0, $xr0, 68
-; CHECK-NEXT:    xvpermi.d $xr1, $xr1, 68
-; CHECK-NEXT:    xvpackev.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvinsve0.d $xr0, $xr1, 3
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll b/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll
index c61b784..06d4a5d 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll
@@ -524,9 +524,8 @@ define i8 @xvmsk_eq_v2i64_concat_poison(<2 x i64> %vec) {
 ; CHECK-NEXT:    vpickve2gr.d $a0, $vr0, 0
 ; CHECK-NEXT:    vinsgr2vr.h $vr1, $a0, 0
 ; CHECK-NEXT:    vpickve2gr.d $a0, $vr0, 1
-; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT:    vpackev.h $vr0, $vr0, $vr1
-; CHECK-NEXT:    vslli.h $vr0, $vr0, 15
+; CHECK-NEXT:    vinsgr2vr.h $vr1, $a0, 1
+; CHECK-NEXT:    vslli.h $vr0, $vr1, 15
 ; CHECK-NEXT:    vmskltz.h $vr0, $vr0
 ; CHECK-NEXT:    vpickve2gr.hu $a0, $vr0, 0
 ; CHECK-NEXT:    ret
@@ -539,24 +538,20 @@ define i8 @xvmsk_eq_v2i64_concat_poison(<2 x i64> %vec) {
 define i8 @xvmsk_ne_v4i32_concat_poison(<4 x i32> %vec) {
 ; CHECK-LABEL: xvmsk_ne_v4i32_concat_poison:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi.d $sp, $sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    vseqi.w $vr0, $vr0, 0
 ; CHECK-NEXT:    vrepli.b $vr1, -1
 ; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vpickve2gr.w $a0, $vr0, 3
-; CHECK-NEXT:    st.h $a0, $sp, 6
-; CHECK-NEXT:    vpickve2gr.w $a0, $vr0, 2
-; CHECK-NEXT:    st.h $a0, $sp, 4
-; CHECK-NEXT:    vpickve2gr.w $a0, $vr0, 1
-; CHECK-NEXT:    st.h $a0, $sp, 2
 ; CHECK-NEXT:    vpickve2gr.w $a0, $vr0, 0
-; CHECK-NEXT:    st.h $a0, $sp, 0
-; CHECK-NEXT:    vld $vr0, $sp, 0
-; CHECK-NEXT:    vslli.h $vr0, $vr0, 15
+; CHECK-NEXT:    vinsgr2vr.h $vr1, $a0, 0
+; CHECK-NEXT:    vpickve2gr.w $a0, $vr0, 1
+; CHECK-NEXT:    vinsgr2vr.h $vr1, $a0, 1
+; CHECK-NEXT:    vpickve2gr.w $a0, $vr0, 2
+; CHECK-NEXT:    vinsgr2vr.h $vr1, $a0, 2
+; CHECK-NEXT:    vpickve2gr.w $a0, $vr0, 3
+; CHECK-NEXT:    vinsgr2vr.h $vr1, $a0, 3
+; CHECK-NEXT:    vslli.h $vr0, $vr1, 15
 ; CHECK-NEXT:    vmskltz.h $vr0, $vr0
 ; CHECK-NEXT:    vpickve2gr.hu $a0, $vr0, 0
-; CHECK-NEXT:    addi.d $sp, $sp, 16
 ; CHECK-NEXT:    ret
   %tobool = icmp ne <4 x i32> %vec, zeroinitializer
   %insertvec = shufflevector <4 x i1> %tobool, <4 x i1> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -567,23 +562,19 @@ define i8 @xvmsk_ne_v4i32_concat_poison(<4 x i32> %vec) {
 define i8 @xvmsk_ogt_v4f64_concat_poison(<4 x double> %vec) {
 ; CHECK-LABEL: xvmsk_ogt_v4f64_concat_poison:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi.d $sp, $sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    xvrepli.b $xr1, 0
 ; CHECK-NEXT:    xvfcmp.clt.d $xr0, $xr1, $xr0
-; CHECK-NEXT:    xvpickve2gr.d $a0, $xr0, 0
-; CHECK-NEXT:    xvpickve2gr.d $a1, $xr0, 1
-; CHECK-NEXT:    xvpickve2gr.d $a2, $xr0, 2
-; CHECK-NEXT:    xvpickve2gr.d $a3, $xr0, 3
-; CHECK-NEXT:    st.h $a3, $sp, 6
-; CHECK-NEXT:    st.h $a2, $sp, 4
-; CHECK-NEXT:    st.h $a1, $sp, 2
-; CHECK-NEXT:    st.h $a0, $sp, 0
-; CHECK-NEXT:    vld $vr0, $sp, 0
+; CHECK-NEXT:    xvpickve2gr.d $a0, $xr0, 3
+; CHECK-NEXT:    xvpickve2gr.d $a1, $xr0, 2
+; CHECK-NEXT:    xvpickve2gr.d $a2, $xr0, 1
+; CHECK-NEXT:    xvpickve2gr.d $a3, $xr0, 0
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a3, 0
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a2, 1
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a1, 2
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 3
 ; CHECK-NEXT:    vslli.h $vr0, $vr0, 15
 ; CHECK-NEXT:    vmskltz.h $vr0, $vr0
 ; CHECK-NEXT:    vpickve2gr.hu $a0, $vr0, 0
-; CHECK-NEXT:    addi.d $sp, $sp, 16
 ; CHECK-NEXT:    ret
   %tobool = fcmp ogt <4 x double> %vec, zeroinitializer
   %insertvec = shufflevector <4 x i1> %tobool, <4 x i1> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
diff --git a/llvm/test/CodeGen/LoongArch/llvm.exp10.ll b/llvm/test/CodeGen/LoongArch/llvm.exp10.ll
index 62ea5cb..030b822c 100644
--- a/llvm/test/CodeGen/LoongArch/llvm.exp10.ll
+++ b/llvm/test/CodeGen/LoongArch/llvm.exp10.ll
@@ -137,20 +137,20 @@ define <2 x float> @exp10_v2f32(<2 x float> %x) #0 {
 ; LA64-NEXT:    addi.d $sp, $sp, -48
 ; LA64-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
 ; LA64-NEXT:    vst $vr0, $sp, 0 # 16-byte Folded Spill
-; LA64-NEXT:    vreplvei.w $vr0, $vr0, 0
+; LA64-NEXT:    vreplvei.w $vr0, $vr0, 1
 ; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(exp10f)
 ; LA64-NEXT:    jirl $ra, $ra, 0
 ; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
 ; LA64-NEXT:    vst $vr0, $sp, 16 # 16-byte Folded Spill
 ; LA64-NEXT:    vld $vr0, $sp, 0 # 16-byte Folded Reload
-; LA64-NEXT:    vreplvei.w $vr0, $vr0, 1
+; LA64-NEXT:    vreplvei.w $vr0, $vr0, 0
 ; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(exp10f)
 ; LA64-NEXT:    jirl $ra, $ra, 0
 ; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
 ; LA64-NEXT:    vld $vr1, $sp, 16 # 16-byte Folded Reload
-; LA64-NEXT:    vpackev.w $vr0, $vr0, $vr1
+; LA64-NEXT:    vextrins.w $vr0, $vr1, 16
 ; LA64-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 48
 ; LA64-NEXT:    ret
diff --git a/llvm/test/CodeGen/LoongArch/llvm.sincos.ll b/llvm/test/CodeGen/LoongArch/llvm.sincos.ll
index 383d63c..4ac38a9 100644
--- a/llvm/test/CodeGen/LoongArch/llvm.sincos.ll
+++ b/llvm/test/CodeGen/LoongArch/llvm.sincos.ll
@@ -350,7 +350,7 @@ define { <2 x float>, <2 x float> } @test_sincos_v2f32(<2 x float> %a) #0 {
 ; LA64-NEXT:    addi.d $sp, $sp, -80
 ; LA64-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
 ; LA64-NEXT:    vst $vr0, $sp, 16 # 16-byte Folded Spill
-; LA64-NEXT:    vreplvei.w $vr0, $vr0, 0
+; LA64-NEXT:    vreplvei.w $vr0, $vr0, 1
 ; LA64-NEXT:    vst $vr0, $sp, 48 # 16-byte Folded Spill
 ; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(sinf)
@@ -358,14 +358,14 @@ define { <2 x float>, <2 x float> } @test_sincos_v2f32(<2 x float> %a) #0 {
 ; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
 ; LA64-NEXT:    vst $vr0, $sp, 32 # 16-byte Folded Spill
 ; LA64-NEXT:    vld $vr0, $sp, 16 # 16-byte Folded Reload
-; LA64-NEXT:    vreplvei.w $vr0, $vr0, 1
+; LA64-NEXT:    vreplvei.w $vr0, $vr0, 0
 ; LA64-NEXT:    vst $vr0, $sp, 16 # 16-byte Folded Spill
 ; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(sinf)
 ; LA64-NEXT:    jirl $ra, $ra, 0
 ; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
 ; LA64-NEXT:    vld $vr1, $sp, 32 # 16-byte Folded Reload
-; LA64-NEXT:    vpackev.w $vr0, $vr0, $vr1
+; LA64-NEXT:    vextrins.w $vr0, $vr1, 16
 ; LA64-NEXT:    vst $vr0, $sp, 32 # 16-byte Folded Spill
 ; LA64-NEXT:    vld $vr0, $sp, 48 # 16-byte Folded Reload
 ; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
@@ -377,9 +377,9 @@ define { <2 x float>, <2 x float> } @test_sincos_v2f32(<2 x float> %a) #0 {
 ; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(cosf)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
-; LA64-NEXT:    vld $vr1, $sp, 48 # 16-byte Folded Reload
-; LA64-NEXT:    vpackev.w $vr1, $vr0, $vr1
+; LA64-NEXT:    fmov.s $fa1, $fa0
+; LA64-NEXT:    vld $vr0, $sp, 48 # 16-byte Folded Reload
+; LA64-NEXT:    vextrins.w $vr1, $vr0, 16
 ; LA64-NEXT:    vld $vr0, $sp, 32 # 16-byte Folded Reload
 ; LA64-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 80
@@ -439,48 +439,60 @@ define { <3 x float>, <3 x float> } @test_sincos_v3f32(<3 x float> %a) #0 {
 ;
 ; LA64-LABEL: test_sincos_v3f32:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    addi.d $sp, $sp, -112
-; LA64-NEXT:    st.d $ra, $sp, 104 # 8-byte Folded Spill
+; LA64-NEXT:    addi.d $sp, $sp, -96
+; LA64-NEXT:    st.d $ra, $sp, 88 # 8-byte Folded Spill
 ; LA64-NEXT:    vst $vr0, $sp, 48 # 16-byte Folded Spill
-; LA64-NEXT:    vreplvei.w $vr0, $vr0, 2
+; LA64-NEXT:    vreplvei.w $vr0, $vr0, 1
 ; LA64-NEXT:    vst $vr0, $sp, 32 # 16-byte Folded Spill
 ; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(sinf)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fst.s $fa0, $sp, 88
+; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
+; LA64-NEXT:    vst $vr0, $sp, 64 # 16-byte Folded Spill
 ; LA64-NEXT:    vld $vr0, $sp, 48 # 16-byte Folded Reload
-; LA64-NEXT:    vreplvei.w $vr0, $vr0, 1
+; LA64-NEXT:    vreplvei.w $vr0, $vr0, 0
 ; LA64-NEXT:    vst $vr0, $sp, 16 # 16-byte Folded Spill
 ; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(sinf)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fst.s $fa0, $sp, 84
+; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
+; LA64-NEXT:    vld $vr1, $sp, 64 # 16-byte Folded Reload
+; LA64-NEXT:    vextrins.w $vr0, $vr1, 16
+; LA64-NEXT:    vst $vr0, $sp, 64 # 16-byte Folded Spill
 ; LA64-NEXT:    vld $vr0, $sp, 48 # 16-byte Folded Reload
-; LA64-NEXT:    vreplvei.w $vr0, $vr0, 0
+; LA64-NEXT:    vreplvei.w $vr0, $vr0, 2
 ; LA64-NEXT:    vst $vr0, $sp, 48 # 16-byte Folded Spill
 ; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(sinf)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fst.s $fa0, $sp, 80
+; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
+; LA64-NEXT:    vld $vr1, $sp, 64 # 16-byte Folded Reload
+; LA64-NEXT:    vextrins.w $vr1, $vr0, 32
+; LA64-NEXT:    vst $vr1, $sp, 64 # 16-byte Folded Spill
 ; LA64-NEXT:    vld $vr0, $sp, 32 # 16-byte Folded Reload
 ; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(cosf)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fst.s $fa0, $sp, 72
+; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
+; LA64-NEXT:    vst $vr0, $sp, 32 # 16-byte Folded Spill
 ; LA64-NEXT:    vld $vr0, $sp, 16 # 16-byte Folded Reload
 ; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(cosf)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fst.s $fa0, $sp, 68
+; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
+; LA64-NEXT:    vld $vr1, $sp, 32 # 16-byte Folded Reload
+; LA64-NEXT:    vextrins.w $vr0, $vr1, 16
+; LA64-NEXT:    vst $vr0, $sp, 32 # 16-byte Folded Spill
 ; LA64-NEXT:    vld $vr0, $sp, 48 # 16-byte Folded Reload
 ; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(cosf)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fst.s $fa0, $sp, 64
-; LA64-NEXT:    vld $vr0, $sp, 80
-; LA64-NEXT:    vld $vr1, $sp, 64
-; LA64-NEXT:    ld.d $ra, $sp, 104 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 112
+; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
+; LA64-NEXT:    vld $vr1, $sp, 32 # 16-byte Folded Reload
+; LA64-NEXT:    vextrins.w $vr1, $vr0, 32
+; LA64-NEXT:    vld $vr0, $sp, 64 # 16-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 96
 ; LA64-NEXT:    ret
   %result = call { <3 x float>, <3 x float> } @llvm.sincos.v3f32(<3 x float> %a)
   ret { <3 x float>, <3 x float> } %result
diff --git a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
index 36d337b..78588c5 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
@@ -275,16 +275,13 @@ entry:
 define void @buildvector_v16i8_partial(ptr %dst, i8 %a2, i8 %a6, i8 %a8, i8 %a11, i8 %a12, i8 %a15) nounwind {
 ; CHECK-LABEL: buildvector_v16i8_partial:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi.d $sp, $sp, -16
-; CHECK-NEXT:    st.b $a6, $sp, 15
-; CHECK-NEXT:    st.b $a5, $sp, 12
-; CHECK-NEXT:    st.b $a4, $sp, 11
-; CHECK-NEXT:    st.b $a3, $sp, 8
-; CHECK-NEXT:    st.b $a2, $sp, 6
-; CHECK-NEXT:    st.b $a1, $sp, 2
-; CHECK-NEXT:    vld $vr0, $sp, 0
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 2
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 6
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a3, 8
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a4, 11
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a5, 12
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a6, 15
 ; CHECK-NEXT:    vst $vr0, $a0, 0
-; CHECK-NEXT:    addi.d $sp, $sp, 16
 ; CHECK-NEXT:    ret
 entry:
   %ins0 = insertelement <16 x i8> undef, i8 undef, i32 0
@@ -379,14 +376,11 @@ entry:
 define void @buildvector_v8i16_partial(ptr %dst, i16 %a1, i16 %a3, i16 %a4, i16 %a5) nounwind {
 ; CHECK-LABEL: buildvector_v8i16_partial:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi.d $sp, $sp, -16
-; CHECK-NEXT:    st.h $a4, $sp, 10
-; CHECK-NEXT:    st.h $a3, $sp, 8
-; CHECK-NEXT:    st.h $a2, $sp, 6
-; CHECK-NEXT:    st.h $a1, $sp, 2
-; CHECK-NEXT:    vld $vr0, $sp, 0
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a1, 1
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a2, 3
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a3, 4
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a4, 5
 ; CHECK-NEXT:    vst $vr0, $a0, 0
-; CHECK-NEXT:    addi.d $sp, $sp, 16
 ; CHECK-NEXT:    ret
 entry:
   %ins0 = insertelement <8 x i16> undef, i16 undef, i32 0
@@ -449,11 +443,8 @@ entry:
 define void @buildvector_v4i32_partial(ptr %dst, i32 %a0, i32 %a3) nounwind {
 ; CHECK-LABEL: buildvector_v4i32_partial:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    pcalau12i $a3, %pc_hi20(.LCPI25_0)
-; CHECK-NEXT:    vld $vr0, $a3, %pc_lo12(.LCPI25_0)
-; CHECK-NEXT:    vinsgr2vr.w $vr1, $a1, 0
-; CHECK-NEXT:    vinsgr2vr.w $vr2, $a2, 0
-; CHECK-NEXT:    vshuf.w $vr0, $vr2, $vr1
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a1, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a2, 3
 ; CHECK-NEXT:    vst $vr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -553,12 +544,10 @@ entry:
 define void @buildvector_v4f32_partial(ptr %dst, float %a0, float %a3) nounwind {
 ; CHECK-LABEL: buildvector_v4f32_partial:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI31_0)
-; CHECK-NEXT:    vld $vr2, $a1, %pc_lo12(.LCPI31_0)
 ; CHECK-NEXT:    # kill: def $f1 killed $f1 def $vr1
 ; CHECK-NEXT:    # kill: def $f0 killed $f0 def $vr0
-; CHECK-NEXT:    vshuf.w $vr2, $vr1, $vr0
-; CHECK-NEXT:    vst $vr2, $a0, 0
+; CHECK-NEXT:    vextrins.w $vr0, $vr1, 48
+; CHECK-NEXT:    vst $vr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
   %ins0 = insertelement <4 x float> undef, float %a0, i32 0
@@ -610,7 +599,7 @@ define void @buildvector_v2f64_partial(ptr %dst, double %a1) nounwind {
 ; CHECK-LABEL: buildvector_v2f64_partial:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    # kill: def $f0_64 killed $f0_64 def $vr0
-; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vextrins.d $vr0, $vr0, 16
 ; CHECK-NEXT:    vst $vr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry: