; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=mips-elf -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
; RUN: llc -mtriple=mipsel-elf -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s

declare <4 x float> @llvm.mips.fmax.w(<4 x float>, <4 x float>) nounwind
declare <2 x double> @llvm.mips.fmax.d(<2 x double>, <2 x double>) nounwind
declare <4 x float> @llvm.mips.fmin.w(<4 x float>, <4 x float>) nounwind
declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind

; (setcc $a, $b, SETFALSE) is always folded, so we won't get fcaf
define void @false_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: false_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ldi.b $w0, 0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp false <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

; (setcc $a, $b, SETFALSE) is always folded
define void @false_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: false_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ldi.b $w0, 0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp false <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

define void @oeq_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: oeq_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($6)
; CHECK-NEXT:    ld.w $w1, 0($5)
; CHECK-NEXT:    fceq.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp oeq <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

define void @oeq_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: oeq_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($6)
; CHECK-NEXT:    ld.d $w1, 0($5)
; CHECK-NEXT:    fceq.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp oeq <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

define void @oge_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: oge_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($5)
; CHECK-NEXT:    ld.w $w1, 0($6)
; CHECK-NEXT:    fcle.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp oge <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

define void @oge_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: oge_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($5)
; CHECK-NEXT:    ld.d $w1, 0($6)
; CHECK-NEXT:    fcle.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp oge <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

define void @ogt_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ogt_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($5)
; CHECK-NEXT:    ld.w $w1, 0($6)
; CHECK-NEXT:    fclt.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp ogt <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

define void @ogt_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ogt_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($5)
; CHECK-NEXT:    ld.d $w1, 0($6)
; CHECK-NEXT:    fclt.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp ogt <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

define void @ole_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ole_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($6)
; CHECK-NEXT:    ld.w $w1, 0($5)
; CHECK-NEXT:    fcle.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp ole <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

define void @ole_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ole_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($6)
; CHECK-NEXT:    ld.d $w1, 0($5)
; CHECK-NEXT:    fcle.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp ole <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

define void @olt_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: olt_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($6)
; CHECK-NEXT:    ld.w $w1, 0($5)
; CHECK-NEXT:    fclt.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp olt <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

define void @olt_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: olt_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($6)
; CHECK-NEXT:    ld.d $w1, 0($5)
; CHECK-NEXT:    fclt.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp olt <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

define void @one_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: one_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($6)
; CHECK-NEXT:    ld.w $w1, 0($5)
; CHECK-NEXT:    fcne.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp one <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

define void @one_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: one_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($6)
; CHECK-NEXT:    ld.d $w1, 0($5)
; CHECK-NEXT:    fcne.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp one <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

define void @ord_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ord_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($6)
; CHECK-NEXT:    ld.w $w1, 0($5)
; CHECK-NEXT:    fcor.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp ord <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

define void @ord_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ord_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($6)
; CHECK-NEXT:    ld.d $w1, 0($5)
; CHECK-NEXT:    fcor.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp ord <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

define void @ueq_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ueq_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($6)
; CHECK-NEXT:    ld.w $w1, 0($5)
; CHECK-NEXT:    fcueq.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp ueq <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

define void @ueq_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ueq_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($6)
; CHECK-NEXT:    ld.d $w1, 0($5)
; CHECK-NEXT:    fcueq.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp ueq <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

define void @uge_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: uge_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($5)
; CHECK-NEXT:    ld.w $w1, 0($6)
; CHECK-NEXT:    fcule.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp uge <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

define void @uge_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: uge_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($5)
; CHECK-NEXT:    ld.d $w1, 0($6)
; CHECK-NEXT:    fcule.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp uge <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

define void @ugt_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ugt_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($5)
; CHECK-NEXT:    ld.w $w1, 0($6)
; CHECK-NEXT:    fcult.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp ugt <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

define void @ugt_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ugt_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($5)
; CHECK-NEXT:    ld.d $w1, 0($6)
; CHECK-NEXT:    fcult.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp ugt <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

define void @ule_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ule_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($6)
; CHECK-NEXT:    ld.w $w1, 0($5)
; CHECK-NEXT:    fcule.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp ule <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

define void @ule_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ule_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($6)
; CHECK-NEXT:    ld.d $w1, 0($5)
; CHECK-NEXT:    fcule.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp ule <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

define void @ult_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ult_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($6)
; CHECK-NEXT:    ld.w $w1, 0($5)
; CHECK-NEXT:    fcult.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp ult <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

define void @ult_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ult_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($6)
; CHECK-NEXT:    ld.d $w1, 0($5)
; CHECK-NEXT:    fcult.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp ult <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

define void @uno_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: uno_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($6)
; CHECK-NEXT:    ld.w $w1, 0($5)
; CHECK-NEXT:    fcun.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp uno <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

define void @uno_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: uno_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($6)
; CHECK-NEXT:    ld.d $w1, 0($5)
; CHECK-NEXT:    fcun.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp uno <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

; (setcc $a, $b, SETTRUE) is always folded, so we won't get fcaf
define void @true_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: true_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ldi.b $w0, -1
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp true <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, ptr %c
  ret void
}

; (setcc $a, $b, SETTRUE) is always folded.
define void @true_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: true_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ldi.b $w0, -1
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp true <2 x double> %1, %2
  %4 = sext <2 x i1> %3 to <2 x i64>
  store <2 x i64> %4, ptr %c
  ret void
}

; Note that IfSet and IfClr are swapped since the condition is inverted
define void @bsel_v4f32(ptr %d, ptr %a, ptr %b, ptr %c) nounwind {
; CHECK-LABEL: bsel_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($5)
; CHECK-NEXT:    ld.w $w1, 0($6)
; CHECK-NEXT:    fclt.w $w1, $w1, $w0
; CHECK-NEXT:    ld.w $w2, 0($7)
; CHECK-NEXT:    bsel.v $w1, $w2, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w1, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = load <4 x float>, ptr %c
  %4 = fcmp ogt <4 x float> %1, %2
  %5 = select <4 x i1> %4, <4 x float> %1, <4 x float> %3
  store <4 x float> %5, ptr %d
  ret void
}

; Note that IfSet and IfClr are swapped since the condition is inverted
define void @bsel_v2f64(ptr %d, ptr %a, ptr %b, ptr %c) nounwind {
; CHECK-LABEL: bsel_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($5)
; CHECK-NEXT:    ld.d $w1, 0($6)
; CHECK-NEXT:    fclt.d $w1, $w1, $w0
; CHECK-NEXT:    ld.d $w2, 0($7)
; CHECK-NEXT:    bsel.v $w1, $w2, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w1, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = load <2 x double>, ptr %c
  %4 = fcmp ogt <2 x double> %1, %2
  %5 = select <2 x i1> %4, <2 x double> %1, <2 x double> %3
  store <2 x double> %5, ptr %d
  ret void
}

; Note that IfSet and IfClr are swapped since the condition is inverted
define void @bseli_v4f32(ptr %d, ptr %a, ptr %b, ptr %c) nounwind {
; CHECK-LABEL: bseli_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($5)
; CHECK-NEXT:    ld.w $w1, 0($6)
; CHECK-NEXT:    fclt.w $w1, $w1, $w0
; CHECK-NEXT:    and.v $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = fcmp ogt <4 x float> %1, %2
  %4 = select <4 x i1> %3, <4 x float> %1, <4 x float> zeroinitializer
  store <4 x float> %4, ptr %d
  ret void
}

; Note that IfSet and IfClr are swapped since the condition is inverted
define void @bseli_v2f64(ptr %d, ptr %a, ptr %b, ptr %c) nounwind {
; CHECK-LABEL: bseli_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($5)
; CHECK-NEXT:    ld.d $w1, 0($6)
; CHECK-NEXT:    fclt.d $w1, $w1, $w0
; CHECK-NEXT:    and.v $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = fcmp ogt <2 x double> %1, %2
  %4 = select <2 x i1> %3, <2 x double> %1, <2 x double> zeroinitializer
  store <2 x double> %4, ptr %d
  ret void
}

define void @max_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: max_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($6)
; CHECK-NEXT:    ld.w $w1, 0($5)
; CHECK-NEXT:    fmax.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %1, <4 x float> %2)
  store <4 x float> %3, ptr %c
  ret void
}

define void @max_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: max_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($6)
; CHECK-NEXT:    ld.d $w1, 0($5)
; CHECK-NEXT:    fmax.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %1, <2 x double> %2)
  store <2 x double> %3, ptr %c
  ret void
}

define void @min_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: min_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $w0, 0($6)
; CHECK-NEXT:    ld.w $w1, 0($5)
; CHECK-NEXT:    fmin.w $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.w $w0, 0($4)
  %1 = load <4 x float>, ptr %a
  %2 = load <4 x float>, ptr %b
  %3 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %1, <4 x float> %2)
  store <4 x float> %3, ptr %c
  ret void
}

define void @min_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: min_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $w0, 0($6)
; CHECK-NEXT:    ld.d $w1, 0($5)
; CHECK-NEXT:    fmin.d $w0, $w1, $w0
; CHECK-NEXT:    jr $ra
; CHECK-NEXT:    st.d $w0, 0($4)
  %1 = load <2 x double>, ptr %a
  %2 = load <2 x double>, ptr %b
  %3 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %1, <2 x double> %2)
  store <2 x double> %3, ptr %c
  ret void
}