Diffstat (limited to 'llvm/test/CodeGen/LoongArch')
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/fp-max-min.ll          160
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/ir-instruction/avg.ll  307
-rw-r--r--  llvm/test/CodeGen/LoongArch/lsx/fp-max-min.ll           112
-rw-r--r--  llvm/test/CodeGen/LoongArch/lsx/ir-instruction/avg.ll   307
4 files changed, 886 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/LoongArch/lasx/fp-max-min.ll b/llvm/test/CodeGen/LoongArch/lasx/fp-max-min.ll
new file mode 100644
index 0000000..48ec98c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/fp-max-min.ll
@@ -0,0 +1,160 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @minnum_v8f32(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: minnum_v8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a2, 0
+; CHECK-NEXT: xvld $xr1, $a1, 0
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 5
+; CHECK-NEXT: xvpickve.w $xr3, $xr1, 5
+; CHECK-NEXT: fmin.s $fa2, $fa3, $fa2
+; CHECK-NEXT: xvpickve.w $xr3, $xr0, 4
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 4
+; CHECK-NEXT: fmin.s $fa3, $fa4, $fa3
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 16
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 6
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 6
+; CHECK-NEXT: fmin.s $fa2, $fa4, $fa2
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 32
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 7
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 7
+; CHECK-NEXT: fmin.s $fa2, $fa4, $fa2
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 48
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 1
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 1
+; CHECK-NEXT: fmin.s $fa2, $fa4, $fa2
+; CHECK-NEXT: xvpickve.w $xr4, $xr0, 0
+; CHECK-NEXT: xvpickve.w $xr5, $xr1, 0
+; CHECK-NEXT: fmin.s $fa4, $fa5, $fa4
+; CHECK-NEXT: vextrins.w $vr4, $vr2, 16
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 2
+; CHECK-NEXT: xvpickve.w $xr5, $xr1, 2
+; CHECK-NEXT: fmin.s $fa2, $fa5, $fa2
+; CHECK-NEXT: vextrins.w $vr4, $vr2, 32
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 3
+; CHECK-NEXT: xvpickve.w $xr1, $xr1, 3
+; CHECK-NEXT: fmin.s $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.w $vr4, $vr0, 48
+; CHECK-NEXT: xvpermi.q $xr4, $xr3, 2
+; CHECK-NEXT: xvst $xr4, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <8 x float>, ptr %x
+ %v1 = load <8 x float>, ptr %y
+ %r = call <8 x float> @llvm.minnum.v8f32(<8 x float> %v0, <8 x float> %v1)
+ store <8 x float> %r, ptr %res
+ ret void
+}
+
+define void @minnum_v4f64(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: minnum_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a2, 0
+; CHECK-NEXT: xvld $xr1, $a1, 0
+; CHECK-NEXT: xvpickve.d $xr2, $xr0, 3
+; CHECK-NEXT: xvpickve.d $xr3, $xr1, 3
+; CHECK-NEXT: fmin.d $fa2, $fa3, $fa2
+; CHECK-NEXT: xvpickve.d $xr3, $xr0, 2
+; CHECK-NEXT: xvpickve.d $xr4, $xr1, 2
+; CHECK-NEXT: fmin.d $fa3, $fa4, $fa3
+; CHECK-NEXT: vextrins.d $vr3, $vr2, 16
+; CHECK-NEXT: xvpickve.d $xr2, $xr0, 1
+; CHECK-NEXT: xvpickve.d $xr4, $xr1, 1
+; CHECK-NEXT: fmin.d $fa2, $fa4, $fa2
+; CHECK-NEXT: xvpickve.d $xr0, $xr0, 0
+; CHECK-NEXT: xvpickve.d $xr1, $xr1, 0
+; CHECK-NEXT: fmin.d $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.d $vr0, $vr2, 16
+; CHECK-NEXT: xvpermi.q $xr0, $xr3, 2
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <4 x double>, ptr %x
+ %v1 = load <4 x double>, ptr %y
+ %r = call <4 x double> @llvm.minnum.v4f64(<4 x double> %v0, <4 x double> %v1)
+ store <4 x double> %r, ptr %res
+ ret void
+}
+
+define void @maxnum_v8f32(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: maxnum_v8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a2, 0
+; CHECK-NEXT: xvld $xr1, $a1, 0
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 5
+; CHECK-NEXT: xvpickve.w $xr3, $xr1, 5
+; CHECK-NEXT: fmax.s $fa2, $fa3, $fa2
+; CHECK-NEXT: xvpickve.w $xr3, $xr0, 4
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 4
+; CHECK-NEXT: fmax.s $fa3, $fa4, $fa3
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 16
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 6
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 6
+; CHECK-NEXT: fmax.s $fa2, $fa4, $fa2
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 32
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 7
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 7
+; CHECK-NEXT: fmax.s $fa2, $fa4, $fa2
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 48
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 1
+; CHECK-NEXT: xvpickve.w $xr4, $xr1, 1
+; CHECK-NEXT: fmax.s $fa2, $fa4, $fa2
+; CHECK-NEXT: xvpickve.w $xr4, $xr0, 0
+; CHECK-NEXT: xvpickve.w $xr5, $xr1, 0
+; CHECK-NEXT: fmax.s $fa4, $fa5, $fa4
+; CHECK-NEXT: vextrins.w $vr4, $vr2, 16
+; CHECK-NEXT: xvpickve.w $xr2, $xr0, 2
+; CHECK-NEXT: xvpickve.w $xr5, $xr1, 2
+; CHECK-NEXT: fmax.s $fa2, $fa5, $fa2
+; CHECK-NEXT: vextrins.w $vr4, $vr2, 32
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 3
+; CHECK-NEXT: xvpickve.w $xr1, $xr1, 3
+; CHECK-NEXT: fmax.s $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.w $vr4, $vr0, 48
+; CHECK-NEXT: xvpermi.q $xr4, $xr3, 2
+; CHECK-NEXT: xvst $xr4, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <8 x float>, ptr %x
+ %v1 = load <8 x float>, ptr %y
+ %r = call <8 x float> @llvm.maxnum.v8f32(<8 x float> %v0, <8 x float> %v1)
+ store <8 x float> %r, ptr %res
+ ret void
+}
+
+define void @maxnum_v4f64(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: maxnum_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a2, 0
+; CHECK-NEXT: xvld $xr1, $a1, 0
+; CHECK-NEXT: xvpickve.d $xr2, $xr0, 3
+; CHECK-NEXT: xvpickve.d $xr3, $xr1, 3
+; CHECK-NEXT: fmax.d $fa2, $fa3, $fa2
+; CHECK-NEXT: xvpickve.d $xr3, $xr0, 2
+; CHECK-NEXT: xvpickve.d $xr4, $xr1, 2
+; CHECK-NEXT: fmax.d $fa3, $fa4, $fa3
+; CHECK-NEXT: vextrins.d $vr3, $vr2, 16
+; CHECK-NEXT: xvpickve.d $xr2, $xr0, 1
+; CHECK-NEXT: xvpickve.d $xr4, $xr1, 1
+; CHECK-NEXT: fmax.d $fa2, $fa4, $fa2
+; CHECK-NEXT: xvpickve.d $xr0, $xr0, 0
+; CHECK-NEXT: xvpickve.d $xr1, $xr1, 0
+; CHECK-NEXT: fmax.d $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.d $vr0, $vr2, 16
+; CHECK-NEXT: xvpermi.q $xr0, $xr3, 2
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <4 x double>, ptr %x
+ %v1 = load <4 x double>, ptr %y
+ %r = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %v0, <4 x double> %v1)
+ store <4 x double> %r, ptr %res
+ ret void
+}
+
+declare <8 x float> @llvm.minnum.v8f32(<8 x float>, <8 x float>)
+declare <4 x double> @llvm.minnum.v4f64(<4 x double>, <4 x double>)
+declare <8 x float> @llvm.maxnum.v8f32(<8 x float>, <8 x float>)
+declare <4 x double> @llvm.maxnum.v4f64(<4 x double>, <4 x double>)
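
Note: the CHECK lines above show that llvm.minnum/llvm.maxnum on LASX vectors is currently fully scalarized: each lane is pulled out with xvpickve, combined with the scalar fmin/fmax instruction, stitched back in with vextrins, and the two 128-bit halves are recombined with xvpermi.q. In IR terms the backend is effectively performing the per-lane decomposition sketched below (an illustration only, not part of this diff; the function name is made up):

declare double @llvm.minnum.f64(double, double)

define <4 x double> @minnum_v4f64_scalarized(<4 x double> %x, <4 x double> %y) nounwind {
entry:
  ; Extract each lane pair and apply the scalar intrinsic.
  %x0 = extractelement <4 x double> %x, i32 0
  %y0 = extractelement <4 x double> %y, i32 0
  %m0 = call double @llvm.minnum.f64(double %x0, double %y0)
  %x1 = extractelement <4 x double> %x, i32 1
  %y1 = extractelement <4 x double> %y, i32 1
  %m1 = call double @llvm.minnum.f64(double %x1, double %y1)
  %x2 = extractelement <4 x double> %x, i32 2
  %y2 = extractelement <4 x double> %y, i32 2
  %m2 = call double @llvm.minnum.f64(double %x2, double %y2)
  %x3 = extractelement <4 x double> %x, i32 3
  %y3 = extractelement <4 x double> %y, i32 3
  %m3 = call double @llvm.minnum.f64(double %x3, double %y3)
  ; Rebuild the result vector lane by lane.
  %r0 = insertelement <4 x double> poison, double %m0, i32 0
  %r1 = insertelement <4 x double> %r0, double %m1, i32 1
  %r2 = insertelement <4 x double> %r1, double %m2, i32 2
  %r3 = insertelement <4 x double> %r2, double %m3, i32 3
  ret <4 x double> %r3
}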
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/avg.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/avg.ll
new file mode 100644
index 0000000..2a5a8fa
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/avg.ll
@@ -0,0 +1,307 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @xvavg_b(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_b:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrai.b $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <32 x i8>, ptr %a
+ %vb = load <32 x i8>, ptr %b
+ %add = add <32 x i8> %va, %vb
+ %shr = ashr <32 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <32 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @xvavg_h(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrai.h $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i16>, ptr %a
+ %vb = load <16 x i16>, ptr %b
+ %add = add <16 x i16> %va, %vb
+ %shr = ashr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <16 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @xvavg_w(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_w:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrai.w $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %add = add <8 x i32> %va, %vb
+ %shr = ashr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ store <8 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @xvavg_d(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_d:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrai.d $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %add = add <4 x i64> %va, %vb
+ %shr = ashr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
+ store <4 x i64> %shr, ptr %res
+ ret void
+}
+
+define void @xvavg_bu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_bu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrli.b $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <32 x i8>, ptr %a
+ %vb = load <32 x i8>, ptr %b
+ %add = add <32 x i8> %va, %vb
+ %shr = lshr <32 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <32 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @xvavg_hu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_hu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrli.h $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i16>, ptr %a
+ %vb = load <16 x i16>, ptr %b
+ %add = add <16 x i16> %va, %vb
+ %shr = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <16 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @xvavg_wu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_wu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrli.w $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %add = add <8 x i32> %va, %vb
+ %shr = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ store <8 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @xvavg_du(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_du:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrli.d $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %add = add <4 x i64> %va, %vb
+ %shr = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
+ store <4 x i64> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_b(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_b:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.bu $xr0, $xr0, 1
+; CHECK-NEXT: xvsrai.b $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <32 x i8>, ptr %a
+ %vb = load <32 x i8>, ptr %b
+ %add = add <32 x i8> %va, %vb
+ %add1 = add <32 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %shr = ashr <32 x i8> %add1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <32 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_h(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.hu $xr0, $xr0, 1
+; CHECK-NEXT: xvsrai.h $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i16>, ptr %a
+ %vb = load <16 x i16>, ptr %b
+ %add = add <16 x i16> %va, %vb
+ %add1 = add <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shr = ashr <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <16 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_w(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_w:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.wu $xr0, $xr0, 1
+; CHECK-NEXT: xvsrai.w $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %add = add <8 x i32> %va, %vb
+ %add1 = add <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shr = ashr <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ store <8 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_d(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_d:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.du $xr0, $xr0, 1
+; CHECK-NEXT: xvsrai.d $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %add = add <4 x i64> %va, %vb
+ %add1 = add <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
+ %shr = ashr <4 x i64> %add1, <i64 1, i64 1, i64 1, i64 1>
+ store <4 x i64> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_bu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_bu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.bu $xr0, $xr0, 1
+; CHECK-NEXT: xvsrli.b $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <32 x i8>, ptr %a
+ %vb = load <32 x i8>, ptr %b
+ %add = add <32 x i8> %va, %vb
+ %add1 = add <32 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %shr = lshr <32 x i8> %add1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <32 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_hu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_hu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.hu $xr0, $xr0, 1
+; CHECK-NEXT: xvsrli.h $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i16>, ptr %a
+ %vb = load <16 x i16>, ptr %b
+ %add = add <16 x i16> %va, %vb
+ %add1 = add <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shr = lshr <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <16 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_wu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_wu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.wu $xr0, $xr0, 1
+; CHECK-NEXT: xvsrli.w $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %add = add <8 x i32> %va, %vb
+ %add1 = add <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shr = lshr <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ store <8 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_du(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_du:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.du $xr0, $xr0, 1
+; CHECK-NEXT: xvsrli.d $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %add = add <4 x i64> %va, %vb
+ %add1 = add <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
+ %shr = lshr <4 x i64> %add1, <i64 1, i64 1, i64 1, i64 1>
+ store <4 x i64> %shr, ptr %res
+ ret void
+}
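
Note: the function names mirror the LASX xvavg/xvavgr averaging instructions, but the IR above uses a plain wrapping add, so (a + b) >> 1 is not a true average when the sum overflows; the add-plus-shift sequences in the CHECK lines are therefore the faithful lowering. A backend can only select an averaging instruction when the addition provably cannot wrap, and the usual way to write that is to widen first. A minimal sketch under that assumption (whether the LoongArch backend folds this form into xvavg.bu is not shown by this diff):

define void @xvavg_bu_nowrap(ptr %res, ptr %a, ptr %b) nounwind {
entry:
  %va = load <32 x i8>, ptr %a
  %vb = load <32 x i8>, ptr %b
  ; Widen to i16 so the sum (at most 255 + 255 = 510) cannot wrap.
  %ea = zext <32 x i8> %va to <32 x i16>
  %eb = zext <32 x i8> %vb to <32 x i16>
  %add = add <32 x i16> %ea, %eb
  %shr = lshr <32 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %tr = trunc <32 x i16> %shr to <32 x i8>
  store <32 x i8> %tr, ptr %res
  ret void
}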
diff --git a/llvm/test/CodeGen/LoongArch/lsx/fp-max-min.ll b/llvm/test/CodeGen/LoongArch/lsx/fp-max-min.ll
new file mode 100644
index 0000000..27ecb75
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/fp-max-min.ll
@@ -0,0 +1,112 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @minnum_v4f32(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: minnum_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a2, 0
+; CHECK-NEXT: vld $vr1, $a1, 0
+; CHECK-NEXT: vreplvei.w $vr2, $vr0, 1
+; CHECK-NEXT: vreplvei.w $vr3, $vr1, 1
+; CHECK-NEXT: fmin.s $fa2, $fa3, $fa2
+; CHECK-NEXT: vreplvei.w $vr3, $vr0, 0
+; CHECK-NEXT: vreplvei.w $vr4, $vr1, 0
+; CHECK-NEXT: fmin.s $fa3, $fa4, $fa3
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 16
+; CHECK-NEXT: vreplvei.w $vr2, $vr0, 2
+; CHECK-NEXT: vreplvei.w $vr4, $vr1, 2
+; CHECK-NEXT: fmin.s $fa2, $fa4, $fa2
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 32
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 3
+; CHECK-NEXT: vreplvei.w $vr1, $vr1, 3
+; CHECK-NEXT: fmin.s $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.w $vr3, $vr0, 48
+; CHECK-NEXT: vst $vr3, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <4 x float>, ptr %x
+ %v1 = load <4 x float>, ptr %y
+ %r = call <4 x float> @llvm.minnum.v4f32(<4 x float> %v0, <4 x float> %v1)
+ store <4 x float> %r, ptr %res
+ ret void
+}
+
+define void @minnum_v2f64(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: minnum_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a2, 0
+; CHECK-NEXT: vld $vr1, $a1, 0
+; CHECK-NEXT: vreplvei.d $vr2, $vr0, 1
+; CHECK-NEXT: vreplvei.d $vr3, $vr1, 1
+; CHECK-NEXT: fmin.d $fa2, $fa3, $fa2
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT: vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT: fmin.d $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.d $vr0, $vr2, 16
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <2 x double>, ptr %x
+ %v1 = load <2 x double>, ptr %y
+ %r = call <2 x double> @llvm.minnum.v2f64(<2 x double> %v0, <2 x double> %v1)
+ store <2 x double> %r, ptr %res
+ ret void
+}
+
+define void @maxnum_v4f32(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: maxnum_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a2, 0
+; CHECK-NEXT: vld $vr1, $a1, 0
+; CHECK-NEXT: vreplvei.w $vr2, $vr0, 1
+; CHECK-NEXT: vreplvei.w $vr3, $vr1, 1
+; CHECK-NEXT: fmax.s $fa2, $fa3, $fa2
+; CHECK-NEXT: vreplvei.w $vr3, $vr0, 0
+; CHECK-NEXT: vreplvei.w $vr4, $vr1, 0
+; CHECK-NEXT: fmax.s $fa3, $fa4, $fa3
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 16
+; CHECK-NEXT: vreplvei.w $vr2, $vr0, 2
+; CHECK-NEXT: vreplvei.w $vr4, $vr1, 2
+; CHECK-NEXT: fmax.s $fa2, $fa4, $fa2
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 32
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 3
+; CHECK-NEXT: vreplvei.w $vr1, $vr1, 3
+; CHECK-NEXT: fmax.s $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.w $vr3, $vr0, 48
+; CHECK-NEXT: vst $vr3, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <4 x float>, ptr %x
+ %v1 = load <4 x float>, ptr %y
+ %r = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %v0, <4 x float> %v1)
+ store <4 x float> %r, ptr %res
+ ret void
+}
+
+define void @maxnum_v2f64(ptr %res, ptr %x, ptr %y) nounwind {
+; CHECK-LABEL: maxnum_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a2, 0
+; CHECK-NEXT: vld $vr1, $a1, 0
+; CHECK-NEXT: vreplvei.d $vr2, $vr0, 1
+; CHECK-NEXT: vreplvei.d $vr3, $vr1, 1
+; CHECK-NEXT: fmax.d $fa2, $fa3, $fa2
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT: vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT: fmax.d $fa0, $fa1, $fa0
+; CHECK-NEXT: vextrins.d $vr0, $vr2, 16
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %v0 = load <2 x double>, ptr %x
+ %v1 = load <2 x double>, ptr %y
+ %r = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %v0, <2 x double> %v1)
+ store <2 x double> %r, ptr %res
+ ret void
+}
+
+declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>)
+declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.maxnum.v2f64(<2 x double>, <2 x double>)
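
Note: the LSX lowering follows the same per-lane scheme as the LASX file above, with vreplvei doing the extraction and no cross-half recombination step. Substituting the scalar fmin/fmax instructions for llvm.minnum/llvm.maxnum relies on both following IEEE-754 minNum/maxNum semantics, under which a single quiet NaN operand is ignored in favor of the number; the LoongArch ISA manual describes FMIN/FMAX this way, though that is an assumption worth checking against the manual rather than something this diff states. A one-function illustration of the intrinsic's semantics:

declare double @llvm.minnum.f64(double, double)

; 0x7FF8000000000000 is a quiet NaN; llvm.minnum returns the other
; operand in that case, so %r is %x whenever %x itself is not a NaN.
define double @minnum_qnan(double %x) nounwind {
  %r = call double @llvm.minnum.f64(double %x, double 0x7FF8000000000000)
  ret double %r
}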
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/avg.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/avg.ll
new file mode 100644
index 0000000..20b88984
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/avg.ll
@@ -0,0 +1,307 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @vavg_b(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_b:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrai.b $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i8>, ptr %a
+ %vb = load <16 x i8>, ptr %b
+ %add = add <16 x i8> %va, %vb
+ %shr = ashr <16 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <16 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @vavg_h(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrai.h $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i16>, ptr %a
+ %vb = load <8 x i16>, ptr %b
+ %add = add <8 x i16> %va, %vb
+ %shr = ashr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <8 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @vavg_w(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_w:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrai.w $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i32>, ptr %a
+ %vb = load <4 x i32>, ptr %b
+ %add = add <4 x i32> %va, %vb
+ %shr = ashr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
+ store <4 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @vavg_d(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_d:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrai.d $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <2 x i64>, ptr %a
+ %vb = load <2 x i64>, ptr %b
+ %add = add <2 x i64> %va, %vb
+ %shr = ashr <2 x i64> %add, <i64 1, i64 1>
+ store <2 x i64> %shr, ptr %res
+ ret void
+}
+
+define void @vavg_bu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_bu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrli.b $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i8>, ptr %a
+ %vb = load <16 x i8>, ptr %b
+ %add = add <16 x i8> %va, %vb
+ %shr = lshr <16 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <16 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @vavg_hu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_hu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrli.h $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i16>, ptr %a
+ %vb = load <8 x i16>, ptr %b
+ %add = add <8 x i16> %va, %vb
+ %shr = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <8 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @vavg_wu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_wu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrli.w $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i32>, ptr %a
+ %vb = load <4 x i32>, ptr %b
+ %add = add <4 x i32> %va, %vb
+ %shr = lshr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
+ store <4 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @vavg_du(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_du:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrli.d $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <2 x i64>, ptr %a
+ %vb = load <2 x i64>, ptr %b
+ %add = add <2 x i64> %va, %vb
+ %shr = lshr <2 x i64> %add, <i64 1, i64 1>
+ store <2 x i64> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_b(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_b:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.bu $vr0, $vr0, 1
+; CHECK-NEXT: vsrai.b $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i8>, ptr %a
+ %vb = load <16 x i8>, ptr %b
+ %add = add <16 x i8> %va, %vb
+ %add1 = add <16 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %shr = ashr <16 x i8> %add1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <16 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_h(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.hu $vr0, $vr0, 1
+; CHECK-NEXT: vsrai.h $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i16>, ptr %a
+ %vb = load <8 x i16>, ptr %b
+ %add = add <8 x i16> %va, %vb
+ %add1 = add <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shr = ashr <8 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <8 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_w(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_w:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.wu $vr0, $vr0, 1
+; CHECK-NEXT: vsrai.w $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i32>, ptr %a
+ %vb = load <4 x i32>, ptr %b
+ %add = add <4 x i32> %va, %vb
+ %add1 = add <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
+ %shr = ashr <4 x i32> %add1, <i32 1, i32 1, i32 1, i32 1>
+ store <4 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_d(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_d:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.du $vr0, $vr0, 1
+; CHECK-NEXT: vsrai.d $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <2 x i64>, ptr %a
+ %vb = load <2 x i64>, ptr %b
+ %add = add <2 x i64> %va, %vb
+ %add1 = add <2 x i64> %add, <i64 1, i64 1>
+ %shr = ashr <2 x i64> %add1, <i64 1, i64 1>
+ store <2 x i64> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_bu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_bu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.bu $vr0, $vr0, 1
+; CHECK-NEXT: vsrli.b $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i8>, ptr %a
+ %vb = load <16 x i8>, ptr %b
+ %add = add <16 x i8> %va, %vb
+ %add1 = add <16 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %shr = lshr <16 x i8> %add1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <16 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_hu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_hu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.hu $vr0, $vr0, 1
+; CHECK-NEXT: vsrli.h $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i16>, ptr %a
+ %vb = load <8 x i16>, ptr %b
+ %add = add <8 x i16> %va, %vb
+ %add1 = add <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shr = lshr <8 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <8 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_wu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_wu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.wu $vr0, $vr0, 1
+; CHECK-NEXT: vsrli.w $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i32>, ptr %a
+ %vb = load <4 x i32>, ptr %b
+ %add = add <4 x i32> %va, %vb
+ %add1 = add <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
+ %shr = lshr <4 x i32> %add1, <i32 1, i32 1, i32 1, i32 1>
+ store <4 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_du(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_du:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.du $vr0, $vr0, 1
+; CHECK-NEXT: vsrli.d $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <2 x i64>, ptr %a
+ %vb = load <2 x i64>, ptr %b
+ %add = add <2 x i64> %va, %vb
+ %add1 = add <2 x i64> %add, <i64 1, i64 1>
+ %shr = lshr <2 x i64> %add1, <i64 1, i64 1>
+ store <2 x i64> %shr, ptr %res
+ ret void
+}
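
Note: the vavgr_* tests differ from vavg_* only in the extra add of 1 before the shift, i.e. a round-half-up average, matched by the extra vaddi.bu/hu/wu/du in the CHECK lines. As with the floor variants, the plain add may wrap, so a wrap-free rounding average widens before adding. A sketch mirroring the vavgr_hu shape above (not part of this diff):

define void @vavgr_hu_nowrap(ptr %res, ptr %a, ptr %b) nounwind {
entry:
  %va = load <8 x i16>, ptr %a
  %vb = load <8 x i16>, ptr %b
  ; Widen to i32 so neither the sum nor the +1 rounding bias can wrap.
  %ea = zext <8 x i16> %va to <8 x i32>
  %eb = zext <8 x i16> %vb to <8 x i32>
  %add = add <8 x i32> %ea, %eb
  %add1 = add <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %shr = lshr <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %tr = trunc <8 x i32> %shr to <8 x i16>
  store <8 x i16> %tr, ptr %res
  ret void
}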