author    tangaac <tangyan01@loongson.cn>  2025-09-02 11:44:19 +0800
committer GitHub <noreply@github.com>      2025-09-02 11:44:19 +0800
commit    66ba9dc62ba647d5dcefdcab06f21d38f6b2dd5f (patch)
tree      78d4de9f263bafebb1dd467ee672dff8bcdbaefa /llvm/test
parent    8fdae0c7daf3831c48be4c69e799da627337bddd (diff)
[LoongArch] Custom lower vecreduce. (#155196)
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll    80
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll     80
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll   80
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll   80
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll   80
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll   80
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll    80
-rw-r--r--  llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll     76
-rw-r--r--  llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll      76
-rw-r--r--  llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll    76
-rw-r--r--  llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll    76
-rw-r--r--  llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll    76
-rw-r--r--  llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll    76
-rw-r--r--  llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll     76
14 files changed, 490 insertions(+), 602 deletions(-)
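
The new lowering visible throughout this patch replaces the old per-element shuffle chains (xvpermi.d, xvshuf4i.*, xvrepl128vei.*, plus a constant-pool mask for the i64 case) with a uniform log2 halving scheme: shift the live part of the vector right by half its width in bytes (vbsrl.v), fold it back into itself with the reduction operation, and repeat until element 0 holds the result, which vstelm then stores. A minimal C model of the 128-bit pattern, using a byte array as a stand-in for an LSX register (the helper name and layout are illustrative, not from the backend):

#include <stdint.h>
#include <string.h>

/* Each loop iteration mirrors one "vbsrl.v $vr1, $vr0, n" (byte shift
 * right, zero fill) followed by the reduction op, here AND:
 * "vand.v $vr0, $vr1, $vr0". The shifted-in zeros land in lanes that
 * never feed element 0 again, so they do not affect the result. */
static uint8_t reduce_and_v16i8(const uint8_t v[16]) {
    uint8_t cur[16], shifted[16];
    memcpy(cur, v, 16);
    for (int n = 8; n >= 1; n /= 2) {        /* shift by 8, 4, 2, 1 bytes */
        memset(shifted, 0, 16);
        memcpy(shifted, cur + n, 16 - n);    /* vbsrl.v by n bytes */
        for (int i = 0; i < 16; i++)
            cur[i] &= shifted[i];            /* vand.v */
    }
    return cur[0];                           /* vstelm.b stores element 0 */
}

The same skeleton serves or/xor/min/max by swapping the combining operator, and wider element types simply stop the shift sequence earlier (at the element size), which is why the i16/i32/i64 functions in the diffs below have progressively shorter check lines.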
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll
index a3160f10..fd64bea 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll
@@ -5,22 +5,17 @@ define void @vec_reduce_and_v32i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_and_v32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.b $xr1, $xr1, 228
-; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvsrli.d $xr1, $xr1, 32
-; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.b $xr1, $xr1, 14
-; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.b $xr1, $xr1, 1
-; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <32 x i8>, ptr %src
%res = call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %v)
@@ -32,19 +27,15 @@ define void @vec_reduce_and_v16i16(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_and_v16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.h $xr1, $xr1, 228
-; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.h $xr1, $xr1, 14
-; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.h $xr1, $xr1, 1
-; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <16 x i16>, ptr %src
%res = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %v)
@@ -56,16 +47,13 @@ define void @vec_reduce_and_v8i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_and_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.w $xr1, $xr1, 228
-; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.w $xr1, $xr1, 14
-; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.w $xr1, $xr1, 1
-; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i32>, ptr %src
%res = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %v)
@@ -77,15 +65,11 @@ define void @vec_reduce_and_v4i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_and_v4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT: xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr2
-; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.d $xr1, $xr1, 1
-; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i64>, ptr %src
%res = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %v)
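
For the 256-bit LASX cases in this file, the new sequence first folds the two 128-bit lanes into one: "xvpermi.q $xr1, $xr0, 1" copies the high 128-bit lane of $xr0 into the low lane of $xr1, a single LSX op combines the halves, and the 128-bit halving chain takes over. In terms of the byte-array sketch above (again illustrative only, not the backend code):

#include <stdint.h>

/* Fold the two 128-bit lanes of a 32-byte LASX value into 16 bytes;
 * the 128-bit chain from the earlier sketch then finishes the job. */
static void fold_lasx_halves(const uint8_t v[32], uint8_t lo[16]) {
    for (int i = 0; i < 16; i++)
        lo[i] = v[i] & v[i + 16];   /* models xvpermi.q + vand.v */
}

This fold is also what removes the .LCPI3_0 constant-pool load from each i64 test: no shuffle mask is needed once the cross-lane move is a single xvpermi.q.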
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll
index bc910c2..cdb08d9 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll
@@ -5,22 +5,17 @@ define void @vec_reduce_or_v32i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_or_v32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.b $xr1, $xr1, 228
-; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvsrli.d $xr1, $xr1, 32
-; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.b $xr1, $xr1, 14
-; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.b $xr1, $xr1, 1
-; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <32 x i8>, ptr %src
%res = call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %v)
@@ -32,19 +27,15 @@ define void @vec_reduce_or_v16i16(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_or_v16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.h $xr1, $xr1, 228
-; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.h $xr1, $xr1, 14
-; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.h $xr1, $xr1, 1
-; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <16 x i16>, ptr %src
%res = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %v)
@@ -56,16 +47,13 @@ define void @vec_reduce_or_v8i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_or_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.w $xr1, $xr1, 228
-; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.w $xr1, $xr1, 14
-; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.w $xr1, $xr1, 1
-; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i32>, ptr %src
%res = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %v)
@@ -77,15 +65,11 @@ define void @vec_reduce_or_v4i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_or_v4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT: xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr2
-; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.d $xr1, $xr1, 1
-; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i64>, ptr %src
%res = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %v)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll
index 378088c..1d18273 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll
@@ -5,22 +5,17 @@ define void @vec_reduce_smax_v32i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_smax_v32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.b $xr1, $xr1, 228
-; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvsrli.d $xr1, $xr1, 32
-; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.b $xr1, $xr1, 14
-; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.b $xr1, $xr1, 1
-; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <32 x i8>, ptr %src
%res = call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %v)
@@ -32,19 +27,15 @@ define void @vec_reduce_smax_v16i16(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_smax_v16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.h $xr1, $xr1, 228
-; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.h $xr1, $xr1, 14
-; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.h $xr1, $xr1, 1
-; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmax.h $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.h $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.h $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <16 x i16>, ptr %src
%res = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %v)
@@ -56,16 +47,13 @@ define void @vec_reduce_smax_v8i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_smax_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.w $xr1, $xr1, 228
-; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.w $xr1, $xr1, 14
-; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.w $xr1, $xr1, 1
-; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmax.w $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.w $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i32>, ptr %src
%res = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %v)
@@ -77,15 +65,11 @@ define void @vec_reduce_smax_v4i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_smax_v4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT: xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr2
-; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.d $xr1, $xr1, 1
-; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmax.d $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i64>, ptr %src
%res = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %v)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll
index 1c7f205..369afdd 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll
@@ -5,22 +5,17 @@ define void @vec_reduce_smin_v32i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_smin_v32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.b $xr1, $xr1, 228
-; CHECK-NEXT: xvmin.b $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT: xvmin.b $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvsrli.d $xr1, $xr1, 32
-; CHECK-NEXT: xvmin.b $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.b $xr1, $xr1, 14
-; CHECK-NEXT: xvmin.b $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.b $xr1, $xr1, 1
-; CHECK-NEXT: xvmin.b $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <32 x i8>, ptr %src
%res = call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %v)
@@ -32,19 +27,15 @@ define void @vec_reduce_smin_v16i16(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_smin_v16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.h $xr1, $xr1, 228
-; CHECK-NEXT: xvmin.h $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT: xvmin.h $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.h $xr1, $xr1, 14
-; CHECK-NEXT: xvmin.h $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.h $xr1, $xr1, 1
-; CHECK-NEXT: xvmin.h $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmin.h $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.h $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.h $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <16 x i16>, ptr %src
%res = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %v)
@@ -56,16 +47,13 @@ define void @vec_reduce_smin_v8i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_smin_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.w $xr1, $xr1, 228
-; CHECK-NEXT: xvmin.w $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.w $xr1, $xr1, 14
-; CHECK-NEXT: xvmin.w $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.w $xr1, $xr1, 1
-; CHECK-NEXT: xvmin.w $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmin.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmin.w $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.w $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i32>, ptr %src
%res = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %v)
@@ -77,15 +65,11 @@ define void @vec_reduce_smin_v4i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_smin_v4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT: xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr2
-; CHECK-NEXT: xvmin.d $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.d $xr1, $xr1, 1
-; CHECK-NEXT: xvmin.d $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmin.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmin.d $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i64>, ptr %src
%res = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %v)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll
index 152f093..5256a72 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll
@@ -5,22 +5,17 @@ define void @vec_reduce_umax_v32i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.b $xr1, $xr1, 228
-; CHECK-NEXT: xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT: xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvsrli.d $xr1, $xr1, 32
-; CHECK-NEXT: xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.b $xr1, $xr1, 14
-; CHECK-NEXT: xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.b $xr1, $xr1, 1
-; CHECK-NEXT: xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <32 x i8>, ptr %src
%res = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %v)
@@ -32,19 +27,15 @@ define void @vec_reduce_umax_v16i16(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.h $xr1, $xr1, 228
-; CHECK-NEXT: xvmax.hu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT: xvmax.hu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.h $xr1, $xr1, 14
-; CHECK-NEXT: xvmax.hu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.h $xr1, $xr1, 1
-; CHECK-NEXT: xvmax.hu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmax.hu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.hu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.hu $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <16 x i16>, ptr %src
%res = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %v)
@@ -56,16 +47,13 @@ define void @vec_reduce_umax_v8i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.w $xr1, $xr1, 228
-; CHECK-NEXT: xvmax.wu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.w $xr1, $xr1, 14
-; CHECK-NEXT: xvmax.wu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.w $xr1, $xr1, 1
-; CHECK-NEXT: xvmax.wu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmax.wu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.wu $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i32>, ptr %src
%res = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %v)
@@ -77,15 +65,11 @@ define void @vec_reduce_umax_v4i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT: xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr2
-; CHECK-NEXT: xvmax.du $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.d $xr1, $xr1, 1
-; CHECK-NEXT: xvmax.du $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmax.du $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmax.du $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i64>, ptr %src
%res = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %v)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll
index 64ed377..a82c886 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll
@@ -5,22 +5,17 @@ define void @vec_reduce_umin_v32i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umin_v32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.b $xr1, $xr1, 228
-; CHECK-NEXT: xvmin.bu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT: xvmin.bu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvsrli.d $xr1, $xr1, 32
-; CHECK-NEXT: xvmin.bu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.b $xr1, $xr1, 14
-; CHECK-NEXT: xvmin.bu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.b $xr1, $xr1, 1
-; CHECK-NEXT: xvmin.bu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <32 x i8>, ptr %src
%res = call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %v)
@@ -32,19 +27,15 @@ define void @vec_reduce_umin_v16i16(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umin_v16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.h $xr1, $xr1, 228
-; CHECK-NEXT: xvmin.hu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT: xvmin.hu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.h $xr1, $xr1, 14
-; CHECK-NEXT: xvmin.hu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.h $xr1, $xr1, 1
-; CHECK-NEXT: xvmin.hu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmin.hu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.hu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.hu $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <16 x i16>, ptr %src
%res = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %v)
@@ -56,16 +47,13 @@ define void @vec_reduce_umin_v8i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umin_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.w $xr1, $xr1, 228
-; CHECK-NEXT: xvmin.wu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.w $xr1, $xr1, 14
-; CHECK-NEXT: xvmin.wu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.w $xr1, $xr1, 1
-; CHECK-NEXT: xvmin.wu $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmin.wu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmin.wu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.wu $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i32>, ptr %src
%res = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %v)
@@ -77,15 +65,11 @@ define void @vec_reduce_umin_v4i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umin_v4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT: xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr2
-; CHECK-NEXT: xvmin.du $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.d $xr1, $xr1, 1
-; CHECK-NEXT: xvmin.du $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vmin.du $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmin.du $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i64>, ptr %src
%res = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %v)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll
index 5dbf37e..429fadc 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll
@@ -5,22 +5,17 @@ define void @vec_reduce_xor_v32i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_xor_v32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.b $xr1, $xr1, 228
-; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvsrli.d $xr1, $xr1, 32
-; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.b $xr1, $xr1, 14
-; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.b $xr1, $xr1, 1
-; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <32 x i8>, ptr %src
%res = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> %v)
@@ -32,19 +27,15 @@ define void @vec_reduce_xor_v16i16(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_xor_v16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.h $xr1, $xr1, 228
-; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.h $xr1, $xr1, 14
-; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.h $xr1, $xr1, 1
-; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <16 x i16>, ptr %src
%res = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %v)
@@ -56,16 +47,13 @@ define void @vec_reduce_xor_v8i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_xor_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT: xvshuf4i.w $xr1, $xr1, 228
-; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvshuf4i.w $xr1, $xr1, 14
-; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.w $xr1, $xr1, 1
-; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i32>, ptr %src
%res = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %v)
@@ -77,15 +65,11 @@ define void @vec_reduce_xor_v4i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_xor_v4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT: xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr2
-; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT: xvrepl128vei.d $xr1, $xr1, 1
-; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT: xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i64>, ptr %src
%res = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %v)
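
The LSX-only tests that follow change in the same direction: the old mixed chains of vsrli.d, vshuf4i.*, and vreplvei.* become uniform vbsrl.v byte shifts, and sub-128-bit vectors are first materialized with a scalar load plus vinsgr2vr before a shortened shift chain runs. The number of shift/op pairs depends only on the loaded width and the element size; a small illustrative helper (not from the patch) captures the relation:

/* Shift amounts run bytes/2, bytes/4, ..., down to the element size.
 * E.g. v8i8 loads 8 bytes of 1-byte elements: shifts 4, 2, 1 -> three
 * vbsrl.v/op pairs, matching vec_reduce_and_v8i8 below; v4i32
 * (16 bytes, 4-byte elements): shifts 8, 4 -> two pairs. */
static int reduction_steps(int bytes, int elem) {
    int steps = 0;
    for (int n = bytes / 2; n >= elem; n /= 2)
        steps++;
    return steps;
}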
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll
index c16de10..cca4ce30 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll
@@ -6,13 +6,13 @@ define void @vec_reduce_and_v16i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <16 x i8>, ptr %src
@@ -26,12 +26,12 @@ define void @vec_reduce_and_v8i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i8>, ptr %src
@@ -45,10 +45,10 @@ define void @vec_reduce_and_v4i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i8>, ptr %src
@@ -62,8 +62,8 @@ define void @vec_reduce_and_v2i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i8>, ptr %src
@@ -77,11 +77,11 @@ define void @vec_reduce_and_v8i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i16>, ptr %src
@@ -95,10 +95,10 @@ define void @vec_reduce_and_v4i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i16>, ptr %src
@@ -112,8 +112,8 @@ define void @vec_reduce_and_v2i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i16>, ptr %src
@@ -126,10 +126,10 @@ define void @vec_reduce_and_v4i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_and_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.w $vr1, $vr0, 14
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i32>, ptr %src
@@ -143,8 +143,8 @@ define void @vec_reduce_and_v2i32(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i32>, ptr %src
@@ -157,8 +157,8 @@ define void @vec_reduce_and_v2i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_and_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.d $vr1, $vr0, 1
-; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vand.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i64>, ptr %src
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll
index 52f18cc..ce431f0 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll
@@ -6,13 +6,13 @@ define void @vec_reduce_or_v16i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <16 x i8>, ptr %src
@@ -26,12 +26,12 @@ define void @vec_reduce_or_v8i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i8>, ptr %src
@@ -45,10 +45,10 @@ define void @vec_reduce_or_v4i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i8>, ptr %src
@@ -62,8 +62,8 @@ define void @vec_reduce_or_v2i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i8>, ptr %src
@@ -77,11 +77,11 @@ define void @vec_reduce_or_v8i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i16>, ptr %src
@@ -95,10 +95,10 @@ define void @vec_reduce_or_v4i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i16>, ptr %src
@@ -112,8 +112,8 @@ define void @vec_reduce_or_v2i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i16>, ptr %src
@@ -126,10 +126,10 @@ define void @vec_reduce_or_v4i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_or_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.w $vr1, $vr0, 14
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i32>, ptr %src
@@ -143,8 +143,8 @@ define void @vec_reduce_or_v2i32(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i32>, ptr %src
@@ -157,8 +157,8 @@ define void @vec_reduce_or_v2i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_or_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.d $vr1, $vr0, 1
-; CHECK-NEXT: vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i64>, ptr %src
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll
index 5d8c3e3..bdf153a 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll
@@ -6,13 +6,13 @@ define void @vec_reduce_smax_v16i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
-; CHECK-NEXT: vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <16 x i8>, ptr %src
@@ -26,12 +26,12 @@ define void @vec_reduce_smax_v8i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i8>, ptr %src
@@ -45,10 +45,10 @@ define void @vec_reduce_smax_v4i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i8>, ptr %src
@@ -62,8 +62,8 @@ define void @vec_reduce_smax_v2i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i8>, ptr %src
@@ -77,11 +77,11 @@ define void @vec_reduce_smax_v8i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vmax.h $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.h $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.h $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i16>, ptr %src
@@ -95,10 +95,10 @@ define void @vec_reduce_smax_v4i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.h $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.h $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i16>, ptr %src
@@ -112,8 +112,8 @@ define void @vec_reduce_smax_v2i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.h $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i16>, ptr %src
@@ -126,10 +126,10 @@ define void @vec_reduce_smax_v4i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_smax_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.w $vr1, $vr0, 14
-; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmax.w $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.w $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i32>, ptr %src
@@ -143,8 +143,8 @@ define void @vec_reduce_smax_v2i32(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.w $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i32>, ptr %src
@@ -157,8 +157,8 @@ define void @vec_reduce_smax_v2i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_smax_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.d $vr1, $vr0, 1
-; CHECK-NEXT: vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmax.d $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i64>, ptr %src
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll
index 2d53095..e3b3c5e 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll
@@ -6,13 +6,13 @@ define void @vec_reduce_smin_v16i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT: vmin.b $vr0, $vr0, $vr1
-; CHECK-NEXT: vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT: vmin.b $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vmin.b $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <16 x i8>, ptr %src
@@ -26,12 +26,12 @@ define void @vec_reduce_smin_v8i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT: vmin.b $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vmin.b $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i8>, ptr %src
@@ -45,10 +45,10 @@ define void @vec_reduce_smin_v4i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vmin.b $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i8>, ptr %src
@@ -62,8 +62,8 @@ define void @vec_reduce_smin_v2i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i8>, ptr %src
@@ -77,11 +77,11 @@ define void @vec_reduce_smin_v8i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT: vmin.h $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT: vmin.h $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vmin.h $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.h $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.h $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i16>, ptr %src
@@ -95,10 +95,10 @@ define void @vec_reduce_smin_v4i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT: vmin.h $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.h $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.h $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i16>, ptr %src
@@ -112,8 +112,8 @@ define void @vec_reduce_smin_v2i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.h $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i16>, ptr %src
@@ -126,10 +126,10 @@ define void @vec_reduce_smin_v4i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_smin_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.w $vr1, $vr0, 14
-; CHECK-NEXT: vmin.w $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT: vmin.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmin.w $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.w $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i32>, ptr %src
@@ -143,8 +143,8 @@ define void @vec_reduce_smin_v2i32(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT: vmin.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.w $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i32>, ptr %src
@@ -157,8 +157,8 @@ define void @vec_reduce_smin_v2i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_smin_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.d $vr1, $vr0, 1
-; CHECK-NEXT: vmin.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmin.d $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i64>, ptr %src
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll
index abe9ba7..fff2304 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll
@@ -6,13 +6,13 @@ define void @vec_reduce_umax_v16i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT: vmax.bu $vr0, $vr0, $vr1
-; CHECK-NEXT: vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT: vmax.bu $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vmax.bu $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <16 x i8>, ptr %src
@@ -26,12 +26,12 @@ define void @vec_reduce_umax_v8i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT: vmax.bu $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vmax.bu $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i8>, ptr %src
@@ -45,10 +45,10 @@ define void @vec_reduce_umax_v4i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vmax.bu $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i8>, ptr %src
@@ -62,8 +62,8 @@ define void @vec_reduce_umax_v2i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i8>, ptr %src
@@ -77,11 +77,11 @@ define void @vec_reduce_umax_v8i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT: vmax.hu $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT: vmax.hu $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT: vmax.hu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.hu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.hu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i16>, ptr %src
@@ -95,10 +95,10 @@ define void @vec_reduce_umax_v4i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT: vmax.hu $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.hu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.hu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i16>, ptr %src
@@ -112,8 +112,8 @@ define void @vec_reduce_umax_v2i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmax.hu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i16>, ptr %src
@@ -126,10 +126,10 @@ define void @vec_reduce_umax_v4i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.w $vr1, $vr0, 14
-; CHECK-NEXT: vmax.wu $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT: vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmax.wu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.wu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i32>, ptr %src
@@ -143,8 +143,8 @@ define void @vec_reduce_umax_v2i32(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT: vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmax.wu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i32>, ptr %src
@@ -157,8 +157,8 @@ define void @vec_reduce_umax_v2i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.d $vr1, $vr0, 1
-; CHECK-NEXT: vmax.du $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmax.du $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i64>, ptr %src
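The unsigned-max ladder is identical; only the combining instruction changes (vmax.bu/.hu/.wu/.du by element size). In generic IR the shift/combine pairs correspond to the halving below, written out for the v4i32 case (a sketch of the idea, not the exact DAG the backend builds; names are illustrative):

declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>)

define i32 @umax_v4i32_halving_sketch(<4 x i32> %v) {
  ; fold the high pair onto the low pair (mirrors the "vbsrl.v ..., 8" step)
  %h1 = shufflevector <4 x i32> %v, <4 x i32> poison, <4 x i32> <i32 2, i32 3, i32 2, i32 3>
  %m1 = call <4 x i32> @llvm.umax.v4i32(<4 x i32> %h1, <4 x i32> %v)
  ; fold element 1 onto element 0 (mirrors the "vbsrl.v ..., 4" step)
  %h2 = shufflevector <4 x i32> %m1, <4 x i32> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  %m2 = call <4 x i32> @llvm.umax.v4i32(<4 x i32> %h2, <4 x i32> %m1)
  %r = extractelement <4 x i32> %m2, i32 0
  ret i32 %r
}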
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll
index 3d396f36..e14a294 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll
@@ -6,13 +6,13 @@ define void @vec_reduce_umin_v16i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT: vmin.bu $vr0, $vr0, $vr1
-; CHECK-NEXT: vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT: vmin.bu $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vmin.bu $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <16 x i8>, ptr %src
@@ -26,12 +26,12 @@ define void @vec_reduce_umin_v8i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT: vmin.bu $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vmin.bu $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i8>, ptr %src
@@ -45,10 +45,10 @@ define void @vec_reduce_umin_v4i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vmin.bu $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i8>, ptr %src
@@ -62,8 +62,8 @@ define void @vec_reduce_umin_v2i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i8>, ptr %src
@@ -77,11 +77,11 @@ define void @vec_reduce_umin_v8i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT: vmin.hu $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT: vmin.hu $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT: vmin.hu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.hu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.hu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i16>, ptr %src
@@ -95,10 +95,10 @@ define void @vec_reduce_umin_v4i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT: vmin.hu $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.hu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.hu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i16>, ptr %src
@@ -112,8 +112,8 @@ define void @vec_reduce_umin_v2i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vmin.hu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i16>, ptr %src
@@ -126,10 +126,10 @@ define void @vec_reduce_umin_v4i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umin_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.w $vr1, $vr0, 14
-; CHECK-NEXT: vmin.wu $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT: vmin.wu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmin.wu $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.wu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i32>, ptr %src
@@ -143,8 +143,8 @@ define void @vec_reduce_umin_v2i32(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT: vmin.wu $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vmin.wu $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i32>, ptr %src
@@ -157,8 +157,8 @@ define void @vec_reduce_umin_v2i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umin_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.d $vr1, $vr0, 1
-; CHECK-NEXT: vmin.du $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vmin.du $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i64>, ptr %src
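vec-reduce-umin.ll mirrors the umax file with vmin.bu/.hu/.wu/.du. Note how the sub-128-bit cases are set up: the vector is brought in with a scalar ld.h/ld.w/ld.d plus vinsgr2vr, so only the populated low lanes exist and the ladder simply starts at the narrower width. The full test shape, reconstructed as a sketch around the load that is visible in the context lines:

declare i8 @llvm.vector.reduce.umin.v2i8(<2 x i8>)

define void @umin_v2i8_shape(ptr %src, ptr %dst) {
  %v = load <2 x i8>, ptr %src
  %res = call i8 @llvm.vector.reduce.umin.v2i8(<2 x i8> %v)
  store i8 %res, ptr %dst
  ret void
}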
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll
index 1894532..ae2bb8f 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll
@@ -6,13 +6,13 @@ define void @vec_reduce_xor_v16i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <16 x i8>, ptr %src
@@ -26,12 +26,12 @@ define void @vec_reduce_xor_v8i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i8>, ptr %src
@@ -45,10 +45,10 @@ define void @vec_reduce_xor_v4i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i8>, ptr %src
@@ -62,8 +62,8 @@ define void @vec_reduce_xor_v2i8(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i8>, ptr %src
@@ -77,11 +77,11 @@ define void @vec_reduce_xor_v8i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <8 x i16>, ptr %src
@@ -95,10 +95,10 @@ define void @vec_reduce_xor_v4i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i16>, ptr %src
@@ -112,8 +112,8 @@ define void @vec_reduce_xor_v2i16(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i16>, ptr %src
@@ -126,10 +126,10 @@ define void @vec_reduce_xor_v4i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_xor_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vshuf4i.w $vr1, $vr0, 14
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT: vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <4 x i32>, ptr %src
@@ -143,8 +143,8 @@ define void @vec_reduce_xor_v2i32(ptr %src, ptr %dst) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i32>, ptr %src
@@ -157,8 +157,8 @@ define void @vec_reduce_xor_v2i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_xor_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vreplvei.d $vr1, $vr0, 1
-; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT: ret
%v = load <2 x i64>, ptr %src
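The bitwise reductions reuse the ladder unchanged; vxor.v operates on the full register, so there is no element-size suffix to vary. Because x ^ 0 = x, the zero fill from vbsrl.v drops out entirely, and on a little-endian target the whole v16i8 sequence computes the same value as xor-folding the register as one 128-bit integer (a scalar sketch of the same arithmetic, not what the backend emits):

define i8 @xor_v16i8_scalar_sketch(<16 x i8> %v) {
  %x = bitcast <16 x i8> %v to i128
  ; the four vbsrl.v amounts (8, 4, 2, 1 bytes) become 64/32/16/8-bit shifts
  %s1 = lshr i128 %x, 64
  %x1 = xor i128 %x, %s1
  %s2 = lshr i128 %x1, 32
  %x2 = xor i128 %x1, %s2
  %s3 = lshr i128 %x2, 16
  %x3 = xor i128 %x2, %s3
  %s4 = lshr i128 %x3, 8
  %x4 = xor i128 %x3, %s4
  %r = trunc i128 %x4 to i8
  ret i8 %r
}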