; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -mattr=+v -o - %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -o - %s | FileCheck %s
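; Test lowering of the llvm.vector.partial.reduce.add intrinsic to RVV for
; both fixed-length and scalable vector types.

; When the input is the same width as the accumulator, the partial reduction
; lowers to a single vector add.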
define <4 x i32> @partial_reduce_add_v4i32_v4i32(<4 x i32> %accumulator, <4 x i32> %0) {
; CHECK-LABEL: partial_reduce_add_v4i32_v4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
%partial.reduce = call <4 x i32> @llvm.vector.partial.reduce.add(<4 x i32> %accumulator, <4 x i32> %0)
ret <4 x i32> %partial.reduce
}
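; A v8i32 input occupies an m2 register group: the low v4i32 half is added to
; the accumulator, then the high half (extracted with vslidedown.vi) is added
; on top.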
define <4 x i32> @partial_reduce_add_v4i32_v8i32(<4 x i32> %accumulator, <8 x i32> %0) {
; CHECK-LABEL: partial_reduce_add_v4i32_v8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vadd.vv v12, v8, v10
; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v10, 4
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
%partial.reduce = call <4 x i32> @llvm.vector.partial.reduce.add(<4 x i32> %accumulator, <8 x i32> %0)
ret <4 x i32> %partial.reduce
}
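; Scalable case with matching widths: again a single vadd.vv, at m2.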
define <vscale x 4 x i32> @partial_reduce_add_nvx4i32_nvx4i32(<vscale x 4 x i32> %accumulator, <vscale x 4 x i32> %0) {
; CHECK-LABEL: partial_reduce_add_nvx4i32_nvx4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
%partial.reduce = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add(<vscale x 4 x i32> %accumulator, <vscale x 4 x i32> %0)
ret <vscale x 4 x i32> %partial.reduce
}
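; An nxv8i32 input occupies two m2 register groups (v12 and v14); each half is
; accumulated in turn.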
define <vscale x 4 x i32> @partial_reduce_add_nvx4i32_nvx8i32(<vscale x 4 x i32> %accumulator, <vscale x 8 x i32> %0) {
; CHECK-LABEL: partial_reduce_add_nvx4i32_nvx8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: vadd.vv v8, v14, v8
; CHECK-NEXT: ret
entry:
%partial.reduce = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add(<vscale x 4 x i32> %accumulator, <vscale x 8 x i32> %0)
ret <vscale x 4 x i32> %partial.reduce
}
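; An nxv16i32 input occupies four m2 register groups (v16 through v22); the
; parts and the accumulator are combined in a tree of vadd.vv instructions.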
define <vscale x 4 x i32> @partial_reduce_add_nvx4i32_nvx16i32(<vscale x 4 x i32> %accumulator, <vscale x 16 x i32> %0) {
; CHECK-LABEL: partial_reduce_add_nvx4i32_nvx16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v10, v18, v20
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: vadd.vv v8, v22, v8
; CHECK-NEXT: vadd.vv v8, v10, v8
; CHECK-NEXT: ret
entry:
%partial.reduce = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add(<vscale x 4 x i32> %accumulator, <vscale x 16 x i32> %0)
ret <vscale x 4 x i32> %partial.reduce
}
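; Same pattern at m4: the two m4 halves of the nxv16i32 input (v16 and v20)
; are added to the accumulator in turn.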
define <vscale x 8 x i32> @partial_reduce_add_nvx8i32_nvx16i32(<vscale x 8 x i32> %accumulator, <vscale x 16 x i32> %0) {
; CHECK-LABEL: partial_reduce_add_nvx8i32_nvx16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: vadd.vv v8, v20, v8
; CHECK-NEXT: ret
entry:
%partial.reduce = call <vscale x 8 x i32> @llvm.vector.partial.reduce.add(<vscale x 8 x i32> %accumulator, <vscale x 16 x i32> %0)
ret <vscale x 8 x i32> %partial.reduce
}