; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
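; The CHECK lines below can be regenerated with the script named in the NOTE
; above, e.g. (typical invocation from the LLVM source tree; exact arguments
; may vary):
;   llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/RISCV/reduction-formation.ll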

; Negative test to ensure we don't try to generate a vector reduce when
; vector instructions are not available.
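;
; For reference, the extract/op chains below each compute the same value as the
; corresponding llvm.vector.reduce.* intrinsic, which is one explicit form of
; the same reduction. A minimal sketch of the add case, kept as a comment so
; the RUN lines are unaffected (the @reduce_sum_4xi32_intrinsic name is
; illustrative only):
;
;   define i32 @reduce_sum_4xi32_intrinsic(<4 x i32> %v) {
;     %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v)
;     ret i32 %r
;   }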

define i32 @reduce_sum_4xi32(<4 x i32> %v) {
; RV32-LABEL: reduce_sum_4xi32:
; RV32:       # %bb.0:
; RV32-NEXT:    lw a1, 0(a0)
; RV32-NEXT:    lw a2, 4(a0)
; RV32-NEXT:    lw a3, 8(a0)
; RV32-NEXT:    lw a0, 12(a0)
; RV32-NEXT:    add a1, a1, a2
; RV32-NEXT:    add a0, a3, a0
; RV32-NEXT:    add a0, a1, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: reduce_sum_4xi32:
; RV64:       # %bb.0:
; RV64-NEXT:    lw a1, 0(a0)
; RV64-NEXT:    lw a2, 8(a0)
; RV64-NEXT:    lw a3, 16(a0)
; RV64-NEXT:    lw a0, 24(a0)
; RV64-NEXT:    add a1, a1, a2
; RV64-NEXT:    add a0, a3, a0
; RV64-NEXT:    addw a0, a1, a0
; RV64-NEXT:    ret
  %e0 = extractelement <4 x i32> %v, i32 0
  %e1 = extractelement <4 x i32> %v, i32 1
  %e2 = extractelement <4 x i32> %v, i32 2
  %e3 = extractelement <4 x i32> %v, i32 3
  %add0 = add i32 %e0, %e1
  %add1 = add i32 %add0, %e2
  %add2 = add i32 %add1, %e3
  ret i32 %add2
}

define i32 @reduce_xor_4xi32(<4 x i32> %v) {
; RV32-LABEL: reduce_xor_4xi32:
; RV32:       # %bb.0:
; RV32-NEXT:    lw a1, 0(a0)
; RV32-NEXT:    lw a2, 4(a0)
; RV32-NEXT:    lw a3, 8(a0)
; RV32-NEXT:    lw a0, 12(a0)
; RV32-NEXT:    xor a1, a1, a2
; RV32-NEXT:    xor a0, a3, a0
; RV32-NEXT:    xor a0, a1, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: reduce_xor_4xi32:
; RV64:       # %bb.0:
; RV64-NEXT:    ld a1, 0(a0)
; RV64-NEXT:    ld a2, 8(a0)
; RV64-NEXT:    ld a3, 16(a0)
; RV64-NEXT:    ld a0, 24(a0)
; RV64-NEXT:    xor a1, a1, a2
; RV64-NEXT:    xor a0, a3, a0
; RV64-NEXT:    xor a0, a1, a0
; RV64-NEXT:    ret
  %e0 = extractelement <4 x i32> %v, i32 0
  %e1 = extractelement <4 x i32> %v, i32 1
  %e2 = extractelement <4 x i32> %v, i32 2
  %e3 = extractelement <4 x i32> %v, i32 3
  %xor0 = xor i32 %e0, %e1
  %xor1 = xor i32 %xor0, %e2
  %xor2 = xor i32 %xor1, %e3
  ret i32 %xor2
}

define i32 @reduce_or_4xi32(<4 x i32> %v) {
; RV32-LABEL: reduce_or_4xi32:
; RV32:       # %bb.0:
; RV32-NEXT:    lw a1, 0(a0)
; RV32-NEXT:    lw a2, 4(a0)
; RV32-NEXT:    lw a3, 8(a0)
; RV32-NEXT:    lw a0, 12(a0)
; RV32-NEXT:    or a1, a1, a2
; RV32-NEXT:    or a0, a3, a0
; RV32-NEXT:    or a0, a1, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: reduce_or_4xi32:
; RV64:       # %bb.0:
; RV64-NEXT:    ld a1, 0(a0)
; RV64-NEXT:    ld a2, 8(a0)
; RV64-NEXT:    ld a3, 16(a0)
; RV64-NEXT:    ld a0, 24(a0)
; RV64-NEXT:    or a1, a1, a2
; RV64-NEXT:    or a0, a3, a0
; RV64-NEXT:    or a0, a1, a0
; RV64-NEXT:    ret
  %e0 = extractelement <4 x i32> %v, i32 0
  %e1 = extractelement <4 x i32> %v, i32 1
  %e2 = extractelement <4 x i32> %v, i32 2
  %e3 = extractelement <4 x i32> %v, i32 3
  %or0 = or i32 %e0, %e1
  %or1 = or i32 %or0, %e2
  %or2 = or i32 %or1, %e3
  ret i32 %or2
}