; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING,NO_FOLDING1
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING,NO_FOLDING1
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING,NO_FOLDING2
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING,NO_FOLDING2
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=3 | FileCheck %s --check-prefixes=FOLDING
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=3 | FileCheck %s --check-prefixes=FOLDING
; Check that the default value of the web-size knob enables the folding,
; i.e. that it is at least 3.
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=FOLDING
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=FOLDING
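
; Note (editorial, not load-bearing for the checks): as its name suggests,
; --riscv-lower-ext-max-web-size caps the size, in number of nodes, of the
; "web" of extend/arithmetic instructions the RISC-V backend will consider
; when folding sign/zero extends into widening (vw*) instructions. The RUN
; lines above exercise that threshold at 1, 2, 3, and the default.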


; Check that the add/sub/mul operations are all promoted to their
; vw counterparts when the max web size is increased to 3.
; The web size must be at least 3 for the folding to happen, because
; %c has 3 uses. (For contrast, a single-use sketch follows this function.)
define <2 x i16> @vwmul_v2i16_multiple_users(ptr %x, ptr %y, ptr %z) {
; NO_FOLDING1-LABEL: vwmul_v2i16_multiple_users:
; NO_FOLDING1:       # %bb.0:
; NO_FOLDING1-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; NO_FOLDING1-NEXT:    vle8.v v8, (a0)
; NO_FOLDING1-NEXT:    vle8.v v9, (a1)
; NO_FOLDING1-NEXT:    vle8.v v10, (a2)
; NO_FOLDING1-NEXT:    vsext.vf2 v11, v8
; NO_FOLDING1-NEXT:    vsext.vf2 v8, v9
; NO_FOLDING1-NEXT:    vsext.vf2 v9, v10
; NO_FOLDING1-NEXT:    vmul.vv v8, v11, v8
; NO_FOLDING1-NEXT:    vadd.vv v10, v11, v9
; NO_FOLDING1-NEXT:    vsub.vv v9, v11, v9
; NO_FOLDING1-NEXT:    vor.vv v8, v8, v10
; NO_FOLDING1-NEXT:    vor.vv v8, v8, v9
; NO_FOLDING1-NEXT:    ret
;
; NO_FOLDING2-LABEL: vwmul_v2i16_multiple_users:
; NO_FOLDING2:       # %bb.0:
; NO_FOLDING2-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; NO_FOLDING2-NEXT:    vle8.v v8, (a0)
; NO_FOLDING2-NEXT:    vle8.v v9, (a1)
; NO_FOLDING2-NEXT:    vle8.v v10, (a2)
; NO_FOLDING2-NEXT:    vsext.vf2 v11, v8
; NO_FOLDING2-NEXT:    vsext.vf2 v8, v9
; NO_FOLDING2-NEXT:    vmul.vv v8, v11, v8
; NO_FOLDING2-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; NO_FOLDING2-NEXT:    vwadd.wv v9, v11, v10
; NO_FOLDING2-NEXT:    vwsub.wv v11, v11, v10
; NO_FOLDING2-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; NO_FOLDING2-NEXT:    vor.vv v8, v8, v9
; NO_FOLDING2-NEXT:    vor.vv v8, v8, v11
; NO_FOLDING2-NEXT:    ret
;
; FOLDING-LABEL: vwmul_v2i16_multiple_users:
; FOLDING:       # %bb.0:
; FOLDING-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; FOLDING-NEXT:    vle8.v v8, (a0)
; FOLDING-NEXT:    vle8.v v9, (a1)
; FOLDING-NEXT:    vle8.v v10, (a2)
; FOLDING-NEXT:    vwmul.vv v11, v8, v9
; FOLDING-NEXT:    vwadd.vv v9, v8, v10
; FOLDING-NEXT:    vwsub.vv v12, v8, v10
; FOLDING-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; FOLDING-NEXT:    vor.vv v8, v11, v9
; FOLDING-NEXT:    vor.vv v8, v8, v12
; FOLDING-NEXT:    ret
  %a = load <2 x i8>, ptr %x
  %b = load <2 x i8>, ptr %y
  %b2 = load <2 x i8>, ptr %z
  %c = sext <2 x i8> %a to <2 x i16>
  %d = sext <2 x i8> %b to <2 x i16>
  %d2 = sext <2 x i8> %b2 to <2 x i16>
  %e = mul <2 x i16> %c, %d
  %f = add <2 x i16> %c, %d2
  %g = sub <2 x i16> %c, %d2
  %h = or <2 x i16> %e, %f
  %i = or <2 x i16> %h, %g
  ret <2 x i16> %i
}
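
; A minimal single-use sketch for contrast (not part of the original
; checked tests; CHECK lines intentionally omitted, and the function name
; is illustrative). When the extended value %c feeds only one arithmetic
; user, the sext/sext/mul web is small enough that the default
; configuration is expected to fold it into a single vwmul.vv.
define <2 x i16> @vwmul_v2i16_single_user_sketch(ptr %x, ptr %y) {
  %a = load <2 x i8>, ptr %x
  %b = load <2 x i8>, ptr %y
  %c = sext <2 x i8> %a to <2 x i16>
  %d = sext <2 x i8> %b to <2 x i16>
  %e = mul <2 x i16> %c, %d
  ret <2 x i16> %e
}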

; Make sure we emit a vsext.vf2 followed by a vwaddu.vx in all
; configurations.
define <4 x i32> @pr159152(<4 x i8> %x) {
; NO_FOLDING-LABEL: pr159152:
; NO_FOLDING:       # %bb.0:
; NO_FOLDING-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; NO_FOLDING-NEXT:    vsext.vf2 v9, v8
; NO_FOLDING-NEXT:    li a0, 9
; NO_FOLDING-NEXT:    vwaddu.vx v8, v9, a0
; NO_FOLDING-NEXT:    ret
;
; FOLDING-LABEL: pr159152:
; FOLDING:       # %bb.0:
; FOLDING-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; FOLDING-NEXT:    vsext.vf2 v9, v8
; FOLDING-NEXT:    li a0, 9
; FOLDING-NEXT:    vwaddu.vx v8, v9, a0
; FOLDING-NEXT:    ret
  %a = sext <4 x i8> %x to <4 x i16>
  %b = zext <4 x i16> %a to <4 x i32>
  %c = add <4 x i32> %b, <i32 9, i32 9, i32 9, i32 9>
  ret <4 x i32> %c
}
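
; Why an unsigned vwaddu follows a sign extend here: the outer extension
; in the IR is a zext of the i16 sext result, so the widening add must
; zero-extend its i16 operand. Worked element example: for x = -1, sext
; to i16 gives 0xFFFF, zext to i32 gives 65535, and adding 9 yields
; 65544; a signed vwadd.vx would instead compute -1 + 9 = 8.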