; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 -mattr=+sve < %s -o - | FileCheck --check-prefix=SVE %s
; RUN: llc -mtriple=aarch64 -mattr=+sve2 < %s -o - | FileCheck --check-prefix=SVE2 %s
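;
; These tests check that ((x ^ -1) & y) ^ z, i.e. z ^ (y & ~x), is selected as
; a single SVE2 BCAX (bitwise clear and exclusive-OR) instruction, which
; computes Zdn = Zdn ^ (Zm & ~Zk). With plain SVE the pattern falls back to a
; BIC followed by an EOR.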

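; (%2 & ~%1) ^ %0: the XOR accumulator %0 is already in z0, so BCAX can
; compute the result in place.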
define <vscale x 2 x i64> @bcax_nxv2i64_1(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
; SVE-LABEL: bcax_nxv2i64_1:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z1.d, z2.d, z1.d
; SVE-NEXT:    eor z0.d, z1.d, z0.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv2i64_1:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 2 x i64> %1, splat (i64 -1)
  %5 = and <vscale x 2 x i64> %4, %2
  %6 = xor <vscale x 2 x i64> %5, %0
  ret <vscale x 2 x i64> %6
}

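; (%0 & ~%1) ^ %2: here the accumulator %2 arrives in z2. BCAX is destructive
; (the destination is tied to the first source), so the result is computed in
; z2 and then moved into z0 for the return.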
define <vscale x 2 x i64> @bcax_nxv2i64_2(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
; SVE-LABEL: bcax_nxv2i64_2:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z0.d, z0.d, z1.d
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv2i64_2:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
; SVE2-NEXT:    mov z0.d, z2.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 2 x i64> %1, splat (i64 -1)
  %5 = and <vscale x 2 x i64> %4, %0
  %6 = xor <vscale x 2 x i64> %5, %2
  ret <vscale x 2 x i64> %6
}

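; The same two patterns with i32 elements. BCAX is a pure bitwise operation,
; so it is emitted with the .d element size regardless of the IR element type.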
define <vscale x 4 x i32> @bcax_nxv4i32_1(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2) {
; SVE-LABEL: bcax_nxv4i32_1:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z1.d, z2.d, z1.d
; SVE-NEXT:    eor z0.d, z1.d, z0.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv4i32_1:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 4 x i32> %1, splat (i32 -1)
  %5 = and <vscale x 4 x i32> %4, %2
  %6 = xor <vscale x 4 x i32> %5, %0
  ret <vscale x 4 x i32> %6
}

define <vscale x 4 x i32> @bcax_nxv4i32_2(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2) {
; SVE-LABEL: bcax_nxv4i32_2:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z0.d, z0.d, z1.d
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv4i32_2:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
; SVE2-NEXT:    mov z0.d, z2.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 4 x i32> %1, splat (i32 -1)
  %5 = and <vscale x 4 x i32> %4, %0
  %6 = xor <vscale x 4 x i32> %5, %2
  ret <vscale x 4 x i32> %6
}

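; The same two patterns with i16 elements.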
define <vscale x 8 x i16> @bcax_nxv8i16_1(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2) {
; SVE-LABEL: bcax_nxv8i16_1:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z1.d, z2.d, z1.d
; SVE-NEXT:    eor z0.d, z1.d, z0.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv8i16_1:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 8 x i16> %1, splat (i16 -1)
  %5 = and <vscale x 8 x i16> %4, %2
  %6 = xor <vscale x 8 x i16> %5, %0
  ret <vscale x 8 x i16> %6
}

define <vscale x 8 x i16> @bcax_nxv8i16_2(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2) {
; SVE-LABEL: bcax_nxv8i16_2:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z0.d, z0.d, z1.d
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv8i16_2:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
; SVE2-NEXT:    mov z0.d, z2.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 8 x i16> %1, splat (i16 -1)
  %5 = and <vscale x 8 x i16> %4, %0
  %6 = xor <vscale x 8 x i16> %5, %2
  ret <vscale x 8 x i16> %6
}

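; The same two patterns with i8 elements.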
define <vscale x 16 x i8> @bcax_nxv16i8_1(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2) {
; SVE-LABEL: bcax_nxv16i8_1:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z1.d, z2.d, z1.d
; SVE-NEXT:    eor z0.d, z1.d, z0.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv16i8_1:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 16 x i8> %1, splat (i8 -1)
  %5 = and <vscale x 16 x i8> %4, %2
  %6 = xor <vscale x 16 x i8> %5, %0
  ret <vscale x 16 x i8> %6
}

define <vscale x 16 x i8> @bcax_nxv16i8_2(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2) {
; SVE-LABEL: bcax_nxv16i8_2:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z0.d, z0.d, z1.d
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv16i8_2:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
; SVE2-NEXT:    mov z0.d, z2.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 16 x i8> %1, splat (i8 -1)
  %5 = and <vscale x 16 x i8> %4, %0
  %6 = xor <vscale x 16 x i8> %5, %2
  ret <vscale x 16 x i8> %6
}