; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon,+i8mm < %s -o - | FileCheck %s
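;
; Check that the i8mm 8-bit integer matrix multiply-accumulate intrinsics
; (smmla, ummla, usmmla) and the mixed-sign dot product intrinsic (usdot)
; are selected to the SMMLA, UMMLA, USMMLA, USDOT and SUDOT instructions,
; including the indexed (lane) forms.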

define <4 x i32> @smmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
entry:
; CHECK-LABEL: smmla.v4i32.v16i8
; CHECK: smmla   v0.4s, v1.16b, v2.16b
  %vmmla1.i = tail call <4 x i32> @llvm.aarch64.neon.smmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b)
  ret <4 x i32> %vmmla1.i
}

define <4 x i32> @ummla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
entry:
; CHECK-LABEL: ummla.v4i32.v16i8
; CHECK: ummla   v0.4s, v1.16b, v2.16b
  %vmmla1.i = tail call <4 x i32> @llvm.aarch64.neon.ummla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b)
  ret <4 x i32> %vmmla1.i
}

define <4 x i32> @usmmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
entry:
; CHECK-LABEL: usmmla.v4i32.v16i8
; CHECK: usmmla   v0.4s, v1.16b, v2.16b
  %vusmmla1.i = tail call <4 x i32> @llvm.aarch64.neon.usmmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) #3
  ret <4 x i32> %vusmmla1.i
}

define <2 x i32> @usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
entry:
; CHECK-LABEL: usdot.v2i32.v8i8
; CHECK: usdot   v0.2s, v1.8b, v2.8b
  %vusdot1.i = tail call <2 x i32> @llvm.aarch64.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b)
  ret <2 x i32> %vusdot1.i
}
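
; The lane and laneq variants below splat a single 32-bit lane of %b via a
; bitcast/shufflevector/bitcast sequence; instruction selection is expected
; to fold that splat into the indexed form of the dot product instruction.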

define <2 x i32> @usdot_lane.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
entry:
; CHECK-LABEL: usdot_lane.v2i32.v8i8
; CHECK: usdot   v0.2s, v1.8b, v2.4b[0]
  %0 = bitcast <8 x i8> %b to <2 x i32>
  %shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer
  %1 = bitcast <2 x i32> %shuffle to <8 x i8>
  %vusdot1.i = tail call <2 x i32> @llvm.aarch64.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %1)
  ret <2 x i32> %vusdot1.i
}
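
; The sudot tests call the usdot intrinsic with its two multiplicand operands
; swapped, so the splatted value sits in the first (unsigned) position; this
; pattern is expected to select the SUDOT indexed form.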

define <2 x i32> @sudot_lane.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
entry:
; CHECK-LABEL: sudot_lane.v2i32.v8i8
; CHECK: sudot   v0.2s, v1.8b, v2.4b[0]
  %0 = bitcast <8 x i8> %b to <2 x i32>
  %shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer
  %1 = bitcast <2 x i32> %shuffle to <8 x i8>
  %vusdot1.i = tail call <2 x i32> @llvm.aarch64.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %1, <8 x i8> %a)
  ret <2 x i32> %vusdot1.i
}

define <2 x i32> @usdot_laneq.v2i32.v16i8(<2 x i32> %r, <8 x i8> %a, <16 x i8> %b) {
entry:
; CHECK-LABEL: usdot_laneq.v2i32.v16i8
; CHECK: usdot   v0.2s, v1.8b, v2.4b[0]
  %0 = bitcast <16 x i8> %b to <4 x i32>
  %shuffle = shufflevector <4 x i32> %0, <4 x i32> undef, <2 x i32> zeroinitializer
  %1 = bitcast <2 x i32> %shuffle to <8 x i8>
  %vusdot1.i = tail call <2 x i32> @llvm.aarch64.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %1)
  ret <2 x i32> %vusdot1.i
}

define <2 x i32> @sudot_laneq.v2i32.v16i8(<2 x i32> %r, <8 x i8> %a, <16 x i8> %b) {
entry:
; CHECK-LABEL: sudot_laneq.v2i32.v16i8
; CHECK: sudot   v0.2s, v1.8b, v2.4b[0]
  %0 = bitcast <16 x i8> %b to <4 x i32>
  %shuffle = shufflevector <4 x i32> %0, <4 x i32> undef, <2 x i32> zeroinitializer
  %1 = bitcast <2 x i32> %shuffle to <8 x i8>
  %vusdot1.i = tail call <2 x i32> @llvm.aarch64.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %1, <8 x i8> %a) #3
  ret <2 x i32> %vusdot1.i
}

define <4 x i32> @usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
entry:
; CHECK-LABEL: usdot.v4i32.v16i8
; CHECK: usdot   v0.4s, v1.16b, v2.16b
  %vusdot1.i = tail call <4 x i32> @llvm.aarch64.neon.usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) #3
  ret <4 x i32> %vusdot1.i
}

define <4 x i32> @usdot_lane.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <8 x i8> %b) {
entry:
; CHECK-LABEL: usdot_lane.v4i32.v16i8
; CHECK: usdot   v0.4s, v1.16b, v2.4b[0]
  %0 = bitcast <8 x i8> %b to <2 x i32>
  %shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <4 x i32> zeroinitializer
  %1 = bitcast <4 x i32> %shuffle to <16 x i8>
  %vusdot1.i = tail call <4 x i32> @llvm.aarch64.neon.usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %1) #3
  ret <4 x i32> %vusdot1.i
}

define <4 x i32> @sudot_lane.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <8 x i8> %b) {
entry:
; CHECK-LABEL: sudot_lane.v4i32.v16i8
; CHECK: sudot   v0.4s, v1.16b, v2.4b[0]
  %0 = bitcast <8 x i8> %b to <2 x i32>
  %shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <4 x i32> zeroinitializer
  %1 = bitcast <4 x i32> %shuffle to <16 x i8>
  %vusdot1.i = tail call <4 x i32> @llvm.aarch64.neon.usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %1, <16 x i8> %a) #3
  ret <4 x i32> %vusdot1.i
}

define <4 x i32> @usdot_laneq.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
entry:
; CHECK-LABEL: usdot_laneq.v4i32.v16i8
; CHECK: usdot   v0.4s, v1.16b, v2.4b[0]
  %0 = bitcast <16 x i8> %b to <4 x i32>
  %shuffle = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> zeroinitializer
  %1 = bitcast <4 x i32> %shuffle to <16 x i8>
  %vusdot1.i = tail call <4 x i32> @llvm.aarch64.neon.usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %1) #3
  ret <4 x i32> %vusdot1.i
}

define <4 x i32> @sudot_laneq.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
entry:
; CHECK-LABEL: sudot_laneq.v4i32.v16i8
; CHECK: sudot   v0.4s, v1.16b, v2.4b[0]
  %0 = bitcast <16 x i8> %b to <4 x i32>
  %shuffle = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> zeroinitializer
  %1 = bitcast <4 x i32> %shuffle to <16 x i8>
  %vusdot1.i = tail call <4 x i32> @llvm.aarch64.neon.usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %1, <16 x i8> %a) #3
  ret <4 x i32> %vusdot1.i
}

declare <4 x i32> @llvm.aarch64.neon.smmla.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>) #2
declare <4 x i32> @llvm.aarch64.neon.ummla.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>) #2
declare <4 x i32> @llvm.aarch64.neon.usmmla.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>) #2
declare <2 x i32> @llvm.aarch64.neon.usdot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>) #2
declare <4 x i32> @llvm.aarch64.neon.usdot.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>) #2