path: root/target/arm/vfp.decode
blob: 1a7c9b533de675d353c82f5b961a8947b53e7d4c

# AArch32 VFP instruction descriptions (conditional insns)
#
#  Copyright (c) 2019 Linaro, Ltd
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.

#
# This file is processed by scripts/decodetree.py
#
# Encodings for the conditional VFP instructions are here:
# generally anything matching A32
#  cccc 11.. .... .... .... 101. .... ....
# and T32
#  1110 110. .... .... .... 101. .... ....
#  1110 1110 .... .... .... 101. .... ....
# (but those patterns might also cover some Neon instructions,
# which do not live in this file.)
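# Each named pattern below expands to a call to a trans_<NAME>()
# function (e.g. trans_VLDR_VSTR_sp()), with the decoded fields passed
# in an arg_<NAME> structure; those functions live in the C translation
# code.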

# VFP registers have an odd encoding with a four-bit field
# and a one-bit field which are assembled in different orders
# depending on whether the register is double or single precision.
# Each individual instruction function must do the checks for
# "double register selected but CPU does not have double support"
# and "double register number has bit 4 set but CPU does not
# support D16-D31" (which should UNDEF).
%vm_dp  5:1 0:4
%vm_sp  0:4 5:1
%vn_dp  7:1 16:4
%vn_sp  16:4 7:1
%vd_dp  22:1 12:4
%vd_sp  12:4 22:1
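# decodetree concatenates the listed subfields with the first as the
# most significant bits, so %vm_dp yields M:Vm (M=1, Vm=0b0001 selects
# D17) while %vm_sp yields Vm:M (Vm=0b0001, M=1 selects S3).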

%vmov_idx_b     21:1 5:2
%vmov_idx_h     21:1 6:1
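# These build the scalar lane index for the VMOV patterns below: a
# 3-bit index from bits [21] and [6:5] for bytes, and a 2-bit index
# from bits [21] and [6] for halfwords.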

# VMOV scalar to general-purpose register; note that this does
# include some Neon cases.
VMOV_to_gp   ---- 1110 u:1 1.        1 .... rt:4 1011 ... 1 0000 \
             vn=%vn_dp size=0 index=%vmov_idx_b
VMOV_to_gp   ---- 1110 u:1 0.        1 .... rt:4 1011 ..1 1 0000 \
             vn=%vn_dp size=1 index=%vmov_idx_h
VMOV_to_gp   ---- 1110 0   0 index:1 1 .... rt:4 1011 .00 1 0000 \
             vn=%vn_dp size=2 u=0

VMOV_from_gp ---- 1110 0 1.        0 .... rt:4 1011 ... 1 0000 \
             vn=%vn_dp size=0 index=%vmov_idx_b
VMOV_from_gp ---- 1110 0 0.        0 .... rt:4 1011 ..1 1 0000 \
             vn=%vn_dp size=1 index=%vmov_idx_h
VMOV_from_gp ---- 1110 0 0 index:1 0 .... rt:4 1011 .00 1 0000 \
             vn=%vn_dp size=2

VDUP         ---- 1110 1 b:1 q:1 0 .... rt:4 1011 . 0 e:1 1 0000 \
             vn=%vn_dp

VMSR_VMRS    ---- 1110 111 l:1 reg:4 rt:4 1010 0001 0000
VMOV_single  ---- 1110 000 l:1 .... rt:4 1010 . 001 0000 \
             vn=%vn_sp

VMOV_64_sp   ---- 1100 010 op:1 rt2:4 rt:4 1010 00.1 .... \
             vm=%vm_sp
VMOV_64_dp   ---- 1100 010 op:1 rt2:4 rt:4 1011 00.1 .... \
             vm=%vm_dp

# Note that the half-precision variants of VLDR and VSTR are
# not part of this decodetree at all because they have bits [9:8] == 0b01
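# In the patterns below imm is the 8-bit immediate from the A32/T32
# encoding; the byte offset applied to Rn is imm * 4 for both the
# single- and double-precision forms.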
VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 \
             vd=%vd_sp
VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 1011 imm:8 \
             vd=%vd_dp

# We split the load/store multiple up into two patterns to avoid
# overlap with other insns in the "Advanced SIMD load/store and 64-bit move"
# grouping:
#   P=0 U=0 W=0 is 64-bit VMOV
#   P=1 W=0 is VLDR/VSTR
#   P=U W=1 is UNDEF
# leaving P=0 U=1 W=x and P=1 U=0 W=1 for load/store multiple.
# These include FSTM/FLDM.
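# In the patterns below imm is the length of the register list in
# words (the number of single registers, or twice the number of
# doubles).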
VLDM_VSTM_sp ---- 1100 1 . w:1 l:1 rn:4 .... 1010 imm:8 \
             vd=%vd_sp p=0 u=1
VLDM_VSTM_dp ---- 1100 1 . w:1 l:1 rn:4 .... 1011 imm:8 \
             vd=%vd_dp p=0 u=1

VLDM_VSTM_sp ---- 1101 0.1 l:1 rn:4 .... 1010 imm:8 \
             vd=%vd_sp p=1 u=0 w=1
VLDM_VSTM_dp ---- 1101 0.1 l:1 rn:4 .... 1011 imm:8 \
             vd=%vd_dp p=1 u=0 w=1

# 3-register VFP data-processing; bits [23,21:20,6] identify the operation.
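# For example, VMLA_sp below has those bits as 0, 00, 0 and VMLS_sp
# differs only in bit 6; each _dp pattern uses the same opcode bits
# with 1011 instead of 1010 in bits [11:8].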
VMLA_sp      ---- 1110 0.00 .... .... 1010 .0.0 .... \
             vm=%vm_sp vn=%vn_sp vd=%vd_sp
VMLA_dp      ---- 1110 0.00 .... .... 1011 .0.0 .... \
             vm=%vm_dp vn=%vn_dp vd=%vd_dp

VMLS_sp      ---- 1110 0.00 .... .... 1010 .1.0 .... \
             vm=%vm_sp vn=%vn_sp vd=%vd_sp
VMLS_dp      ---- 1110 0.00 .... .... 1011 .1.0 .... \
             vm=%vm_dp vn=%vn_dp vd=%vd_dp

VNMLS_sp     ---- 1110 0.01 .... .... 1010 .0.0 .... \
             vm=%vm_sp vn=%vn_sp vd=%vd_sp
VNMLS_dp     ---- 1110 0.01 .... .... 1011 .0.0 .... \
             vm=%vm_dp vn=%vn_dp vd=%vd_dp

VNMLA_sp     ---- 1110 0.01 .... .... 1010 .1.0 .... \
             vm=%vm_sp vn=%vn_sp vd=%vd_sp
VNMLA_dp     ---- 1110 0.01 .... .... 1011 .1.0 .... \
             vm=%vm_dp vn=%vn_dp vd=%vd_dp

VMUL_sp      ---- 1110 0.10 .... .... 1010 .0.0 .... \
             vm=%vm_sp vn=%vn_sp vd=%vd_sp
VMUL_dp      ---- 1110 0.10 .... .... 1011 .0.0 .... \
             vm=%vm_dp vn=%vn_dp vd=%vd_dp

VNMUL_sp     ---- 1110 0.10 .... .... 1010 .1.0 .... \
             vm=%vm_sp vn=%vn_sp vd=%vd_sp
VNMUL_dp     ---- 1110 0.10 .... .... 1011 .1.0 .... \
             vm=%vm_dp vn=%vn_dp vd=%vd_dp

VADD_sp      ---- 1110 0.11 .... .... 1010 .0.0 .... \
             vm=%vm_sp vn=%vn_sp vd=%vd_sp
VADD_dp      ---- 1110 0.11 .... .... 1011 .0.0 .... \
             vm=%vm_dp vn=%vn_dp vd=%vd_dp

VSUB_sp      ---- 1110 0.11 .... .... 1010 .1.0 .... \
             vm=%vm_sp vn=%vn_sp vd=%vd_sp
VSUB_dp      ---- 1110 0.11 .... .... 1011 .1.0 .... \
             vm=%vm_dp vn=%vn_dp vd=%vd_dp

VDIV_sp      ---- 1110 1.00 .... .... 1010 .0.0 .... \
             vm=%vm_sp vn=%vn_sp vd=%vd_sp
VDIV_dp      ---- 1110 1.00 .... .... 1011 .0.0 .... \
             vm=%vm_dp vn=%vn_dp vd=%vd_dp

VFM_sp       ---- 1110 1.01 .... .... 1010 . o2:1 . 0 .... \
             vm=%vm_sp vn=%vn_sp vd=%vd_sp o1=1
VFM_dp       ---- 1110 1.01 .... .... 1011 . o2:1 . 0 .... \
             vm=%vm_dp vn=%vn_dp vd=%vd_dp o1=1
VFM_sp       ---- 1110 1.10 .... .... 1010 . o2:1 . 0 .... \
             vm=%vm_sp vn=%vn_sp vd=%vd_sp o1=2
VFM_dp       ---- 1110 1.10 .... .... 1011 . o2:1 . 0 .... \
             vm=%vm_dp vn=%vn_dp vd=%vd_dp o1=2
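# In the four VFM patterns above, o1 separates the VFNMA/VFNMS pair
# from the VFMA/VFMS pair and o2 (bit 6) picks one instruction within
# a pair; the precise mapping is left to the trans_VFM_sp/trans_VFM_dp
# functions.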

VMOV_imm_sp  ---- 1110 1.11 imm4h:4 .... 1010 0000 imm4l:4 \
             vd=%vd_sp
VMOV_imm_dp  ---- 1110 1.11 imm4h:4 .... 1011 0000 imm4l:4 \
             vd=%vd_dp
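# imm4h:imm4l is the usual 8-bit VFP modified immediate, expanded (as
# by the Arm ARM's VFPExpandImm()) into a single- or double-precision
# constant.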

VMOV_reg_sp  ---- 1110 1.11 0000 .... 1010 01.0 .... \
             vd=%vd_sp vm=%vm_sp
VMOV_reg_dp  ---- 1110 1.11 0000 .... 1011 01.0 .... \
             vd=%vd_dp vm=%vm_dp

VABS_sp      ---- 1110 1.11 0000 .... 1010 11.0 .... \
             vd=%vd_sp vm=%vm_sp
VABS_dp      ---- 1110 1.11 0000 .... 1011 11.0 .... \
             vd=%vd_dp vm=%vm_dp

VNEG_sp      ---- 1110 1.11 0001 .... 1010 01.0 .... \
             vd=%vd_sp vm=%vm_sp
VNEG_dp      ---- 1110 1.11 0001 .... 1011 01.0 .... \
             vd=%vd_dp vm=%vm_dp

VSQRT_sp     ---- 1110 1.11 0001 .... 1010 11.0 .... \
             vd=%vd_sp vm=%vm_sp
VSQRT_dp     ---- 1110 1.11 0001 .... 1011 11.0 .... \
             vd=%vd_dp vm=%vm_dp

VCMP_sp      ---- 1110 1.11 010 z:1 .... 1010 e:1 1.0 .... \
             vd=%vd_sp vm=%vm_sp
VCMP_dp      ---- 1110 1.11 010 z:1 .... 1011 e:1 1.0 .... \
             vd=%vd_dp vm=%vm_dp
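# In the two patterns above z selects the compare-with-zero form
# (VCMP Vd, #0.0) and e selects the VCMPE variant, which raises
# Invalid Operation even for quiet NaN inputs.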

# VCVTT and VCVTB from f16: Vd format depends on size bit; Vm is always vm_sp
VCVT_f32_f16 ---- 1110 1.11 0010 .... 1010 t:1 1.0 .... \
             vd=%vd_sp vm=%vm_sp
VCVT_f64_f16 ---- 1110 1.11 0010 .... 1011 t:1 1.0 .... \
             vd=%vd_dp vm=%vm_sp

# VCVTB and VCVTT to f16: Vd format is always vd_sp; Vm format depends on size bit
VCVT_f16_f32 ---- 1110 1.11 0011 .... 1010 t:1 1.0 .... \
             vd=%vd_sp vm=%vm_sp
VCVT_f16_f64 ---- 1110 1.11 0011 .... 1011 t:1 1.0 .... \
             vd=%vd_sp vm=%vm_dp
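# In the four patterns above t is the T bit: t=1 is VCVTT and t=0 is
# VCVTB, i.e. whether the half-precision value sits in the top or
# bottom half of its single-precision register.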

VRINTR_sp    ---- 1110 1.11 0110 .... 1010 01.0 .... \
             vd=%vd_sp vm=%vm_sp
VRINTR_dp    ---- 1110 1.11 0110 .... 1011 01.0 .... \
             vd=%vd_dp vm=%vm_dp

VRINTZ_sp    ---- 1110 1.11 0110 .... 1010 11.0 .... \
             vd=%vd_sp vm=%vm_sp
VRINTZ_dp    ---- 1110 1.11 0110 .... 1011 11.0 .... \
             vd=%vd_dp vm=%vm_dp

VRINTX_sp    ---- 1110 1.11 0111 .... 1010 01.0 .... \
             vd=%vd_sp vm=%vm_sp
VRINTX_dp    ---- 1110 1.11 0111 .... 1011 01.0 .... \
             vd=%vd_dp vm=%vm_dp

# VCVT between single and double: Vm precision depends on size; Vd is its reverse
VCVT_sp      ---- 1110 1.11 0111 .... 1010 11.0 .... \
             vd=%vd_dp vm=%vm_sp
VCVT_dp      ---- 1110 1.11 0111 .... 1011 11.0 .... \
             vd=%vd_sp vm=%vm_dp

# VCVT from integer to floating point: Vm always single; Vd depends on size
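# s distinguishes a signed (s=1) from an unsigned (s=0) integer source.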
VCVT_int_sp  ---- 1110 1.11 1000 .... 1010 s:1 1.0 .... \
             vd=%vd_sp vm=%vm_sp
VCVT_int_dp  ---- 1110 1.11 1000 .... 1011 s:1 1.0 .... \
             vd=%vd_dp vm=%vm_sp

# VJCVT is always dp to sp
VJCVT        ---- 1110 1.11 1001 .... 1011 11.0 .... \
             vd=%vd_sp vm=%vm_dp