1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
|
//=- AMDGPUCombine.td - Define AMDGPU Combine Rules ----------*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
include "llvm/Target/GlobalISel/Combine.td"
// TODO: This really belongs after legalization after scalarization.
// Match data passed from matchFMinFMaxLegacy to the apply step of the
// fcmp_select_to_fmin_fmax_legacy rule below.
def fmin_fmax_legacy_matchdata : GIDefMatchData<"FMinFMaxLegacyInfo">;
// On subtargets that have the legacy fmin/fmax instructions, fold an f32
// G_SELECT whose condition is a G_FCMP of the selected values into a single
// legacy min/max operation. matchFMinFMaxLegacy validates the predicate and
// operand pairing and records it in $matchinfo.
let Predicates = [HasFminFmaxLegacy] in
def fcmp_select_to_fmin_fmax_legacy : GICombineRule<
(defs root:$select, fmin_fmax_legacy_matchdata:$matchinfo),
(match (G_FCMP $cond, $pred, $lhs, $rhs):$fcmp,
(G_SELECT f32:$dst, $cond, $true, $false):$select,
[{ return matchFMinFMaxLegacy(*${select}, *${fcmp}, ${matchinfo}); }]),
(apply [{ applySelectFCmpToFMinFMaxLegacy(*${select}, ${matchinfo}); }])>;
// Combine G_UITOFP/G_SITOFP conversions; matchUCharToFloat decides
// applicability (per the rule name, sources in unsigned-char range —
// see the C++ implementation for the exact condition).
def uchar_to_float : GICombineRule<
(defs root:$itofp),
(match (wip_match_opcode G_UITOFP, G_SITOFP):$itofp,
[{ return matchUCharToFloat(*${itofp}); }]),
(apply [{ applyUCharToFloat(*${itofp}); }])>;
// Fold reciprocal/square-root sequences into rsq. Roots on either a
// G_INTRINSIC (presumably amdgcn.rcp — confirm in matchRcpSqrtToRsq) or a
// G_FSQRT; the match function builds the replacement via build_fn_matchinfo.
def rcp_sqrt_to_rsq : GICombineRule<
(defs root:$rcp, build_fn_matchinfo:$matchinfo),
(match (wip_match_opcode G_INTRINSIC, G_FSQRT):$rcp,
[{ return matchRcpSqrtToRsq(*${rcp}, ${matchinfo}); }]),
(apply [{ Helper.applyBuildFn(*${rcp}, ${matchinfo}); }])>;
// Fold f16 (fdiv $y, (fsqrt $x)) into rsq, but only when both the fdiv and
// the fsqrt carry the contract fast-math flag.
def fdiv_by_sqrt_to_rsq_f16 : GICombineRule<
(defs root:$root),
(match (G_FSQRT f16:$sqrt, $x, (MIFlags FmContract)),
(G_FDIV f16:$dst, $y, $sqrt, (MIFlags FmContract)):$root,
[{ return matchFDivSqrtToRsqF16(*${root}); }]),
(apply [{ applyFDivSqrtToRsqF16(*${root}, ${x}.getReg()); }])>;
// Match data for the cvt_f32_ubyteN rule below.
def cvt_f32_ubyteN_matchdata : GIDefMatchData<"CvtF32UByteMatchInfo">;
// Simplify any of the four G_AMDGPU_CVT_F32_UBYTE0..3 conversions;
// matchCvtF32UByteN records the rewrite in $matchinfo.
def cvt_f32_ubyteN : GICombineRule<
(defs root:$cvt_f32_ubyteN, cvt_f32_ubyteN_matchdata:$matchinfo),
(match (wip_match_opcode G_AMDGPU_CVT_F32_UBYTE0,
G_AMDGPU_CVT_F32_UBYTE1,
G_AMDGPU_CVT_F32_UBYTE2,
G_AMDGPU_CVT_F32_UBYTE3):$cvt_f32_ubyteN,
[{ return matchCvtF32UByteN(*${cvt_f32_ubyteN}, ${matchinfo}); }]),
(apply [{ applyCvtF32UByteN(*${cvt_f32_ubyteN}, ${matchinfo}); }])>;
// Match data for the clamp_i64_to_i16 rule below.
def clamp_i64_to_i16_matchdata : GIDefMatchData<"ClampI64ToI16MatchInfo">;
// Recognize a G_TRUNC that is part of an i64-to-i16 clamp pattern (see
// matchClampI64ToI16, which also consults MRI and MF) and rewrite it.
def clamp_i64_to_i16 : GICombineRule<
(defs root:$clamp_i64_to_i16, clamp_i64_to_i16_matchdata:$matchinfo),
(match (wip_match_opcode G_TRUNC):$clamp_i64_to_i16,
[{ return matchClampI64ToI16(*${clamp_i64_to_i16}, MRI, MF, ${matchinfo}); }]),
(apply [{ applyClampI64ToI16(*${clamp_i64_to_i16}, ${matchinfo}); }])>;
// Match data shared by the integer and FP min/max-to-med3 rules.
def med3_matchdata : GIDefMatchData<"Med3MatchInfo">;
// Fold nested integer min/max (smax/smin/umax/umin) clamp patterns into a
// single med3 operation; matchIntMinMaxToMed3 validates the pattern.
def int_minmax_to_med3 : GICombineRule<
(defs root:$min_or_max, med3_matchdata:$matchinfo),
(match (wip_match_opcode G_SMAX,
G_SMIN,
G_UMAX,
G_UMIN):$min_or_max,
[{ return matchIntMinMaxToMed3(*${min_or_max}, ${matchinfo}); }]),
(apply [{ applyMed3(*${min_or_max}, ${matchinfo}); }])>;
// On subtargets where d16 loads preserve the unused 16 bits, try to combine
// a G_BITCAST via combineD16Load (single-step combine: match and apply are
// both performed by the C++ function).
let Predicates = [Predicate<"Subtarget->d16PreservesUnusedBits()">] in
def d16_load : GICombineRule<
(defs root:$bitcast),
(combine (G_BITCAST $dst, $src):$bitcast,
[{ return combineD16Load(*${bitcast} ); }])>;
// Fold nested FP min/max (fmaxnum/fminnum and their IEEE variants) clamp
// patterns into a single med3 operation.
def fp_minmax_to_med3 : GICombineRule<
(defs root:$min_or_max, med3_matchdata:$matchinfo),
(match (wip_match_opcode G_FMAXNUM,
G_FMINNUM,
G_FMAXNUM_IEEE,
G_FMINNUM_IEEE):$min_or_max,
[{ return matchFPMinMaxToMed3(*${min_or_max}, ${matchinfo}); }]),
(apply [{ applyMed3(*${min_or_max}, ${matchinfo}); }])>;
// Fold FP min/max patterns into a clamp operation; matchFPMinMaxToClamp
// records the register to clamp in $matchinfo.
def fp_minmax_to_clamp : GICombineRule<
(defs root:$min_or_max, register_matchinfo:$matchinfo),
(match (wip_match_opcode G_FMAXNUM,
G_FMINNUM,
G_FMAXNUM_IEEE,
G_FMINNUM_IEEE):$min_or_max,
[{ return matchFPMinMaxToClamp(*${min_or_max}, ${matchinfo}); }]),
(apply [{ applyClamp(*${min_or_max}, ${matchinfo}); }])>;
// Fold a G_AMDGPU_FMED3 whose operands form a clamp pattern into a clamp
// operation (see matchFPMed3ToClamp for the exact condition).
def fmed3_intrinsic_to_clamp : GICombineRule<
(defs root:$fmed3, register_matchinfo:$matchinfo),
(match (wip_match_opcode G_AMDGPU_FMED3):$fmed3,
[{ return matchFPMed3ToClamp(*${fmed3}, ${matchinfo}); }]),
(apply [{ applyClamp(*${fmed3}, ${matchinfo}); }])>;
// Drop a G_FCANONICALIZE when it is redundant, replacing its def with the
// register recorded by matchRemoveFcanonicalize.
def remove_fcanonicalize : GICombineRule<
(defs root:$fcanonicalize, register_matchinfo:$matchinfo),
(match (wip_match_opcode G_FCANONICALIZE):$fcanonicalize,
[{ return matchRemoveFcanonicalize(*${fcanonicalize}, ${matchinfo}); }]),
(apply [{ Helper.replaceSingleDefInstWithReg(*${fcanonicalize}, ${matchinfo}); }])>;
// Match data for foldable_fneg: the instruction the fneg folds into.
def foldable_fneg_matchdata : GIDefMatchData<"MachineInstr *">;
// Fold a G_FNEG into its source instruction when the combiner helper
// determines that is possible.
def foldable_fneg : GICombineRule<
(defs root:$ffn, foldable_fneg_matchdata:$matchinfo),
(match (wip_match_opcode G_FNEG):$ffn,
[{ return Helper.matchFoldableFneg(*${ffn}, ${matchinfo}); }]),
(apply [{ Helper.applyFoldableFneg(*${ffn}, ${matchinfo}); }])>;
// Detects s_mul_u64 instructions whose higher bits are zero/sign extended.
// When matchCombine_s_mul_u64 succeeds, the G_MUL's opcode is replaced with
// the opcode recorded in $matchinfo.
def smulu64 : GICombineRule<
(defs root:$smul, unsigned_matchinfo:$matchinfo),
(match (wip_match_opcode G_MUL):$smul,
[{ return matchCombine_s_mul_u64(*${smul}, ${matchinfo}); }]),
(apply [{ Helper.replaceOpcodeWith(*${smul}, ${matchinfo}); }])>;
// Match data for sign_extension_in_reg: the producing instruction plus an
// opcode/width value (see matchCombineSignExtendInReg).
// NOTE: renamed from the misspelled "sign_exension_in_reg_matchdata"; the
// identifier is local to this rule (defined and used only here).
def sign_extension_in_reg_matchdata : GIDefMatchData<"std::pair<MachineInstr *, unsigned>">;
// Combine a G_SEXT_INREG with its source instruction when possible.
def sign_extension_in_reg : GICombineRule<
(defs root:$sign_inreg, sign_extension_in_reg_matchdata:$matchinfo),
(match (wip_match_opcode G_SEXT_INREG):$sign_inreg,
[{ return matchCombineSignExtendInReg(*${sign_inreg}, ${matchinfo}); }]),
(apply [{ applyCombineSignExtendInReg(*${sign_inreg}, ${matchinfo}); }])>;
// Do the following combines :
// fmul x, select(y, A, B) -> fldexp (x, select i32 (y, a, b))
// fmul x, select(y, -A, -B) -> fldexp ((fneg x), select i32 (y, a, b))
// The helper validates that A/B are suitable power-of-two constants and
// builds the replacement via build_fn_matchinfo.
def combine_fmul_with_select_to_fldexp : GICombineRule<
(defs root:$root, build_fn_matchinfo:$matchinfo),
(match (G_FMUL $dst, $x, $select):$root,
(G_SELECT $select, $y, $A, $B):$sel,
[{ return Helper.matchCombineFmulWithSelectToFldexp(*${root}, *${sel}, ${matchinfo}); }]),
(apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
// (shift x, (zext amt)) -> (shift x, (and (anyext amt), mask)
//
// The pattern is longer, but is better for matching during ISel.
// Parameterized over the shift opcode; instantiated for lshr/ashr/shl below.
class canonicalize_zext_shift_amt<Instruction opc> : GICombineRule<
(defs root:$dst),
(match (G_ZEXT $amt, $amtsrc):$zext,
(opc $dst, $src, $amt):$shift),
(apply [{ applyCanonicalizeZextShiftAmt(*${shift}, *${zext}); }])>;
def canonicalize_zext_lshr : canonicalize_zext_shift_amt<G_LSHR>;
def canonicalize_zext_ashr : canonicalize_zext_shift_amt<G_ASHR>;
def canonicalize_zext_shl : canonicalize_zext_shift_amt<G_SHL>;
// Group used by the reg-bank combiner below.
def zext_of_shift_amount_combines : GICombineGroup<[
canonicalize_zext_lshr, canonicalize_zext_ashr, canonicalize_zext_shl
]>;
// (and/or i64:x, i64:y) -> i64:(merge (and/or lo_32(x), lo_32(y)), (and/or hi_32(x), hi_32(y)))
// when either x or y is all ones in low or high parts
// Parameterized over the binary opcode; the apply pattern unmerges both
// 64-bit operands, applies the op per 32-bit half, and re-merges the result.
class combine_binop_s64_with_s32_mask<Instruction opcode> : GICombineRule<
(defs root:$dst),
(match (opcode $dst, i64:$x, i64:$y):$dst,
[{ return Helper.matchConstantIs32BitMask(${x}.getReg()) ||
Helper.matchConstantIs32BitMask(${y}.getReg()); }]),
(apply (G_UNMERGE_VALUES i32:$x_lo, i32:$x_hi, $x),
(G_UNMERGE_VALUES i32:$y_lo, i32:$y_hi, $y),
(opcode i32:$lo, $x_lo, $y_lo),
(opcode i32:$hi, $x_hi, $y_hi),
(G_MERGE_VALUES $dst, $lo, $hi))>;
def combine_or_s64_with_s32_mask : combine_binop_s64_with_s32_mask<G_OR>;
def combine_and_s64_with_s32_mask : combine_binop_s64_with_s32_mask<G_AND>;
// Group used by the pre- and post-legalizer combiners below.
def binop_s64_with_s32_mask_combines : GICombineGroup<[
combine_or_s64_with_s32_mask, combine_and_s64_with_s32_mask
]>;
let Predicates = [Has16BitInsts, NotHasMed3_16] in {
// For gfx8, expand f16-fmed3-as-f32 into a min/max f16 sequence. This
// saves one instruction compared to the promotion.
//
// FIXME: Should have ComplexPattern like in/out matchers
//
// FIXME: We should be able to match either G_AMDGPU_FMED3 or
// G_INTRINSIC @llvm.amdgcn.fmed3. Currently the legalizer will
// replace the intrinsic with G_AMDGPU_FMED3 since we can't write a
// pattern to match it.
// Roots on the G_FPTRUNC of a G_AMDGPU_FMED3 result (the f32-promoted form).
def expand_promoted_fmed3 : GICombineRule<
(defs root:$fptrunc_dst),
(match (G_FPTRUNC $fptrunc_dst, $fmed3_dst):$fptrunc,
(G_AMDGPU_FMED3 $fmed3_dst, $src0, $src1, $src2),
[{ return Helper.matchExpandPromotedF16FMed3(*${fptrunc}, ${src0}.getReg(), ${src1}.getReg(), ${src2}.getReg()); }]),
(apply [{ Helper.applyExpandPromotedF16FMed3(*${fptrunc}, ${src0}.getReg(), ${src1}.getReg(), ${src2}.getReg()); }])
>;
} // End Predicates = [Has16BitInsts, NotHasMed3_16]
// Combines which should only apply on SI/CI
def gfx6gfx7_combines : GICombineGroup<[fcmp_select_to_fmin_fmax_legacy]>;
// Combines which should only apply on VI
def gfx8_combines : GICombineGroup<[expand_promoted_fmed3]>;
// Combiner run before legalization: all generic combines plus the AMDGPU
// rules that must see pre-legalized MIR.
def AMDGPUPreLegalizerCombiner: GICombiner<
"AMDGPUPreLegalizerCombinerImpl",
[all_combines, combine_fmul_with_select_to_fldexp, clamp_i64_to_i16,
foldable_fneg, combine_shuffle_vector_to_build_vector,
binop_s64_with_s32_mask_combines]> {
let CombineAllMethodName = "tryCombineAllImpl";
}
// Combiner run after legalization: all generic combines, the subtarget-gated
// groups, and the AMDGPU rules that expect legalized MIR.
def AMDGPUPostLegalizerCombiner: GICombiner<
"AMDGPUPostLegalizerCombinerImpl",
[all_combines, gfx6gfx7_combines, gfx8_combines, combine_fmul_with_select_to_fldexp,
uchar_to_float, cvt_f32_ubyteN, remove_fcanonicalize, foldable_fneg,
rcp_sqrt_to_rsq, fdiv_by_sqrt_to_rsq_f16, sign_extension_in_reg, smulu64,
binop_s64_with_s32_mask_combines]> {
let CombineAllMethodName = "tryCombineAllImpl";
}
// Combiner run after register bank selection: a hand-picked subset of
// generic combines plus the med3/clamp and shift-amount rules above.
def AMDGPURegBankCombiner : GICombiner<
"AMDGPURegBankCombinerImpl",
[unmerge_merge, unmerge_cst, unmerge_undef,
zext_trunc_fold, int_minmax_to_med3, ptr_add_immed_chain,
fp_minmax_to_clamp, fp_minmax_to_med3, fmed3_intrinsic_to_clamp,
identity_combines, redundant_and, constant_fold_cast_op,
cast_of_cast_combines, sext_trunc, zext_of_shift_amount_combines,
d16_load]> {
}
|