1//===-- AMDGPUGIsel.td - AMDGPU GlobalISel Patterns---------*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
// This file contains patterns that should only be used by GlobalISel.  For
9// example patterns for V_* instructions that have S_* equivalents.
10// SelectionDAG does not support selecting V_* instructions.
11//===----------------------------------------------------------------------===//
12
13include "AMDGPU.td"
14include "AMDGPUCombine.td"
15
// ComplexPatterns with an empty selection function: trivial on the
// SelectionDAG side, but bound to AMDGPUInstructionSelector methods
// (selectVSRC0 / selectVCSRC) for GlobalISel via GIComplexPatternEquiv.
def sd_vsrc0 : ComplexPattern<i32, 1, "">;
def gi_vsrc0 :
    GIComplexOperandMatcher<s32, "selectVSRC0">,
    GIComplexPatternEquiv<sd_vsrc0>;

def sd_vcsrc : ComplexPattern<i32, 1, "">;
def gi_vcsrc :
    GIComplexOperandMatcher<s32, "selectVCSRC">,
    GIComplexPatternEquiv<sd_vcsrc>;
25
// Source-modifier operand matchers for the VOP3 / VOP3P / VINTERP encodings.
// Each gi_* def routes the SelectionDAG ComplexPattern named in
// GIComplexPatternEquiv to the AMDGPUInstructionSelector method named in
// GIComplexOperandMatcher.
def gi_vop3mods0 :
    GIComplexOperandMatcher<s32, "selectVOP3Mods0">,
    GIComplexPatternEquiv<VOP3Mods0>;

def gi_vop3mods :
    GIComplexOperandMatcher<s32, "selectVOP3Mods">,
    GIComplexPatternEquiv<VOP3Mods>;

def gi_vop3modsnoncanonicalizing :
    GIComplexOperandMatcher<s32, "selectVOP3ModsNonCanonicalizing">,
    GIComplexPatternEquiv<VOP3ModsNonCanonicalizing>;

def gi_vop3_no_mods :
    GIComplexOperandMatcher<s32, "selectVOP3NoMods">,
    GIComplexPatternEquiv<VOP3NoMods>;

def gi_vop3omods :
    GIComplexOperandMatcher<s32, "selectVOP3OMods">,
    GIComplexPatternEquiv<VOP3OMods>;

def gi_vop3pmods :
    GIComplexOperandMatcher<s32, "selectVOP3PMods">,
    GIComplexPatternEquiv<VOP3PMods>;

def gi_vop3pmodsdot :
    GIComplexOperandMatcher<s32, "selectVOP3PModsDOT">,
    GIComplexPatternEquiv<VOP3PModsDOT>;

def gi_dotiuvop3pmods :
    GIComplexOperandMatcher<s32, "selectDotIUVOP3PMods">,
    GIComplexPatternEquiv<DotIUVOP3PMods>;

def gi_wmmaopselvop3pmods :
    GIComplexOperandMatcher<s32, "selectWMMAOpSelVOP3PMods">,
    GIComplexPatternEquiv<WMMAOpSelVOP3PMods>;

def gi_vop3opselmods :
    GIComplexOperandMatcher<s32, "selectVOP3OpSelMods">,
    GIComplexPatternEquiv<VOP3OpSelMods>;

def gi_vinterpmods :
    GIComplexOperandMatcher<s32, "selectVINTERPMods">,
    GIComplexPatternEquiv<VINTERPMods>;

def gi_vinterpmods_hi :
    GIComplexOperandMatcher<s32, "selectVINTERPModsHi">,
    GIComplexPatternEquiv<VINTERPModsHi>;

// FIXME: Why do we have both VOP3OpSel and VOP3OpSelMods?
// NOTE(review): both equivalences deliberately reuse selectVOP3OpSelMods.
def gi_vop3opsel :
    GIComplexOperandMatcher<s32, "selectVOP3OpSelMods">,
    GIComplexPatternEquiv<VOP3OpSel>;
78
// Address-mode matchers: scalar memory (SMRD), flat/global, and
// MUBUF/flat-scratch addressing.  s64 matchers consume 64-bit pointers.
def gi_smrd_imm :
    GIComplexOperandMatcher<s64, "selectSmrdImm">,
    GIComplexPatternEquiv<SMRDImm>;

def gi_smrd_imm32 :
    GIComplexOperandMatcher<s64, "selectSmrdImm32">,
    GIComplexPatternEquiv<SMRDImm32>;

def gi_smrd_sgpr :
    GIComplexOperandMatcher<s64, "selectSmrdSgpr">,
    GIComplexPatternEquiv<SMRDSgpr>;

def gi_smrd_sgpr_imm :
    GIComplexOperandMatcher<s64, "selectSmrdSgprImm">,
    GIComplexPatternEquiv<SMRDSgprImm>;

def gi_flat_offset :
    GIComplexOperandMatcher<s64, "selectFlatOffset">,
    GIComplexPatternEquiv<FlatOffset>;
def gi_global_offset :
    GIComplexOperandMatcher<s64, "selectGlobalOffset">,
    GIComplexPatternEquiv<GlobalOffset>;
def gi_global_saddr :
    GIComplexOperandMatcher<s64, "selectGlobalSAddr">,
    GIComplexPatternEquiv<GlobalSAddr>;

def gi_mubuf_scratch_offset :
    GIComplexOperandMatcher<s32, "selectMUBUFScratchOffset">,
    GIComplexPatternEquiv<MUBUFScratchOffset>;
def gi_mubuf_scratch_offen :
    GIComplexOperandMatcher<s32, "selectMUBUFScratchOffen">,
    GIComplexPatternEquiv<MUBUFScratchOffen>;

def gi_flat_scratch_offset :
    GIComplexOperandMatcher<s32, "selectScratchOffset">,
    GIComplexPatternEquiv<ScratchOffset>;

def gi_flat_scratch_saddr :
    GIComplexOperandMatcher<s32, "selectScratchSAddr">,
    GIComplexPatternEquiv<ScratchSAddr>;

def gi_flat_scratch_svaddr :
    GIComplexOperandMatcher<s32, "selectScratchSVAddr">,
    GIComplexPatternEquiv<ScratchSVAddr>;
123
// LDS/GDS (DS) addressing, MUBUF addressing, SMRD buffer offsets, and
// mad-mix source-modifier matchers.
def gi_ds_1addr_1offset :
    GIComplexOperandMatcher<s32, "selectDS1Addr1Offset">,
    GIComplexPatternEquiv<DS1Addr1Offset>;

def gi_ds_64bit_4byte_aligned :
    GIComplexOperandMatcher<s64, "selectDS64Bit4ByteAligned">,
    GIComplexPatternEquiv<DS64Bit4ByteAligned>;

def gi_ds_128bit_8byte_aligned :
    GIComplexOperandMatcher<s64, "selectDS128Bit8ByteAligned">,
    GIComplexPatternEquiv<DS128Bit8ByteAligned>;

def gi_mubuf_addr64 :
    GIComplexOperandMatcher<s64, "selectMUBUFAddr64">,
    GIComplexPatternEquiv<MUBUFAddr64>;

def gi_mubuf_offset :
    GIComplexOperandMatcher<s64, "selectMUBUFOffset">,
    GIComplexPatternEquiv<MUBUFOffset>;

def gi_smrd_buffer_imm :
    GIComplexOperandMatcher<s64, "selectSMRDBufferImm">,
    GIComplexPatternEquiv<SMRDBufferImm>;

def gi_smrd_buffer_imm32 :
    GIComplexOperandMatcher<s64, "selectSMRDBufferImm32">,
    GIComplexPatternEquiv<SMRDBufferImm32>;

def gi_smrd_buffer_sgpr_imm :
    GIComplexOperandMatcher<s64, "selectSMRDBufferSgprImm">,
    GIComplexPatternEquiv<SMRDBufferSgprImm>;

def gi_vop3_mad_mix_mods :
    GIComplexOperandMatcher<s64, "selectVOP3PMadMixMods">,
    GIComplexPatternEquiv<VOP3PMadMixMods>;

def gi_vop3_mad_mix_mods_ext :
    GIComplexOperandMatcher<s64, "selectVOP3PMadMixModsExt">,
    GIComplexPatternEquiv<VOP3PMadMixModsExt>;
163
164// Separate load nodes are defined to glue m0 initialization in
165// SelectionDAG. The GISel selector can just insert m0 initialization
166// directly before selecting a glue-less load, so hide this
167// distinction.
168
// Non-atomic glued load/store: restrict to non-atomic MMOs so the atomic
// equivalences below take the atomic cases.
def : GINodeEquiv<G_LOAD, AMDGPUld_glue> {
  let CheckMMOIsNonAtomic = 1;
  // A glued extending load selects as the corresponding extended opcode.
  let IfSignExtend = G_SEXTLOAD;
  let IfZeroExtend = G_ZEXTLOAD;
}

def : GINodeEquiv<G_STORE, AMDGPUst_glue> {
  let CheckMMOIsNonAtomic = 1;
}
178
// Atomic glued load/store: same generic opcodes, restricted to atomic MMOs.
// Use 'let' to override the field inherited from GINodeEquiv rather than a
// shadowing 'bit' declaration, for consistency with the non-atomic defs in
// this file (TableGen treats the re-declaration as a set, so behavior is
// unchanged; 'let' is the idiomatic override form).
def : GINodeEquiv<G_LOAD, AMDGPUatomic_ld_glue> {
  let CheckMMOIsAtomic = 1;
}

def : GINodeEquiv<G_STORE, AMDGPUatomic_st_glue> {
  let CheckMMOIsAtomic = 1;
}
186

// Glued atomic RMW / cmpxchg nodes map to the plain generic opcodes; the
// glued SelectionDAG forms exist only to order m0 initialization.
def : GINodeEquiv<G_ATOMIC_CMPXCHG, atomic_cmp_swap_glue>;
def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap_glue>;
def : GINodeEquiv<G_ATOMICRMW_ADD, atomic_load_add_glue>;
def : GINodeEquiv<G_ATOMICRMW_SUB, atomic_load_sub_glue>;
def : GINodeEquiv<G_ATOMICRMW_AND, atomic_load_and_glue>;
def : GINodeEquiv<G_ATOMICRMW_OR, atomic_load_or_glue>;
def : GINodeEquiv<G_ATOMICRMW_XOR, atomic_load_xor_glue>;
def : GINodeEquiv<G_ATOMICRMW_MIN, atomic_load_min_glue>;
def : GINodeEquiv<G_ATOMICRMW_MAX, atomic_load_max_glue>;
def : GINodeEquiv<G_ATOMICRMW_UMIN, atomic_load_umin_glue>;
def : GINodeEquiv<G_ATOMICRMW_UMAX, atomic_load_umax_glue>;
def : GINodeEquiv<G_ATOMICRMW_FADD, atomic_load_fadd_glue>;
200
// AMDGPU-specific target SDNodes with dedicated generic opcodes.
def : GINodeEquiv<G_AMDGPU_FFBH_U32, AMDGPUffbh_u32_impl>;
def : GINodeEquiv<G_AMDGPU_FFBL_B32, AMDGPUffbl_b32_impl>;
def : GINodeEquiv<G_AMDGPU_FMIN_LEGACY, AMDGPUfmin_legacy>;
def : GINodeEquiv<G_AMDGPU_FMAX_LEGACY, AMDGPUfmax_legacy>;
def : GINodeEquiv<G_AMDGPU_RCP_IFLAG, AMDGPUrcp_iflag>;

def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE0, AMDGPUcvt_f32_ubyte0>;
def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE1, AMDGPUcvt_f32_ubyte1>;
def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE2, AMDGPUcvt_f32_ubyte2>;
def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE3, AMDGPUcvt_f32_ubyte3>;

def : GINodeEquiv<G_AMDGPU_CVT_PK_I16_I32, AMDGPUpk_i16_i32_impl>;
def : GINodeEquiv<G_AMDGPU_SMED3, AMDGPUsmed3>;
def : GINodeEquiv<G_AMDGPU_UMED3, AMDGPUumed3>;
def : GINodeEquiv<G_AMDGPU_FMED3, AMDGPUfmed3_impl>;
def : GINodeEquiv<G_AMDGPU_CLAMP, AMDGPUclamp>;
217
// Buffer / tbuffer load and store intrinsic nodes.
def : GINodeEquiv<G_AMDGPU_ATOMIC_CMPXCHG, AMDGPUatomic_cmp_swap>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD, SIbuffer_load>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_USHORT, SIbuffer_load_ushort>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_UBYTE, SIbuffer_load_ubyte>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_SSHORT, SIbuffer_load_short>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_SBYTE, SIbuffer_load_byte>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_FORMAT, SIbuffer_load_format>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_FORMAT_TFE, SIbuffer_load_format_tfe>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_FORMAT_D16, SIbuffer_load_format_d16>;
def : GINodeEquiv<G_AMDGPU_TBUFFER_LOAD_FORMAT, SItbuffer_load>;
def : GINodeEquiv<G_AMDGPU_TBUFFER_LOAD_FORMAT_D16, SItbuffer_load_d16>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE, SIbuffer_store>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_SHORT, SIbuffer_store_short>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_BYTE, SIbuffer_store_byte>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_FORMAT, SIbuffer_store_format>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_FORMAT_D16, SIbuffer_store_format_d16>;
def : GINodeEquiv<G_AMDGPU_TBUFFER_STORE_FORMAT, SItbuffer_store>;
def : GINodeEquiv<G_AMDGPU_TBUFFER_STORE_FORMAT_D16, SItbuffer_store_d16>;
236
// FIXME: Check MMO is atomic
def : GINodeEquiv<G_ATOMICRMW_UINC_WRAP, atomic_load_uinc_wrap_glue>;
def : GINodeEquiv<G_ATOMICRMW_UDEC_WRAP, atomic_load_udec_wrap_glue>;
// FP min/max atomics: both the plain SI node and its glued form map to the
// same generic opcode.
def : GINodeEquiv<G_AMDGPU_ATOMIC_FMIN, SIatomic_fmin>;
def : GINodeEquiv<G_AMDGPU_ATOMIC_FMAX, SIatomic_fmax>;
def : GINodeEquiv<G_AMDGPU_ATOMIC_FMIN, atomic_load_fmin_glue>;
def : GINodeEquiv<G_AMDGPU_ATOMIC_FMAX, atomic_load_fmax_glue>;
244

// Buffer atomic intrinsic nodes and scalar buffer load.
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SWAP, SIbuffer_atomic_swap>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_ADD, SIbuffer_atomic_add>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SUB, SIbuffer_atomic_sub>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SMIN, SIbuffer_atomic_smin>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_UMIN, SIbuffer_atomic_umin>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SMAX, SIbuffer_atomic_smax>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_UMAX, SIbuffer_atomic_umax>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_AND, SIbuffer_atomic_and>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_OR, SIbuffer_atomic_or>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_XOR, SIbuffer_atomic_xor>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_INC, SIbuffer_atomic_inc>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_DEC, SIbuffer_atomic_dec>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_FADD, SIbuffer_atomic_fadd>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_FMIN, SIbuffer_atomic_fmin>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_FMAX, SIbuffer_atomic_fmax>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_CMPSWAP, SIbuffer_atomic_cmpswap>;
def : GINodeEquiv<G_AMDGPU_S_BUFFER_LOAD, SIsbuffer_load>;

def : GINodeEquiv<G_FPTRUNC_ROUND_UPWARD, SIfptrunc_round_upward>;
def : GINodeEquiv<G_FPTRUNC_ROUND_DOWNWARD, SIfptrunc_round_downward>;
266
// Pattern class: select a two-source SALU instruction when both source
// operands are in SGPRs.
class GISelSop2Pat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt>   : GCNPat <

  (dst_vt (node (src0_vt SReg_32:$src0), (src1_vt SReg_32:$src1))),
  (inst src0_vt:$src0, src1_vt:$src1)
>;
276
// Pattern class: select a two-source VOP2 instruction; src0 goes through the
// sd_vsrc0 ComplexPattern (selectVSRC0), src1 must be a VGPR.
class GISelVop2Pat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt>   : GCNPat <

  (dst_vt (node (src0_vt (sd_vsrc0 src0_vt:$src0)), (src1_vt VGPR_32:$src1))),
  (inst src0_vt:$src0, src1_vt:$src1)
>;
286
// As GISelVop2Pat, but matches the node with its operands in the opposite
// order (explicit commute; see the FIXME in GISelVop2IntrPat below for why).
class GISelVop2CommutePat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt>   : GCNPat <

  (dst_vt (node (src1_vt VGPR_32:$src1), (src0_vt (sd_vsrc0 src0_vt:$src0)))),
  (inst src0_vt:$src0, src1_vt:$src1)
>;
296
// Pattern class: select a two-source VOP3 instruction; both sources go
// through the sd_vcsrc ComplexPattern (selectVCSRC).
class GISelVop3Pat2 <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt>   : GCNPat <

  (dst_vt (node (src0_vt (sd_vcsrc src0_vt:$src0)), (src1_vt (sd_vcsrc src1_vt:$src1)))),
  (inst src0_vt:$src0, src1_vt:$src1)
>;
306
// As GISelVop3Pat2, but emits the instruction with the source operands
// swapped (note $src1/$src0 in the result pattern).
class GISelVop3Pat2CommutePat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt>   : GCNPat <

  (dst_vt (node (src0_vt (sd_vcsrc src0_vt:$src0)), (src1_vt (sd_vcsrc src1_vt:$src1)))),
  (inst src0_vt:$src1, src1_vt:$src0)
>;
316
// As GISelVop3Pat2, but matches VOP3 source modifiers, clamp, and output
// modifiers, forwarding them to the instruction's modifier operands.
class GISelVop3Pat2ModsPat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <

  (dst_vt (node (src0_vt (VOP3Mods0 src0_vt:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omods)),
                (src1_vt (VOP3Mods src1_vt:$src1, i32:$src1_modifiers)))),
  (inst i32:$src0_modifiers, src0_vt:$src0,
        i32:$src1_modifiers, src1_vt:$src1, $clamp, $omods)
>;
328
// Instantiates both operand orders of a VOP2 intrinsic pattern.
multiclass GISelVop2IntrPat <
  SDPatternOperator node, Instruction inst,
  ValueType dst_vt, ValueType src_vt = dst_vt> {

  def : GISelVop2Pat <node, inst, dst_vt, src_vt>;

  // FIXME: Intrinsics aren't marked as commutable, so we need to add an explicit
  // pattern to handle commuting.  This is another reason why legalizing to a
  // generic machine instruction may be better that matching the intrinsic
  // directly.
  def : GISelVop2CommutePat <node, inst, dst_vt, src_vt>;
}
341
// Since GlobalISel is more flexible then SelectionDAG, I think we can get
// away with adding patterns for integer types and not legalizing all
// loads and stores to vector types.  This should help simplify the load/store
// legalization.
// Instantiate 64-bit scalar-load patterns for i64 and pointer types.
foreach Ty = [i64, p0, p1, p4] in {
  defm : SMRD_Pattern <"S_LOAD_DWORDX2",  Ty>;
}
349
// Custom operand renderers: bind each SDNodeXForm to the
// AMDGPUInstructionSelector render function named in
// GICustomOperandRenderer.  The four timm widths all share renderTruncTImm.
def gi_as_i32timm : GICustomOperandRenderer<"renderTruncTImm">,
  GISDNodeXFormEquiv<as_i32timm>;

def gi_as_i16timm : GICustomOperandRenderer<"renderTruncTImm">,
  GISDNodeXFormEquiv<as_i16timm>;

def gi_as_i8timm : GICustomOperandRenderer<"renderTruncTImm">,
  GISDNodeXFormEquiv<as_i8timm>;

def gi_as_i1timm : GICustomOperandRenderer<"renderTruncTImm">,
  GISDNodeXFormEquiv<as_i1timm>;

def gi_NegateImm : GICustomOperandRenderer<"renderNegateImm">,
  GISDNodeXFormEquiv<NegateImm>;

def gi_bitcast_fpimm_to_i32 : GICustomOperandRenderer<"renderBitcastImm">,
  GISDNodeXFormEquiv<bitcast_fpimm_to_i32>;

def gi_IMMPopCount : GICustomOperandRenderer<"renderPopcntImm">,
  GISDNodeXFormEquiv<IMMPopCount>;

def gi_extract_cpol : GICustomOperandRenderer<"renderExtractCPol">,
  GISDNodeXFormEquiv<extract_cpol>;

def gi_extract_swz : GICustomOperandRenderer<"renderExtractSWZ">,
  GISDNodeXFormEquiv<extract_swz>;

def gi_set_glc : GICustomOperandRenderer<"renderSetGLC">,
  GISDNodeXFormEquiv<set_glc>;

def gi_frameindex_to_targetframeindex : GICustomOperandRenderer<"renderFrameIndex">,
  GISDNodeXFormEquiv<frameindex_to_targetframeindex>;
382