//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains instruction defs that are common to all hw codegen
// targets.
//
//===----------------------------------------------------------------------===//

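// These values mirror the AMDGPUAS address-space numbering used on the C++
// side of the backend (Flat = 0 through Private = 5), so they need to stay
// in sync with that enum.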
class AddressSpacesImpl {
  int Flat = 0;
  int Global = 1;
  int Region = 2;
  int Local = 3;
  int Constant = 4;
  int Private = 5;
}

def AddrSpaces : AddressSpacesImpl;


class AMDGPUInst <dag outs, dag ins, string asm = "",
  list<dag> pattern = []> : Instruction {
  field bit isRegisterLoad = 0;
  field bit isRegisterStore = 0;

  let Namespace = "AMDGPU";
  let OutOperandList = outs;
  let InOperandList = ins;
  let AsmString = asm;
  let Pattern = pattern;
  let Itinerary = NullALU;

  // SoftFail is a field the disassembler can use to provide a way for
  // instructions to not match without killing the whole decode process. It is
  // mainly used for ARM, but Tablegen expects this field to exist or it fails
  // to build the decode table.
  field bits<64> SoftFail = 0;

  let DecoderNamespace = Namespace;

  let TSFlags{63} = isRegisterLoad;
  let TSFlags{62} = isRegisterStore;
}

class AMDGPUShaderInst <dag outs, dag ins, string asm = "",
  list<dag> pattern = []> : AMDGPUInst<outs, ins, asm, pattern> {

  field bits<32> Inst = 0xffffffff;
}

//===---------------------------------------------------------------------===//
// Return instruction
//===---------------------------------------------------------------------===//

class ILFormat<dag outs, dag ins, string asmstr, list<dag> pattern>
: Instruction {

  let Namespace = "AMDGPU";
  dag OutOperandList = outs;
  dag InOperandList = ins;
  let Pattern = pattern;
  let AsmString = !strconcat(asmstr, "\n");
  let isPseudo = 1;
  let Itinerary = NullALU;
  bit hasIEEEFlag = 0;
  bit hasZeroOpFlag = 0;
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let isCodeGenOnly = 1;
}

def TruePredicate : Predicate<"">;

// FIXME: Tablegen should specially support this
def FalsePredicate : Predicate<"false">;

// Add a predicate to the list if it does not already exist, to deduplicate it.
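// e.g. PredConcat<[A, B], B>.ret yields [B, A] for hypothetical predicates A
// and B: the new predicate is prepended and any stale copy is filtered out.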
class PredConcat<list<Predicate> lst, Predicate pred> {
  list<Predicate> ret =
      !listconcat([pred], !filter(item, lst, !ne(item, pred)));
}

class PredicateControl {
  Predicate SubtargetPredicate = TruePredicate;
  Predicate AssemblerPredicate = TruePredicate;
  Predicate WaveSizePredicate = TruePredicate;
  list<Predicate> OtherPredicates = [];
  list<Predicate> Predicates = PredConcat<
                                 PredConcat<PredConcat<OtherPredicates,
                                                       SubtargetPredicate>.ret,
                                            AssemblerPredicate>.ret,
                                 WaveSizePredicate>.ret;
}
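// Subclasses override the individual fields; the nested PredConcats then fold
// them into a single deduplicated Predicates list, with WaveSizePredicate
// ending up at the front.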

class AMDGPUPat<dag pattern, dag result> : Pat<pattern, result>,
      PredicateControl;

let RecomputePerFunction = 1 in {
def FP16Denormals : Predicate<"MF->getInfo<SIMachineFunctionInfo>()->getMode().allFP64FP16Denormals()">;
def FP32Denormals : Predicate<"MF->getInfo<SIMachineFunctionInfo>()->getMode().allFP32Denormals()">;
def FP64Denormals : Predicate<"MF->getInfo<SIMachineFunctionInfo>()->getMode().allFP64FP16Denormals()">;
def NoFP16Denormals : Predicate<"!MF->getInfo<SIMachineFunctionInfo>()->getMode().allFP64FP16Denormals()">;
def NoFP32Denormals : Predicate<"!MF->getInfo<SIMachineFunctionInfo>()->getMode().allFP32Denormals()">;
def NoFP64Denormals : Predicate<"!MF->getInfo<SIMachineFunctionInfo>()->getMode().allFP64FP16Denormals()">;
def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;
}

def FMA : Predicate<"Subtarget->hasFMA()">;

def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;

def u16ImmTarget : AsmOperandClass {
  let Name = "U16Imm";
  let RenderMethod = "addImmOperands";
}

def s16ImmTarget : AsmOperandClass {
  let Name = "S16Imm";
  let RenderMethod = "addImmOperands";
}

let OperandType = "OPERAND_IMMEDIATE" in {

def u32imm : Operand<i32> {
  let PrintMethod = "printU32ImmOperand";
}

def u16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
  let ParserMatchClass = u16ImmTarget;
}

def s16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
  let ParserMatchClass = s16ImmTarget;
}

def u8imm : Operand<i8> {
  let PrintMethod = "printU8ImmOperand";
}

} // End OperandType = "OPERAND_IMMEDIATE"

//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
def brtarget   : Operand<OtherVT>;

//===----------------------------------------------------------------------===//
// Misc. PatFrags
//===----------------------------------------------------------------------===//

class HasOneUseUnaryOp<SDPatternOperator op> : PatFrag<
  (ops node:$src0),
  (op $src0),
  [{ return N->hasOneUse(); }]> {

  let GISelPredicateCode = [{
    return MRI.hasOneNonDBGUse(MI.getOperand(0).getReg());
  }];
}

class HasOneUseBinOp<SDPatternOperator op> : PatFrag<
  (ops node:$src0, node:$src1),
  (op $src0, $src1),
  [{ return N->hasOneUse(); }]> {
  let GISelPredicateCode = [{
    return MRI.hasOneNonDBGUse(MI.getOperand(0).getReg());
  }];
}

class HasOneUseTernaryOp<SDPatternOperator op> : PatFrag<
  (ops node:$src0, node:$src1, node:$src2),
  (op $src0, $src1, $src2),
  [{ return N->hasOneUse(); }]> {
  let GISelPredicateCode = [{
    return MRI.hasOneNonDBGUse(MI.getOperand(0).getReg());
  }];
}
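// The *_oneuse fragments defined from these classes match only when the
// node's result has a single non-debug use, so a combine can fold the
// operation into a larger instruction without duplicating work.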

class is_canonicalized<SDPatternOperator op> : PatFrag<
  (ops node:$src0, node:$src1),
  (op $src0, $src1),
  [{
    const SITargetLowering &Lowering =
              *static_cast<const SITargetLowering *>(getTargetLowering());

    return Lowering.isCanonicalized(*CurDAG, N->getOperand(0)) &&
      Lowering.isCanonicalized(*CurDAG, N->getOperand(1));
  }]> {

  // TODO: Improve the legalizer for g_build_vector in GlobalISel to match this class
  let GISelPredicateCode = [{
    const SITargetLowering *TLI = static_cast<const SITargetLowering *>(
      MF.getSubtarget().getTargetLowering());

    return TLI->isCanonicalized(MI.getOperand(1).getReg(), const_cast<MachineFunction&>(MF)) &&
      TLI->isCanonicalized(MI.getOperand(2).getReg(), const_cast<MachineFunction&>(MF));
  }];
}


let Properties = [SDNPCommutative, SDNPAssociative] in {
def smax_oneuse : HasOneUseBinOp<smax>;
def smin_oneuse : HasOneUseBinOp<smin>;
def umax_oneuse : HasOneUseBinOp<umax>;
def umin_oneuse : HasOneUseBinOp<umin>;

def fminnum_oneuse : HasOneUseBinOp<fminnum>;
def fmaxnum_oneuse : HasOneUseBinOp<fmaxnum>;

def fminnum_ieee_oneuse : HasOneUseBinOp<fminnum_ieee>;
def fmaxnum_ieee_oneuse : HasOneUseBinOp<fmaxnum_ieee>;


def and_oneuse : HasOneUseBinOp<and>;
def or_oneuse : HasOneUseBinOp<or>;
def xor_oneuse : HasOneUseBinOp<xor>;
} // Properties = [SDNPCommutative, SDNPAssociative]

def not_oneuse : HasOneUseUnaryOp<not>;

def add_oneuse : HasOneUseBinOp<add>;
def sub_oneuse : HasOneUseBinOp<sub>;

def srl_oneuse : HasOneUseBinOp<srl>;
def shl_oneuse : HasOneUseBinOp<shl>;

def select_oneuse : HasOneUseTernaryOp<select>;

def AMDGPUmul_u24_oneuse : HasOneUseBinOp<AMDGPUmul_u24>;
def AMDGPUmul_i24_oneuse : HasOneUseBinOp<AMDGPUmul_i24>;

def srl_16 : PatFrag<
  (ops node:$src0), (srl_oneuse node:$src0, (i32 16))
>;


def hi_i16_elt : PatFrag<
  (ops node:$src0), (i16 (trunc (i32 (srl_16 node:$src0))))
>;

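// Match the high 16 bits of a 32-bit value being reinterpreted as an f16,
// i.e. a bitcast whose source is (srl x, 16).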
def hi_f16_elt : PatLeaf<
  (vt), [{
  if (N->getOpcode() != ISD::BITCAST)
    return false;
  SDValue Tmp = N->getOperand(0);

  if (Tmp.getOpcode() != ISD::SRL)
    return false;
  if (const auto *RHS = dyn_cast<ConstantSDNode>(Tmp.getOperand(1)))
    return RHS->getZExtValue() == 16;
  return false;
}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for floating-point comparisons
//===----------------------------------------------------------------------===//

def COND_OEQ : PatFrags<(ops), [(OtherVT SETOEQ), (OtherVT SETEQ)]>;
def COND_ONE : PatFrags<(ops), [(OtherVT SETONE), (OtherVT SETNE)]>;
def COND_OGT : PatFrags<(ops), [(OtherVT SETOGT), (OtherVT SETGT)]>;
def COND_OGE : PatFrags<(ops), [(OtherVT SETOGE), (OtherVT SETGE)]>;
def COND_OLT : PatFrags<(ops), [(OtherVT SETOLT), (OtherVT SETLT)]>;
def COND_OLE : PatFrags<(ops), [(OtherVT SETOLE), (OtherVT SETLE)]>;
def COND_O   : PatFrags<(ops), [(OtherVT SETO)]>;
def COND_UO  : PatFrags<(ops), [(OtherVT SETUO)]>;

//===----------------------------------------------------------------------===//
// PatLeafs for unsigned / unordered comparisons
//===----------------------------------------------------------------------===//

def COND_UEQ : PatFrag<(ops), (OtherVT SETUEQ)>;
def COND_UNE : PatFrag<(ops), (OtherVT SETUNE)>;
def COND_UGT : PatFrag<(ops), (OtherVT SETUGT)>;
def COND_UGE : PatFrag<(ops), (OtherVT SETUGE)>;
def COND_ULT : PatFrag<(ops), (OtherVT SETULT)>;
def COND_ULE : PatFrag<(ops), (OtherVT SETULE)>;

// XXX - For some reason the R600 version prefers to use unordered for setne?
def COND_UNE_NE  : PatFrags<(ops), [(OtherVT SETUNE), (OtherVT SETNE)]>;

//===----------------------------------------------------------------------===//
// PatLeafs for signed comparisons
//===----------------------------------------------------------------------===//

def COND_SGT : PatFrag<(ops), (OtherVT SETGT)>;
def COND_SGE : PatFrag<(ops), (OtherVT SETGE)>;
def COND_SLT : PatFrag<(ops), (OtherVT SETLT)>;
def COND_SLE : PatFrag<(ops), (OtherVT SETLE)>;

//===----------------------------------------------------------------------===//
// PatLeafs for integer equality
//===----------------------------------------------------------------------===//

def COND_EQ : PatFrags<(ops), [(OtherVT SETEQ), (OtherVT SETUEQ)]>;
def COND_NE : PatFrags<(ops), [(OtherVT SETNE), (OtherVT SETUNE)]>;

// FIXME: Should not need code predicate
//def COND_NULL : PatLeaf<(OtherVT null_frag)>;
def COND_NULL : PatLeaf <
  (cond),
  [{(void)N; return false;}]
>;

//===----------------------------------------------------------------------===//
// PatLeafs for Texture Constants
//===----------------------------------------------------------------------===//

def TEX_ARRAY : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return TType == 9 || TType == 10 || TType == 16;
  }]
>;

def TEX_RECT : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return TType == 5;
  }]
>;

def TEX_SHADOW : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return (TType >= 6 && TType <= 8) || TType == 13;
  }]
>;

def TEX_SHADOW_ARRAY : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return TType == 11 || TType == 12 || TType == 17;
  }]
>;

//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//

def atomic_cmp_swap_glue : SDNode <"ISD::ATOMIC_CMP_SWAP", SDTAtomic3,
  [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
>;

class AddressSpaceList<list<int> AS> {
  list<int> AddrSpaces = AS;
}

class Aligned<int Bytes> {
  int MinAlignment = Bytes;
}

class StoreHi16<SDPatternOperator op> : PatFrag <
  (ops node:$value, node:$ptr), (op (srl node:$value, (i32 16)), node:$ptr)> {
  let IsStore = 1;
}
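// e.g. store_hi16_<as> below matches (truncstorei16 (srl $value, 16), $ptr),
// storing the high half of a 32-bit register.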

def LoadAddress_constant : AddressSpaceList<[  AddrSpaces.Constant ]>;
def LoadAddress_global : AddressSpaceList<[  AddrSpaces.Global, AddrSpaces.Constant ]>;
def StoreAddress_global : AddressSpaceList<[ AddrSpaces.Global ]>;

def LoadAddress_flat : AddressSpaceList<[  AddrSpaces.Flat,
                                           AddrSpaces.Global,
                                           AddrSpaces.Constant ]>;
def StoreAddress_flat : AddressSpaceList<[ AddrSpaces.Flat, AddrSpaces.Global ]>;

def LoadAddress_private : AddressSpaceList<[ AddrSpaces.Private ]>;
def StoreAddress_private : AddressSpaceList<[ AddrSpaces.Private ]>;

def LoadAddress_local : AddressSpaceList<[ AddrSpaces.Local ]>;
def StoreAddress_local : AddressSpaceList<[ AddrSpaces.Local ]>;

def LoadAddress_region : AddressSpaceList<[ AddrSpaces.Region ]>;
def StoreAddress_region : AddressSpaceList<[ AddrSpaces.Region ]>;



foreach as = [ "global", "flat", "constant", "local", "private", "region" ] in {
let AddressSpaces = !cast<AddressSpaceList>("LoadAddress_"#as).AddrSpaces in {

def load_#as : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
  let IsLoad = 1;
  let IsNonExtLoad = 1;
}

def extloadi8_#as  : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i8;
}

def extloadi16_#as : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i16;
}

def sextloadi8_#as  : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i8;
}

def sextloadi16_#as : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i16;
}

def zextloadi8_#as  : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i8;
}

def zextloadi16_#as : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i16;
}

def atomic_load_32_#as : PatFrag<(ops node:$ptr), (atomic_load_32 node:$ptr)> {
  let IsAtomic = 1;
  let MemoryVT = i32;
}

def atomic_load_64_#as : PatFrag<(ops node:$ptr), (atomic_load_64 node:$ptr)> {
  let IsAtomic = 1;
  let MemoryVT = i64;
}
} // End let AddressSpaces
} // End foreach as
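// The loop above stamps out one fragment per address space; e.g. for
// as = "global" it produces, roughly:
//
//   let AddressSpaces = [ AddrSpaces.Global, AddrSpaces.Constant ] in
//   def load_global : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
//     let IsLoad = 1;
//     let IsNonExtLoad = 1;
//   }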


foreach as = [ "global", "flat", "local", "private", "region" ] in {
let AddressSpaces = !cast<AddressSpaceList>("StoreAddress_"#as).AddrSpaces in {
def store_#as : PatFrag<(ops node:$val, node:$ptr),
                    (unindexedstore node:$val, node:$ptr)> {
  let IsStore = 1;
  let IsTruncStore = 0;
}

// truncstore fragments.
def truncstore_#as : PatFrag<(ops node:$val, node:$ptr),
                             (unindexedstore node:$val, node:$ptr)> {
  let IsStore = 1;
  let IsTruncStore = 1;
}

// TODO: We don't really need the truncstore here. We can use
// unindexedstore with MemoryVT directly, which will save an
// unnecessary check that the memory size is less than the value type
// in the generated matcher table.
def truncstorei8_#as : PatFrag<(ops node:$val, node:$ptr),
                               (truncstore node:$val, node:$ptr)> {
  let IsStore = 1;
  let MemoryVT = i8;
}

def truncstorei16_#as : PatFrag<(ops node:$val, node:$ptr),
                                (truncstore node:$val, node:$ptr)> {
  let IsStore = 1;
  let MemoryVT = i16;
}

def store_hi16_#as : StoreHi16 <truncstorei16>;
def truncstorei8_hi16_#as : StoreHi16<truncstorei8>;
def truncstorei16_hi16_#as : StoreHi16<truncstorei16>;

defm atomic_store_#as : binary_atomic_op<atomic_store>;

} // End let AddressSpaces
} // End foreach as
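// As with the loads, this yields store_global, truncstorei8_flat,
// atomic_store_local, and so on, for every writable address space.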


multiclass ret_noret_binary_atomic_op<SDNode atomic_op, bit IsInt = 1> {
  foreach as = [ "global", "flat", "constant", "local", "private", "region" ] in {
    let AddressSpaces = !cast<AddressSpaceList>("LoadAddress_"#as).AddrSpaces in {
      defm "_"#as : binary_atomic_op<atomic_op, IsInt>;

      let PredicateCode = [{return (SDValue(N, 0).use_empty());}] in {
        defm "_"#as#"_noret" : binary_atomic_op<atomic_op, IsInt>;
      }

      let PredicateCode = [{return !(SDValue(N, 0).use_empty());}] in {
        defm "_"#as#"_ret" : binary_atomic_op<atomic_op, IsInt>;
      }
    }
  }
}
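// Each defm below therefore expands to three fragment families per address
// space, e.g. the atomic_swap_global family plus _ret/_noret variants
// predicated on whether the loaded result is actually used.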

defm atomic_swap : ret_noret_binary_atomic_op<atomic_swap>;
defm atomic_load_add : ret_noret_binary_atomic_op<atomic_load_add>;
defm atomic_load_and : ret_noret_binary_atomic_op<atomic_load_and>;
defm atomic_load_max : ret_noret_binary_atomic_op<atomic_load_max>;
defm atomic_load_min : ret_noret_binary_atomic_op<atomic_load_min>;
defm atomic_load_or : ret_noret_binary_atomic_op<atomic_load_or>;
defm atomic_load_sub : ret_noret_binary_atomic_op<atomic_load_sub>;
defm atomic_load_umax : ret_noret_binary_atomic_op<atomic_load_umax>;
defm atomic_load_umin : ret_noret_binary_atomic_op<atomic_load_umin>;
defm atomic_load_xor : ret_noret_binary_atomic_op<atomic_load_xor>;
defm atomic_load_fadd : ret_noret_binary_atomic_op<atomic_load_fadd, 0>;
let MemoryVT = v2f16 in
defm atomic_load_fadd_v2f16 : ret_noret_binary_atomic_op<atomic_load_fadd, 0>;
defm AMDGPUatomic_cmp_swap : ret_noret_binary_atomic_op<AMDGPUatomic_cmp_swap>;

def load_align8_local : PatFrag<(ops node:$ptr), (load_local node:$ptr)>,
                        Aligned<8> {
  let IsLoad = 1;
  let IsNonExtLoad = 1;
}

def load_align16_local : PatFrag<(ops node:$ptr), (load_local node:$ptr)>,
                         Aligned<16> {
  let IsLoad = 1;
  let IsNonExtLoad = 1;
}

def store_align8_local: PatFrag<(ops node:$val, node:$ptr),
                                (store_local node:$val, node:$ptr)>, Aligned<8> {
  let IsStore = 1;
  let IsTruncStore = 0;
}

def store_align16_local: PatFrag<(ops node:$val, node:$ptr),
                                 (store_local node:$val, node:$ptr)>, Aligned<16> {
  let IsStore = 1;
  let IsTruncStore = 0;
}

let AddressSpaces = StoreAddress_local.AddrSpaces in {
defm atomic_cmp_swap_local : ternary_atomic_op<atomic_cmp_swap>;
defm atomic_cmp_swap_local_m0 : ternary_atomic_op<atomic_cmp_swap_glue>;
}

let AddressSpaces = StoreAddress_region.AddrSpaces in {
defm atomic_cmp_swap_region : ternary_atomic_op<atomic_cmp_swap>;
defm atomic_cmp_swap_region_m0 : ternary_atomic_op<atomic_cmp_swap_glue>;
}

//===----------------------------------------------------------------------===//
// Misc Pattern Fragments
//===----------------------------------------------------------------------===//

class Constants {
  int TWO_PI = 0x40c90fdb;
  int PI = 0x40490fdb;
  int TWO_PI_INV = 0x3e22f983;
  int FP_4294966784 = 0x4f7ffffe; // 4294966784 = 4294967296 - 512 = 2^32 - 2^9
  int FP16_ONE = 0x3C00;
  int FP16_NEG_ONE = 0xBC00;
  int FP32_ONE = 0x3f800000;
  int FP32_NEG_ONE = 0xbf800000;
  int FP64_ONE = 0x3ff0000000000000;
  int FP64_NEG_ONE = 0xbff0000000000000;
}
def CONST : Constants;
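// The values above are IEEE-754 bit patterns; e.g. FP32_ONE (0x3f800000) is
// 1.0f and FP16_ONE (0x3C00) is the half-precision 1.0.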

def FP_ZERO : PatLeaf <
  (fpimm),
  [{return N->getValueAPF().isZero();}]
>;

def FP_ONE : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(1.0);}]
>;

def FP_HALF : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(0.5);}]
>;

/* Generic helper patterns for intrinsics */
/* -------------------------------------- */

class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
  : AMDGPUPat <
  (fpow f32:$src0, f32:$src1),
  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
>;

/* Other helper patterns */
/* --------------------- */

/* Extract element pattern */
class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
                       SubRegIndex sub_reg>
  : AMDGPUPat<
  (sub_type (extractelt vec_type:$src, sub_idx)),
  (EXTRACT_SUBREG $src, sub_reg)
>;

/* Insert element pattern */
class Insert_Element <ValueType elem_type, ValueType vec_type,
                      int sub_idx, SubRegIndex sub_reg>
  : AMDGPUPat <
  (insertelt vec_type:$vec, elem_type:$elem, sub_idx),
  (INSERT_SUBREG $vec, $elem, sub_reg)
>;
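// A typical instantiation (register classes and subregister indices are
// target-specific), e.g.:
//   def : Extract_Element <f32, v2f32, 0, sub0>;
// which selects (extractelt v2f32:$src, 0) to an EXTRACT_SUBREG of sub0.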

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : AMDGPUPat <
  (dt (bitconvert (st rc:$src0))),
  (dt rc:$src0)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
class DwordAddrPat<ValueType vt, RegisterClass rc> : AMDGPUPat <
  (vt (AMDGPUdwordaddr (vt rc:$addr))),
  (vt rc:$addr)
>;

// rotr pattern
class ROTRPattern <Instruction BIT_ALIGN> : AMDGPUPat <
  (rotr i32:$src0, i32:$src1),
  (BIT_ALIGN $src0, $src0, $src1)
>;
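// rotr(x, s) is a funnel shift of x:x right by s, which is exactly what the
// bit-align instruction computes when both source operands are the same
// register.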

// Special conversion patterns

def cvt_rpi_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor (fadd $src, FP_HALF))),
  [{ (void) N; return TM.Options.NoNaNsFPMath; }]
>;

def cvt_flr_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor $src)),
  [{ (void)N; return TM.Options.NoNaNsFPMath; }]
>;
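// Both fragments fold the floor into the convert, in the style of the
// hardware's CVT_RPI/CVT_FLR conversions; they are only legal under no-NaNs
// math, hence the NoNaNsFPMath check.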

let AddedComplexity = 2 in {
class IMad24Pat<Instruction Inst, bit HasClamp = 0> : AMDGPUPat <
  (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
  !if(HasClamp, (Inst $src0, $src1, $src2, (i1 0)),
                (Inst $src0, $src1, $src2))
>;

class UMad24Pat<Instruction Inst, bit HasClamp = 0> : AMDGPUPat <
  (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
  !if(HasClamp, (Inst $src0, $src1, $src2, (i1 0)),
                (Inst $src0, $src1, $src2))
>;
} // AddedComplexity.

class RcpPat<Instruction RcpInst, ValueType vt> : AMDGPUPat <
  (fdiv FP_ONE, vt:$src),
  (RcpInst $src)
>;

class RsqPat<Instruction RsqInst, ValueType vt> : AMDGPUPat <
  (AMDGPUrcp (fsqrt vt:$src)),
  (RsqInst $src)
>;

// Instructions which select to the same v_min_f*
def fminnum_like : PatFrags<(ops node:$src0, node:$src1),
  [(fminnum_ieee node:$src0, node:$src1),
   (fminnum node:$src0, node:$src1)]
>;

// Instructions which select to the same v_max_f*
def fmaxnum_like : PatFrags<(ops node:$src0, node:$src1),
  [(fmaxnum_ieee node:$src0, node:$src1),
   (fmaxnum node:$src0, node:$src1)]
>;

def fminnum_like_oneuse : PatFrags<(ops node:$src0, node:$src1),
  [(fminnum_ieee_oneuse node:$src0, node:$src1),
   (fminnum_oneuse node:$src0, node:$src1)]
>;

def fmaxnum_like_oneuse : PatFrags<(ops node:$src0, node:$src1),
  [(fmaxnum_ieee_oneuse node:$src0, node:$src1),
   (fmaxnum_oneuse node:$src0, node:$src1)]
>;

def any_fmad : PatFrags<(ops node:$src0, node:$src1, node:$src2),
  [(fmad node:$src0, node:$src1, node:$src2),
   (AMDGPUfmad_ftz node:$src0, node:$src1, node:$src2)]
>;

// FIXME: fsqrt should not select directly
def any_amdgcn_sqrt : PatFrags<(ops node:$src0),
  [(fsqrt node:$src0), (int_amdgcn_sqrt node:$src0)]
>;