//===-- AMDGPUISelLowering.h - AMDGPU Lowering Interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Interface definition of the TargetLowering class that is common
/// to all AMD GPUs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H

#include "AMDGPU.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {

class AMDGPUMachineFunction;
class AMDGPUSubtarget;
struct ArgDescriptor;

class AMDGPUTargetLowering : public TargetLowering {
private:
  const AMDGPUSubtarget *Subtarget;

  /// \returns The AMDGPUISD::FFBH_U32 or AMDGPUISD::FFBL_B32 node (selected by
  /// \p Opc) if the incoming \p Op may have been legalized from a smaller type
  /// VT. We need to match the pre-legalized type because the generic
  /// legalization inserts the add/sub between the select and the compare.
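  ///
  /// A minimal illustrative sketch of one matched shape (an assumption, not
  /// the only form handled): an i16 ctlz promoted to i32 becomes
  /// (sub (ctlz (zext x)), 16), with the zero-compare and select wrapped
  /// around that sub.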
  SDValue getFFBX_U32(SelectionDAG &DAG, SDValue Op, const SDLoc &DL,
                      unsigned Opc) const;

public:
  static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG);
  static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG);
  static bool hasDefinedInitializer(const GlobalValue *GV);

protected:
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFROUND_LegalFTRUNC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFROUND64(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLOG(SDValue Op, SelectionDAG &DAG,
                    double Log2BaseInverted) const;
  SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;

protected:
  bool shouldCombineMemoryType(EVT VT) const;
  SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performAssertSZExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performIntrinsicWOChainCombine(SDNode *N,
                                         DAGCombinerInfo &DCI) const;

  SDValue splitBinaryBitConstantOpImpl(DAGCombinerInfo &DCI, const SDLoc &SL,
                                       unsigned Opc, SDValue LHS,
                                       uint32_t ValLo, uint32_t ValHi) const;
  SDValue performShlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performTruncateCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulLoHi24Combine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,
                                  SDValue RHS, DAGCombinerInfo &DCI) const;
  SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  bool isConstantCostlierToNegate(SDValue N) const;
  SDValue performFNegCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFAbsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);

  virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
                                     SelectionDAG &DAG) const;

  /// Return the 64-bit value \p Op as two 32-bit integers.
  std::pair<SDValue, SDValue> split64BitValue(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const;
  SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const;
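  // Illustrative use (a sketch, not code from this file): split an i64 value
  // into its two halves.
  //   SDValue Lo, Hi;
  //   std::tie(Lo, Hi) = split64BitValue(Op, DAG);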

  /// Split a vector type into two parts. The first part is a power of two
  /// vector. The second part is whatever is left over, and is a scalar if it
  /// would otherwise be a 1-vector.
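  ///
  /// For example (illustrative): v3f32 splits into {v2f32, f32}, and v4i32
  /// splits into {v2i32, v2i32}.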
  std::pair<EVT, EVT> getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const;

  /// Split a vector value into two parts of types LoVT and HiVT. HiVT could be
  /// scalar.
  std::pair<SDValue, SDValue> splitVector(const SDValue &N, const SDLoc &DL,
                                          const EVT &LoVT, const EVT &HiVT,
                                          SelectionDAG &DAG) const;

  /// Split a vector load into 2 loads of half the vector.
  SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// Widen a vector load from vec3 to vec4.
  SDValue WidenVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// Split a vector store into 2 stores of half the vector.
  SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const;
  void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
                      SmallVectorImpl<SDValue> &Results) const;

  void analyzeFormalArgumentsCompute(
    CCState &State,
    const SmallVectorImpl<ISD::InputArg> &Ins) const;

public:
  AMDGPUTargetLowering(const TargetMachine &TM, const AMDGPUSubtarget &STI);

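  /// \returns true if it is safe to treat -0.0 as +0.0 for \p Op: either the
  /// function-wide no-signed-zeros option is enabled, or the node itself
  /// carries the no-signed-zeros flag.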
  bool mayIgnoreSignedZero(SDValue Op) const {
    if (getTargetMachine().Options.NoSignedZerosFPMath)
      return true;

    const auto Flags = Op.getNode()->getFlags();
    if (Flags.isDefined())
      return Flags.hasNoSignedZeros();

    return false;
  }

  static inline SDValue stripBitcast(SDValue Val) {
    return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
  }

  static bool allUsesHaveSourceMods(const SDNode *N,
                                    unsigned CostThreshold = 4);
  bool isFAbsFree(EVT VT) const override;
  bool isFNegFree(EVT VT) const override;
  bool isTruncateFree(EVT Src, EVT Dest) const override;
  bool isTruncateFree(Type *Src, Type *Dest) const override;

  bool isZExtFree(Type *Src, Type *Dest) const override;
  bool isZExtFree(EVT Src, EVT Dest) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;

  MVT getVectorIdxTy(const DataLayout &) const override;
  bool isSelectSupported(SelectSupportKind) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override;
  bool shouldReduceLoadWidth(SDNode *Load,
                             ISD::LoadExtType ExtType,
                             EVT ExtVT) const override;

  bool isLoadBitCastBeneficial(EVT, EVT, const SelectionDAG &DAG,
                               const MachineMemOperand &MMO) const final;

  bool storeOfVectorConstantIsCheap(EVT MemVT,
                                    unsigned NumElem,
                                    unsigned AS) const override;
  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override;
  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;

  bool isSDNodeAlwaysUniform(const SDNode *N) const override;
  static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
  static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue addTokenForArgument(SDValue Chain,
                              SelectionDAG &DAG,
                              MachineFrameInfo &MFI,
                              int ClobberedFI) const;

  SDValue lowerUnhandledCall(CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals,
                             StringRef Reason) const;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op,
                                  SelectionDAG &DAG) const;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
  void ReplaceNodeResults(SDNode *N,
                          SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue combineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS,
                               SDValue RHS, SDValue True, SDValue False,
                               SDValue CC, DAGCombinerInfo &DCI) const;

  const char *getTargetNodeName(unsigned Opcode) const override;

  // FIXME: Turn off MergeConsecutiveStores() before Instruction Selection for
  // AMDGPU. Commit r319036
  // (https://github.com/llvm/llvm-project/commit/db77e57ea86d941a4262ef60261692f4cb6893e6)
  // turned on MergeConsecutiveStores() before Instruction Selection for all
  // targets. Enough AMDGPU compiles go into an infinite loop
  // (MergeConsecutiveStores() merges two stores; LegalizeStoreOps() un-merges;
  // MergeConsecutiveStores() re-merges, etc.) to warrant turning it off for
  // now.
  bool mergeStoresAfterLegalization(EVT) const override { return false; }

  bool isFsqrtCheap(SDValue Operand, SelectionDAG &DAG) const override {
    return true;
  }
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &RefinementSteps, bool &UseOneConstNR,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &RefinementSteps) const override;

  virtual SDNode *PostISelFolding(MachineSDNode *N,
                                  SelectionDAG &DAG) const = 0;

  /// Determine which bits of \p Op are known to be either zero or one and
  /// return them in the \p Known bitset. \p DemandedElts limits the query to
  /// the demanded vector elements.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth = 0) const override;

  bool isKnownNeverNaNForTargetNode(SDValue Op,
                                    const SelectionDAG &DAG,
                                    bool SNaN = false,
                                    unsigned Depth = 0) const override;
  /// Helper function that adds \p Reg to the LiveIn list of the DAG's
  /// MachineFunction.
  ///
  /// \returns a RegisterSDNode representing \p Reg if \p RawReg is true,
  /// otherwise a copy from the register.
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               unsigned Reg, EVT VT,
                               const SDLoc &SL,
                               bool RawReg = false) const;
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               unsigned Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()));
  }

  // Returns the raw live-in register rather than a copy from it.
  SDValue CreateLiveInRegisterRaw(SelectionDAG &DAG,
                                  const TargetRegisterClass *RC,
                                  unsigned Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()),
                                true);
  }

  /// Similar to CreateLiveInRegister, except the value may be loaded from a
  /// stack slot rather than passed in a register.
  SDValue loadStackInputValue(SelectionDAG &DAG,
                              EVT VT,
                              const SDLoc &SL,
                              int64_t Offset) const;

  SDValue storeStackInputValue(SelectionDAG &DAG,
                               const SDLoc &SL,
                               SDValue Chain,
                               SDValue ArgVal,
                               int64_t Offset) const;

  SDValue loadInputValue(SelectionDAG &DAG,
                         const TargetRegisterClass *RC,
                         EVT VT, const SDLoc &SL,
                         const ArgDescriptor &Arg) const;

  enum ImplicitParameter {
    FIRST_IMPLICIT,
    GRID_DIM = FIRST_IMPLICIT,
    GRID_OFFSET,
  };

  /// Helper function that returns the byte offset of the given
  /// type of implicit parameter.
  uint32_t getImplicitParameterOffset(const MachineFunction &MF,
                                      const ImplicitParameter Param) const;

  MVT getFenceOperandTy(const DataLayout &DL) const override {
    return MVT::i32;
  }

  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
};

namespace AMDGPUISD {

enum NodeType : unsigned {
  // AMDIL ISD Opcodes
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  UMUL,        // 32-bit unsigned multiplication
  BRANCH_COND,
  // End AMDIL ISD Opcodes

  // Function call.
  CALL,
  TC_RETURN,
  TRAP,

  // Masked control flow nodes.
  IF,
  ELSE,
  LOOP,

  // A uniform kernel return that terminates the wavefront.
  ENDPGM,

  // Return to a shader part's epilog code.
  RETURN_TO_EPILOG,

  // Return with values from a non-entry function.
  RET_FLAG,

  DWORDADDR,
  FRACT,

  /// CLAMP value between 0.0 and 1.0. NaN clamped to 0, following clamp output
  /// modifier behavior with dx10_enable.
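  ///
  /// In pseudocode (illustrative):
  ///   result = isnan(x) ? 0.0 : min(max(x, 0.0), 1.0)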
  CLAMP,

  // This is SETCC with the full mask result which is used for a compare with a
  // result bit per item in the wavefront.
  SETCC,
  SETREG,

  DENORM_MODE,

  // FP ops with input and output chain.
  FMA_W_CHAIN,
  FMUL_W_CHAIN,

  // SIN_HW, COS_HW - f32 for SI, 1 ULP max error, valid from -100 pi to
  // 100 pi. Denormals handled on some parts.
  COS_HW,
  SIN_HW,
  FMAX_LEGACY,
  FMIN_LEGACY,

  FMAX3,
  SMAX3,
  UMAX3,
  FMIN3,
  SMIN3,
  UMIN3,
  FMED3,
  SMED3,
  UMED3,
  FDOT2,
  URECIP,
  DIV_SCALE,
  DIV_FMAS,
  DIV_FIXUP,
  // For emitting ISD::FMAD when f32 denormals are enabled because mac/mad is
  // treated as an illegal operation.
  FMAD_FTZ,
  TRIG_PREOP, // 1 ULP max error for f64

  // RCP, RSQ - For f32, 1 ULP max error, no denormal handling.
  //            For f64, max error 2^29 ULP, handles denormals.
  RCP,
  RSQ,
  RCP_LEGACY,
  RSQ_LEGACY,
  RCP_IFLAG,
  FMUL_LEGACY,
  RSQ_CLAMP,
  LDEXP,
  FP_CLASS,
  DOT4,
  CARRY,
  BORROW,
  BFE_U32, // Extract range of bits with zero extension to 32 bits.
  BFE_I32, // Extract range of bits with sign extension to 32 bits.
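  // Illustrative (assuming operand order src, offset, width):
  //   BFE_U32 x, 8, 8 yields (x >> 8) & 0xff.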
  BFI, // (src0 & src1) | (~src0 & src2)
  BFM, // Insert a range of bits into a 32-bit word.
  FFBH_U32, // ctlz with -1 if input is zero.
  FFBH_I32,
  FFBL_B32, // cttz with -1 if input is zero.
  MUL_U24,
  MUL_I24,
  MULHI_U24,
  MULHI_I24,
  MAD_U24,
  MAD_I24,
  MAD_U64_U32,
  MAD_I64_I32,
  MUL_LOHI_I24,
  MUL_LOHI_U24,
  PERM,
  TEXTURE_FETCH,
  EXPORT, // exp on SI+
  EXPORT_DONE, // exp on SI+ with done bit set
  R600_EXPORT,
  CONST_ADDRESS,
  REGISTER_LOAD,
  REGISTER_STORE,
  SAMPLE,
  SAMPLEB,
  SAMPLED,
  SAMPLEL,

  // These cvt_f32_ubyte* nodes need to remain consecutive and in order.
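  // For example (illustrative), CVT_F32_UBYTE1 converts byte 1 (bits [15:8])
  // of the i32 input to f32.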
  CVT_F32_UBYTE0,
  CVT_F32_UBYTE1,
  CVT_F32_UBYTE2,
  CVT_F32_UBYTE3,

  // Convert two f32 values into a single register holding two packed f16
  // values, with round toward zero.
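  // Illustrative packing (assumed layout): the first source lands in the low
  // 16 bits, i.e. result = (f16(b) << 16) | f16(a).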
  CVT_PKRTZ_F16_F32,
  CVT_PKNORM_I16_F32,
  CVT_PKNORM_U16_F32,
  CVT_PK_I16_I32,
  CVT_PK_U16_U32,

  // Same as the standard node, except the high bits of the resulting integer
  // are known 0.
  FP_TO_FP16,

  // Wrapper around fp16 results that are known to zero the high bits.
  FP16_ZEXT,

  /// This node is for VLIW targets and it is used to represent a vector
  /// that is stored in consecutive registers with the same channel.
  /// For example:
  ///   |X  |Y|Z|W|
  /// T0|v.x| | | |
  /// T1|v.y| | | |
  /// T2|v.z| | | |
  /// T3|v.w| | | |
  BUILD_VERTICAL_VECTOR,
  /// Pointer to the start of the shader's constant data.
  CONST_DATA_PTR,
  INTERP_P1LL_F16,
  INTERP_P1LV_F16,
  INTERP_P2_F16,
  PC_ADD_REL_OFFSET,
  LDS,
  KILL,
  DUMMY_CHAIN,
  FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LOAD_D16_HI,
  LOAD_D16_LO,
  LOAD_D16_HI_I8,
  LOAD_D16_HI_U8,
  LOAD_D16_LO_I8,
  LOAD_D16_LO_U8,

  STORE_MSKOR,
  LOAD_CONSTANT,
  TBUFFER_STORE_FORMAT,
  TBUFFER_STORE_FORMAT_D16,
  TBUFFER_LOAD_FORMAT,
  TBUFFER_LOAD_FORMAT_D16,
  DS_ORDERED_COUNT,
  ATOMIC_CMP_SWAP,
  ATOMIC_INC,
  ATOMIC_DEC,
  ATOMIC_LOAD_FMIN,
  ATOMIC_LOAD_FMAX,
  BUFFER_LOAD,
  BUFFER_LOAD_UBYTE,
  BUFFER_LOAD_USHORT,
  BUFFER_LOAD_BYTE,
  BUFFER_LOAD_SHORT,
  BUFFER_LOAD_FORMAT,
  BUFFER_LOAD_FORMAT_D16,
  SBUFFER_LOAD,
  BUFFER_STORE,
  BUFFER_STORE_BYTE,
  BUFFER_STORE_SHORT,
  BUFFER_STORE_FORMAT,
  BUFFER_STORE_FORMAT_D16,
  BUFFER_ATOMIC_SWAP,
  BUFFER_ATOMIC_ADD,
  BUFFER_ATOMIC_SUB,
  BUFFER_ATOMIC_SMIN,
  BUFFER_ATOMIC_UMIN,
  BUFFER_ATOMIC_SMAX,
  BUFFER_ATOMIC_UMAX,
  BUFFER_ATOMIC_AND,
  BUFFER_ATOMIC_OR,
  BUFFER_ATOMIC_XOR,
  BUFFER_ATOMIC_INC,
  BUFFER_ATOMIC_DEC,
  BUFFER_ATOMIC_CMPSWAP,
  BUFFER_ATOMIC_FADD,
  BUFFER_ATOMIC_PK_FADD,
  ATOMIC_PK_FADD,

  LAST_AMDGPU_ISD_NUMBER
};

} // End namespace AMDGPUISD

} // End namespace llvm

#endif