1 //===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This is the parent TargetLowering class for hardware code gen
11 /// targets.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "AMDGPUISelLowering.h"
16 #include "AMDGPU.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPUMachineFunction.h"
19 #include "GCNSubtarget.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "llvm/CodeGen/Analysis.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/IR/DiagnosticInfo.h"
24 #include "llvm/IR/IntrinsicsAMDGPU.h"
25 #include "llvm/Support/CommandLine.h"
26 #include "llvm/Support/KnownBits.h"
27 #include "llvm/Target/TargetMachine.h"
28 
29 using namespace llvm;
30 
31 #include "AMDGPUGenCallingConv.inc"
32 
33 static cl::opt<bool> AMDGPUBypassSlowDiv(
34   "amdgpu-bypass-slow-div",
35   cl::desc("Skip 64-bit divide for dynamic 32-bit values"),
36   cl::init(true));
37 
38 // Find a larger type to do a load / store of a vector with.
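// For example, v2i8 (16 bits) maps to i16, and v3f32 (96 bits) maps to v3i32.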
39 EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
40   unsigned StoreSize = VT.getStoreSizeInBits();
41   if (StoreSize <= 32)
42     return EVT::getIntegerVT(Ctx, StoreSize);
43 
44   assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
45   return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
46 }
47 
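// Return the maximum number of bits that may be needed to hold the value of
// Op as an unsigned integer, based on known-bits analysis.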
48 unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
49   return DAG.computeKnownBits(Op).countMaxActiveBits();
50 }
51 
52 unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
53   // In order for this to be a signed 24-bit value, bit 23 must
54   // be a sign bit.
55   return DAG.ComputeMaxSignificantBits(Op);
56 }
57 
58 AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
59                                            const AMDGPUSubtarget &STI)
60     : TargetLowering(TM), Subtarget(&STI) {
61   // Lower floating point store/load to integer store/load to reduce the number
62   // of patterns in tablegen.
63   setOperationAction(ISD::LOAD, MVT::f32, Promote);
64   AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
65 
66   setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
67   AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
68 
69   setOperationAction(ISD::LOAD, MVT::v3f32, Promote);
70   AddPromotedToType(ISD::LOAD, MVT::v3f32, MVT::v3i32);
71 
72   setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
73   AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
74 
75   setOperationAction(ISD::LOAD, MVT::v5f32, Promote);
76   AddPromotedToType(ISD::LOAD, MVT::v5f32, MVT::v5i32);
77 
78   setOperationAction(ISD::LOAD, MVT::v6f32, Promote);
79   AddPromotedToType(ISD::LOAD, MVT::v6f32, MVT::v6i32);
80 
81   setOperationAction(ISD::LOAD, MVT::v7f32, Promote);
82   AddPromotedToType(ISD::LOAD, MVT::v7f32, MVT::v7i32);
83 
84   setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
85   AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
86 
87   setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
88   AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
89 
90   setOperationAction(ISD::LOAD, MVT::v32f32, Promote);
91   AddPromotedToType(ISD::LOAD, MVT::v32f32, MVT::v32i32);
92 
93   setOperationAction(ISD::LOAD, MVT::i64, Promote);
94   AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
95 
96   setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
97   AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);
98 
99   setOperationAction(ISD::LOAD, MVT::f64, Promote);
100   AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);
101 
102   setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
103   AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);
104 
105   setOperationAction(ISD::LOAD, MVT::v3i64, Promote);
106   AddPromotedToType(ISD::LOAD, MVT::v3i64, MVT::v6i32);
107 
108   setOperationAction(ISD::LOAD, MVT::v4i64, Promote);
109   AddPromotedToType(ISD::LOAD, MVT::v4i64, MVT::v8i32);
110 
111   setOperationAction(ISD::LOAD, MVT::v3f64, Promote);
112   AddPromotedToType(ISD::LOAD, MVT::v3f64, MVT::v6i32);
113 
114   setOperationAction(ISD::LOAD, MVT::v4f64, Promote);
115   AddPromotedToType(ISD::LOAD, MVT::v4f64, MVT::v8i32);
116 
117   setOperationAction(ISD::LOAD, MVT::v8i64, Promote);
118   AddPromotedToType(ISD::LOAD, MVT::v8i64, MVT::v16i32);
119 
120   setOperationAction(ISD::LOAD, MVT::v8f64, Promote);
121   AddPromotedToType(ISD::LOAD, MVT::v8f64, MVT::v16i32);
122 
123   setOperationAction(ISD::LOAD, MVT::v16i64, Promote);
124   AddPromotedToType(ISD::LOAD, MVT::v16i64, MVT::v32i32);
125 
126   setOperationAction(ISD::LOAD, MVT::v16f64, Promote);
127   AddPromotedToType(ISD::LOAD, MVT::v16f64, MVT::v32i32);
128 
129   // There are no 64-bit extloads. These should be done as a 32-bit extload and
130   // an extension to 64-bit.
131   for (MVT VT : MVT::integer_valuetypes())
132     setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i64, VT,
133                      Expand);
134 
135   for (MVT VT : MVT::integer_valuetypes()) {
136     if (VT == MVT::i64)
137       continue;
138 
139     for (auto Op : {ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}) {
140       setLoadExtAction(Op, VT, MVT::i1, Promote);
141       setLoadExtAction(Op, VT, MVT::i8, Legal);
142       setLoadExtAction(Op, VT, MVT::i16, Legal);
143       setLoadExtAction(Op, VT, MVT::i32, Expand);
144     }
145   }
146 
147   for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
148     for (auto MemVT :
149          {MVT::v2i8, MVT::v4i8, MVT::v2i16, MVT::v3i16, MVT::v4i16})
150       setLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}, VT, MemVT,
151                        Expand);
152 
153   setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
154   setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
155   setLoadExtAction(ISD::EXTLOAD, MVT::v3f32, MVT::v3f16, Expand);
156   setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
157   setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
158   setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Expand);
159   setLoadExtAction(ISD::EXTLOAD, MVT::v32f32, MVT::v32f16, Expand);
160 
161   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
162   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
163   setLoadExtAction(ISD::EXTLOAD, MVT::v3f64, MVT::v3f32, Expand);
164   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
165   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);
166   setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16f32, Expand);
167 
168   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
169   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
170   setLoadExtAction(ISD::EXTLOAD, MVT::v3f64, MVT::v3f16, Expand);
171   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
172   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
173   setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16f16, Expand);
174 
175   setOperationAction(ISD::STORE, MVT::f32, Promote);
176   AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);
177 
178   setOperationAction(ISD::STORE, MVT::v2f32, Promote);
179   AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
180 
181   setOperationAction(ISD::STORE, MVT::v3f32, Promote);
182   AddPromotedToType(ISD::STORE, MVT::v3f32, MVT::v3i32);
183 
184   setOperationAction(ISD::STORE, MVT::v4f32, Promote);
185   AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
186 
187   setOperationAction(ISD::STORE, MVT::v5f32, Promote);
188   AddPromotedToType(ISD::STORE, MVT::v5f32, MVT::v5i32);
189 
190   setOperationAction(ISD::STORE, MVT::v6f32, Promote);
191   AddPromotedToType(ISD::STORE, MVT::v6f32, MVT::v6i32);
192 
193   setOperationAction(ISD::STORE, MVT::v7f32, Promote);
194   AddPromotedToType(ISD::STORE, MVT::v7f32, MVT::v7i32);
195 
196   setOperationAction(ISD::STORE, MVT::v8f32, Promote);
197   AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);
198 
199   setOperationAction(ISD::STORE, MVT::v16f32, Promote);
200   AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
201 
202   setOperationAction(ISD::STORE, MVT::v32f32, Promote);
203   AddPromotedToType(ISD::STORE, MVT::v32f32, MVT::v32i32);
204 
205   setOperationAction(ISD::STORE, MVT::i64, Promote);
206   AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
207 
208   setOperationAction(ISD::STORE, MVT::v2i64, Promote);
209   AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);
210 
211   setOperationAction(ISD::STORE, MVT::f64, Promote);
212   AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);
213 
214   setOperationAction(ISD::STORE, MVT::v2f64, Promote);
215   AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);
216 
217   setOperationAction(ISD::STORE, MVT::v3i64, Promote);
218   AddPromotedToType(ISD::STORE, MVT::v3i64, MVT::v6i32);
219 
220   setOperationAction(ISD::STORE, MVT::v3f64, Promote);
221   AddPromotedToType(ISD::STORE, MVT::v3f64, MVT::v6i32);
222 
223   setOperationAction(ISD::STORE, MVT::v4i64, Promote);
224   AddPromotedToType(ISD::STORE, MVT::v4i64, MVT::v8i32);
225 
226   setOperationAction(ISD::STORE, MVT::v4f64, Promote);
227   AddPromotedToType(ISD::STORE, MVT::v4f64, MVT::v8i32);
228 
229   setOperationAction(ISD::STORE, MVT::v8i64, Promote);
230   AddPromotedToType(ISD::STORE, MVT::v8i64, MVT::v16i32);
231 
232   setOperationAction(ISD::STORE, MVT::v8f64, Promote);
233   AddPromotedToType(ISD::STORE, MVT::v8f64, MVT::v16i32);
234 
235   setOperationAction(ISD::STORE, MVT::v16i64, Promote);
236   AddPromotedToType(ISD::STORE, MVT::v16i64, MVT::v32i32);
237 
238   setOperationAction(ISD::STORE, MVT::v16f64, Promote);
239   AddPromotedToType(ISD::STORE, MVT::v16f64, MVT::v32i32);
240 
241   setTruncStoreAction(MVT::i64, MVT::i1, Expand);
242   setTruncStoreAction(MVT::i64, MVT::i8, Expand);
243   setTruncStoreAction(MVT::i64, MVT::i16, Expand);
244   setTruncStoreAction(MVT::i64, MVT::i32, Expand);
245 
246   setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
247   setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
248   setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
249   setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);
250 
251   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
252   setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
253   setTruncStoreAction(MVT::v3f32, MVT::v3f16, Expand);
254   setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
255   setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
256   setTruncStoreAction(MVT::v16f32, MVT::v16f16, Expand);
257   setTruncStoreAction(MVT::v32f32, MVT::v32f16, Expand);
258 
259   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
260   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
261 
262   setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
263   setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);
264 
265   setTruncStoreAction(MVT::v3i64, MVT::v3i32, Expand);
266   setTruncStoreAction(MVT::v3i64, MVT::v3i16, Expand);
267   setTruncStoreAction(MVT::v3f64, MVT::v3f32, Expand);
268   setTruncStoreAction(MVT::v3f64, MVT::v3f16, Expand);
269 
270   setTruncStoreAction(MVT::v4i64, MVT::v4i32, Expand);
271   setTruncStoreAction(MVT::v4i64, MVT::v4i16, Expand);
272   setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
273   setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);
274 
275   setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
276   setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);
277 
278   setTruncStoreAction(MVT::v16f64, MVT::v16f32, Expand);
279   setTruncStoreAction(MVT::v16f64, MVT::v16f16, Expand);
280   setTruncStoreAction(MVT::v16i64, MVT::v16i16, Expand);
282   setTruncStoreAction(MVT::v16i64, MVT::v16i8, Expand);
284   setTruncStoreAction(MVT::v16i64, MVT::v16i1, Expand);
285 
286   setOperationAction(ISD::Constant, {MVT::i32, MVT::i64}, Legal);
287   setOperationAction(ISD::ConstantFP, {MVT::f32, MVT::f64}, Legal);
288 
289   setOperationAction({ISD::BR_JT, ISD::BRIND}, MVT::Other, Expand);
290 
291   // This is totally unsupported; just custom lower it to produce an error.
292   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
293 
294   // Library functions.  These default to Expand, but we have instructions
295   // for them.
296   setOperationAction({ISD::FCEIL, ISD::FEXP2, ISD::FPOW, ISD::FLOG2, ISD::FABS,
297                       ISD::FFLOOR, ISD::FRINT, ISD::FTRUNC, ISD::FMINNUM,
298                       ISD::FMAXNUM},
299                      MVT::f32, Legal);
300 
301   setOperationAction(ISD::FROUND, {MVT::f32, MVT::f64}, Custom);
302 
303   setOperationAction({ISD::FLOG, ISD::FLOG10, ISD::FEXP}, MVT::f32, Custom);
304 
305   setOperationAction(ISD::FNEARBYINT, {MVT::f32, MVT::f64}, Custom);
306 
307   setOperationAction(ISD::FREM, {MVT::f16, MVT::f32, MVT::f64}, Custom);
308 
309   // Expand to fneg + fadd.
310   setOperationAction(ISD::FSUB, MVT::f64, Expand);
311 
312   setOperationAction(ISD::CONCAT_VECTORS,
313                      {MVT::v3i32, MVT::v3f32, MVT::v4i32, MVT::v4f32,
314                       MVT::v5i32, MVT::v5f32, MVT::v6i32, MVT::v6f32,
315                       MVT::v7i32, MVT::v7f32, MVT::v8i32, MVT::v8f32},
316                      Custom);
317   setOperationAction(
318       ISD::EXTRACT_SUBVECTOR,
319       {MVT::v2f16,  MVT::v2i16,  MVT::v4f16,  MVT::v4i16,  MVT::v2f32,
320        MVT::v2i32,  MVT::v3f32,  MVT::v3i32,  MVT::v4f32,  MVT::v4i32,
321        MVT::v5f32,  MVT::v5i32,  MVT::v6f32,  MVT::v6i32,  MVT::v7f32,
322        MVT::v7i32,  MVT::v8f32,  MVT::v8i32,  MVT::v16f16, MVT::v16i16,
323        MVT::v16f32, MVT::v16i32, MVT::v32f32, MVT::v32i32, MVT::v2f64,
324        MVT::v2i64,  MVT::v3f64,  MVT::v3i64,  MVT::v4f64,  MVT::v4i64,
325        MVT::v8f64,  MVT::v8i64,  MVT::v16f64, MVT::v16i64},
326       Custom);
327 
328   setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
329   setOperationAction(ISD::FP_TO_FP16, {MVT::f64, MVT::f32}, Custom);
330 
331   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
332   for (MVT VT : ScalarIntVTs) {
333     // These should use [SU]DIVREM, so set them to expand
334     setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, VT,
335                        Expand);
336 
337     // The GPU does not have a divrem instruction for signed or unsigned.
338     setOperationAction({ISD::SDIVREM, ISD::UDIVREM}, VT, Custom);
339 
340     // The GPU does not have [S|U]MUL_LOHI as a single instruction.
341     setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);
342 
343     setOperationAction({ISD::BSWAP, ISD::CTTZ, ISD::CTLZ}, VT, Expand);
344 
345     // AMDGPU uses ADDC/SUBC/ADDE/SUBE
346     setOperationAction({ISD::ADDC, ISD::SUBC, ISD::ADDE, ISD::SUBE}, VT, Legal);
347   }
348 
349   // The hardware supports 32-bit FSHR, but not FSHL.
350   setOperationAction(ISD::FSHR, MVT::i32, Legal);
351 
352   // The hardware supports 32-bit ROTR, but not ROTL.
353   setOperationAction(ISD::ROTL, {MVT::i32, MVT::i64}, Expand);
354   setOperationAction(ISD::ROTR, MVT::i64, Expand);
355 
356   setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::i16, Expand);
357 
358   setOperationAction({ISD::MUL, ISD::MULHU, ISD::MULHS}, MVT::i64, Expand);
359   setOperationAction(
360       {ISD::UINT_TO_FP, ISD::SINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
361       MVT::i64, Custom);
362   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
363 
364   setOperationAction({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX}, MVT::i32,
365                      Legal);
366 
367   setOperationAction(
368       {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
369       MVT::i64, Custom);
370 
371   static const MVT::SimpleValueType VectorIntTypes[] = {
372       MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, MVT::v6i32, MVT::v7i32};
373 
374   for (MVT VT : VectorIntTypes) {
375     // Expand the following operations for the current type by default.
376     setOperationAction({ISD::ADD,        ISD::AND,     ISD::FP_TO_SINT,
377                         ISD::FP_TO_UINT, ISD::MUL,     ISD::MULHU,
378                         ISD::MULHS,      ISD::OR,      ISD::SHL,
379                         ISD::SRA,        ISD::SRL,     ISD::ROTL,
380                         ISD::ROTR,       ISD::SUB,     ISD::SINT_TO_FP,
381                         ISD::UINT_TO_FP, ISD::SDIV,    ISD::UDIV,
382                         ISD::SREM,       ISD::UREM,    ISD::SMUL_LOHI,
383                         ISD::UMUL_LOHI,  ISD::SDIVREM, ISD::UDIVREM,
384                         ISD::SELECT,     ISD::VSELECT, ISD::SELECT_CC,
385                         ISD::XOR,        ISD::BSWAP,   ISD::CTPOP,
386                         ISD::CTTZ,       ISD::CTLZ,    ISD::VECTOR_SHUFFLE,
387                         ISD::SETCC},
388                        VT, Expand);
389   }
390 
391   static const MVT::SimpleValueType FloatVectorTypes[] = {
392       MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32, MVT::v6f32, MVT::v7f32};
393 
394   for (MVT VT : FloatVectorTypes) {
395     setOperationAction(
396         {ISD::FABS,    ISD::FMINNUM,      ISD::FMAXNUM,   ISD::FADD,
397          ISD::FCEIL,   ISD::FCOS,         ISD::FDIV,      ISD::FEXP2,
398          ISD::FEXP,    ISD::FLOG2,        ISD::FREM,      ISD::FLOG,
399          ISD::FLOG10,  ISD::FPOW,         ISD::FFLOOR,    ISD::FTRUNC,
400          ISD::FMUL,    ISD::FMA,          ISD::FRINT,     ISD::FNEARBYINT,
401          ISD::FSQRT,   ISD::FSIN,         ISD::FSUB,      ISD::FNEG,
402          ISD::VSELECT, ISD::SELECT_CC,    ISD::FCOPYSIGN, ISD::VECTOR_SHUFFLE,
403          ISD::SETCC,   ISD::FCANONICALIZE},
404         VT, Expand);
405   }
406 
407   // This causes an unrolled select operation to be used rather than expansion
408   // with bit operations. This is in general better, but the alternative using
409   // BFI instructions may be better if the select sources are SGPRs.
410   setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
411   AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);
412 
413   setOperationAction(ISD::SELECT, MVT::v3f32, Promote);
414   AddPromotedToType(ISD::SELECT, MVT::v3f32, MVT::v3i32);
415 
416   setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
417   AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);
418 
419   setOperationAction(ISD::SELECT, MVT::v5f32, Promote);
420   AddPromotedToType(ISD::SELECT, MVT::v5f32, MVT::v5i32);
421 
422   setOperationAction(ISD::SELECT, MVT::v6f32, Promote);
423   AddPromotedToType(ISD::SELECT, MVT::v6f32, MVT::v6i32);
424 
425   setOperationAction(ISD::SELECT, MVT::v7f32, Promote);
426   AddPromotedToType(ISD::SELECT, MVT::v7f32, MVT::v7i32);
427 
428   // There are no libcalls of any kind.
429   for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
430     setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);
431 
432   setSchedulingPreference(Sched::RegPressure);
433   setJumpIsExpensive(true);
434 
435   // FIXME: This is only partially true. If we have to do vector compares, any
436   // SGPR pair can be a condition register. If we have a uniform condition, we
437   // are better off doing SALU operations, where there is only one SCC. For now,
438   // we don't have a way of knowing during instruction selection if a condition
439   // will be uniform and we always use vector compares. Assume we are using
440   // vector compares until that is fixed.
441   setHasMultipleConditionRegisters(true);
442 
443   setMinCmpXchgSizeInBits(32);
444   setSupportsUnalignedAtomics(false);
445 
446   PredictableSelectIsExpensive = false;
447 
448   // We want to find all load dependencies for long chains of stores to enable
449   // merging into very wide vectors. The problem is with vectors with > 4
450   // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
451   // vectors are a legal type, even though we have to split the loads
452   // usually. When we can more precisely specify load legality per address
453   // space, we should be able to make FindBetterChain/MergeConsecutiveStores
454   // smarter so that they can figure out what to do in 2 iterations without all
455   // N > 4 stores on the same chain.
456   GatherAllAliasesMaxDepth = 16;
457 
458   // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
459   // about these during lowering.
460   MaxStoresPerMemcpy  = 0xffffffff;
461   MaxStoresPerMemmove = 0xffffffff;
462   MaxStoresPerMemset  = 0xffffffff;
463 
464   // The expansion for 64-bit division is enormous.
465   if (AMDGPUBypassSlowDiv)
466     addBypassSlowDiv(64, 32);
467 
468   setTargetDAGCombine({ISD::BITCAST,    ISD::SHL,
469                        ISD::SRA,        ISD::SRL,
470                        ISD::TRUNCATE,   ISD::MUL,
471                        ISD::SMUL_LOHI,  ISD::UMUL_LOHI,
472                        ISD::MULHU,      ISD::MULHS,
473                        ISD::SELECT,     ISD::SELECT_CC,
474                        ISD::STORE,      ISD::FADD,
475                        ISD::FSUB,       ISD::FNEG,
476                        ISD::FABS,       ISD::AssertZext,
477                        ISD::AssertSext, ISD::INTRINSIC_WO_CHAIN});
478 }
479 
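// Return true if the sign of a floating-point zero can be ignored for this
// operation, either because no-signed-zeros FP math is enabled globally or
// because the node itself carries the nsz flag.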
480 bool AMDGPUTargetLowering::mayIgnoreSignedZero(SDValue Op) const {
481   if (getTargetMachine().Options.NoSignedZerosFPMath)
482     return true;
483 
484   const auto Flags = Op.getNode()->getFlags();
485   if (Flags.hasNoSignedZeros())
486     return true;
487 
488   return false;
489 }
490 
491 //===----------------------------------------------------------------------===//
492 // Target Information
493 //===----------------------------------------------------------------------===//
494 
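// Return true if an fneg of a value produced by this opcode can typically be
// folded into the operation itself (e.g. via source or output modifiers), so
// the negation is effectively free.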
495 LLVM_READNONE
496 static bool fnegFoldsIntoOp(unsigned Opc) {
497   switch (Opc) {
498   case ISD::FADD:
499   case ISD::FSUB:
500   case ISD::FMUL:
501   case ISD::FMA:
502   case ISD::FMAD:
503   case ISD::FMINNUM:
504   case ISD::FMAXNUM:
505   case ISD::FMINNUM_IEEE:
506   case ISD::FMAXNUM_IEEE:
507   case ISD::FSIN:
508   case ISD::FTRUNC:
509   case ISD::FRINT:
510   case ISD::FNEARBYINT:
511   case ISD::FCANONICALIZE:
512   case AMDGPUISD::RCP:
513   case AMDGPUISD::RCP_LEGACY:
514   case AMDGPUISD::RCP_IFLAG:
515   case AMDGPUISD::SIN_HW:
516   case AMDGPUISD::FMUL_LEGACY:
517   case AMDGPUISD::FMIN_LEGACY:
518   case AMDGPUISD::FMAX_LEGACY:
519   case AMDGPUISD::FMED3:
520     // TODO: handle llvm.amdgcn.fma.legacy
521     return true;
522   default:
523     return false;
524   }
525 }
526 
527 /// \returns true if the operation will definitely need to use a 64-bit
528 /// encoding, and thus will use a VOP3 encoding regardless of the source
529 /// modifiers.
530 LLVM_READONLY
531 static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
532   return N->getNumOperands() > 2 || VT == MVT::f64;
533 }
534 
535 // Most FP instructions support source modifiers, but this could be refined
536 // slightly.
537 LLVM_READONLY
538 static bool hasSourceMods(const SDNode *N) {
539   if (isa<MemSDNode>(N))
540     return false;
541 
542   switch (N->getOpcode()) {
543   case ISD::CopyToReg:
544   case ISD::SELECT:
545   case ISD::FDIV:
546   case ISD::FREM:
547   case ISD::INLINEASM:
548   case ISD::INLINEASM_BR:
549   case AMDGPUISD::DIV_SCALE:
550   case ISD::INTRINSIC_W_CHAIN:
551 
552   // TODO: Should really be looking at the users of the bitcast. These are
553   // problematic because bitcasts are used to legalize all stores to integer
554   // types.
555   case ISD::BITCAST:
556     return false;
557   case ISD::INTRINSIC_WO_CHAIN: {
558     switch (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()) {
559     case Intrinsic::amdgcn_interp_p1:
560     case Intrinsic::amdgcn_interp_p2:
561     case Intrinsic::amdgcn_interp_mov:
562     case Intrinsic::amdgcn_interp_p1_f16:
563     case Intrinsic::amdgcn_interp_p2_f16:
564       return false;
565     default:
566       return true;
567     }
568   }
569   default:
570     return true;
571   }
572 }
573 
574 bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
575                                                  unsigned CostThreshold) {
576   // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
577   // it is truly free to use a source modifier in all cases. For users that
578   // would otherwise use a smaller encoding, adding a source modifier forces a
579   // VOP3 encoding and increases code size. Try to avoid increasing code size
580   // unless we know it will save on the instruction count.
581   unsigned NumMayIncreaseSize = 0;
582   MVT VT = N->getValueType(0).getScalarType().getSimpleVT();
583 
584   // XXX - Should this limit number of uses to check?
585   for (const SDNode *U : N->uses()) {
586     if (!hasSourceMods(U))
587       return false;
588 
589     if (!opMustUseVOP3Encoding(U, VT)) {
590       if (++NumMayIncreaseSize > CostThreshold)
591         return false;
592     }
593   }
594 
595   return true;
596 }
597 
598 EVT AMDGPUTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
599                                               ISD::NodeType ExtendKind) const {
600   assert(!VT.isVector() && "only scalar expected");
601 
602   // Round to the next multiple of 32-bits.
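  // For example, an i1 or i16 return value is extended and returned in an i32,
  // and an i48 value in an i64.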
603   unsigned Size = VT.getSizeInBits();
604   if (Size <= 32)
605     return MVT::i32;
606   return EVT::getIntegerVT(Context, 32 * ((Size + 31) / 32));
607 }
608 
609 MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
610   return MVT::i32;
611 }
612 
613 bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
614   return true;
615 }
616 
617 // The backend supports 32-bit and 64-bit floating point immediates.
618 // FIXME: Why are we reporting vectors of FP immediates as legal?
619 bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
620                                         bool ForCodeSize) const {
621   EVT ScalarVT = VT.getScalarType();
622   return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
623          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
624 }
625 
626 // We don't want to shrink f64 / f32 constants.
627 bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
628   EVT ScalarVT = VT.getScalarType();
629   return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
630 }
631 
632 bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
633                                                  ISD::LoadExtType ExtTy,
634                                                  EVT NewVT) const {
635   // TODO: This may be worth removing. Check regression tests for diffs.
636   if (!TargetLoweringBase::shouldReduceLoadWidth(N, ExtTy, NewVT))
637     return false;
638 
639   unsigned NewSize = NewVT.getStoreSizeInBits();
640 
641   // If we are reducing to a 32-bit load or a smaller multi-dword load,
642   // this is always better.
643   if (NewSize >= 32)
644     return true;
645 
646   EVT OldVT = N->getValueType(0);
647   unsigned OldSize = OldVT.getStoreSizeInBits();
648 
649   MemSDNode *MN = cast<MemSDNode>(N);
650   unsigned AS = MN->getAddressSpace();
651   // Do not shrink an aligned scalar load to sub-dword.
652   // Scalar engine cannot do sub-dword loads.
653   if (OldSize >= 32 && NewSize < 32 && MN->getAlign() >= Align(4) &&
654       (AS == AMDGPUAS::CONSTANT_ADDRESS ||
655        AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
656        (isa<LoadSDNode>(N) && AS == AMDGPUAS::GLOBAL_ADDRESS &&
657         MN->isInvariant())) &&
658       AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand()))
659     return false;
660 
661   // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
662   // extloads, so doing one requires using a buffer_load. In cases where we
663   // still couldn't use a scalar load, using the wider load shouldn't really
664   // hurt anything.
665 
666   // If the old size already had to be an extload, there's no harm in continuing
667   // to reduce the width.
668   return (OldSize < 32);
669 }
670 
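// Prefer not to bitcast loads of 32-bit scalar elements, or to cast them to
// types with scalar elements narrower than 32 bits; otherwise the cast is only
// worthwhile if the new type is still a fast access for this memory operand.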
671 bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
672                                                    const SelectionDAG &DAG,
673                                                    const MachineMemOperand &MMO) const {
674 
675   assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());
676 
677   if (LoadTy.getScalarType() == MVT::i32)
678     return false;
679 
680   unsigned LScalarSize = LoadTy.getScalarSizeInBits();
681   unsigned CastScalarSize = CastTy.getScalarSizeInBits();
682 
683   if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
684     return false;
685 
686   bool Fast = false;
687   return allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
688                                         CastTy, MMO, &Fast) &&
689          Fast;
690 }
691 
692 // SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
693 // profitable with the expansion for 64-bit since it's generally good to
694 // speculate things.
695 // FIXME: These should really have the size as a parameter.
696 bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
697   return true;
698 }
699 
700 bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
701   return true;
702 }
703 
704 bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
705   switch (N->getOpcode()) {
706   case ISD::EntryToken:
707   case ISD::TokenFactor:
708     return true;
709   case ISD::INTRINSIC_WO_CHAIN: {
710     unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
711     switch (IntrID) {
712     case Intrinsic::amdgcn_readfirstlane:
713     case Intrinsic::amdgcn_readlane:
714       return true;
715     }
716     return false;
717   }
718   case ISD::LOAD:
719     if (cast<LoadSDNode>(N)->getMemOperand()->getAddrSpace() ==
720         AMDGPUAS::CONSTANT_ADDRESS_32BIT)
721       return true;
722     return false;
723   case AMDGPUISD::SETCC: // ballot-style instruction
724     return true;
725   }
726   return false;
727 }
728 
729 SDValue AMDGPUTargetLowering::getNegatedExpression(
730     SDValue Op, SelectionDAG &DAG, bool LegalOperations, bool ForCodeSize,
731     NegatibleCost &Cost, unsigned Depth) const {
732 
733   switch (Op.getOpcode()) {
734   case ISD::FMA:
735   case ISD::FMAD: {
736     // Negating a fma is not free if it has users without source mods.
737     if (!allUsesHaveSourceMods(Op.getNode()))
738       return SDValue();
739     break;
740   }
741   default:
742     break;
743   }
744 
745   return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
746                                               ForCodeSize, Cost, Depth);
747 }
748 
749 //===---------------------------------------------------------------------===//
750 // Target Properties
751 //===---------------------------------------------------------------------===//
752 
753 bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
754   assert(VT.isFloatingPoint());
755 
756   // Packed operations do not have a fabs modifier.
757   return VT == MVT::f32 || VT == MVT::f64 ||
758          (Subtarget->has16BitInsts() && VT == MVT::f16);
759 }
760 
761 bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
762   assert(VT.isFloatingPoint());
763   // Report this based on the end legalized type.
764   VT = VT.getScalarType();
765   return VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f16;
766 }
767 
768 bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
769                                                         unsigned NumElem,
770                                                         unsigned AS) const {
771   return true;
772 }
773 
774 bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
775   // There are few operations which truly have vector input operands. Any vector
776   // operation is going to involve operations on each component, and a
777   // build_vector will be a copy per element, so it always makes sense to use a
778   // build_vector input in place of the extracted element to avoid a copy into a
779   // super register.
780   //
781   // We should probably only do this if all users are extracts only, but this
782   // should be the common case.
783   return true;
784 }
785 
786 bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
787   // Truncate is just accessing a subregister.
788 
789   unsigned SrcSize = Source.getSizeInBits();
790   unsigned DestSize = Dest.getSizeInBits();
791 
792   return DestSize < SrcSize && DestSize % 32 == 0;
793 }
794 
795 bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
796   // Truncate is just accessing a subregister.
797 
798   unsigned SrcSize = Source->getScalarSizeInBits();
799   unsigned DestSize = Dest->getScalarSizeInBits();
800 
801   if (DestSize == 16 && Subtarget->has16BitInsts())
802     return SrcSize >= 32;
803 
804   return DestSize < SrcSize && DestSize % 32 == 0;
805 }
806 
807 bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
808   unsigned SrcSize = Src->getScalarSizeInBits();
809   unsigned DestSize = Dest->getScalarSizeInBits();
810 
811   if (SrcSize == 16 && Subtarget->has16BitInsts())
812     return DestSize >= 32;
813 
814   return SrcSize == 32 && DestSize == 64;
815 }
816 
817 bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
818   // Any register load of a 64-bit value really requires 2 32-bit moves. For all
819   // practical purposes, the extra mov 0 to load a 64-bit is free.  As used,
820   // this will enable reducing 64-bit operations to 32-bit, which is always
821   // good.
822 
823   if (Src == MVT::i16)
824     return Dest == MVT::i32 || Dest == MVT::i64;
825 
826   return Src == MVT::i32 && Dest == MVT::i64;
827 }
828 
829 bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
830   return isZExtFree(Val.getValueType(), VT2);
831 }
832 
833 bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
834   // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
835   // limited number of native 64-bit operations. Shrinking an operation to fit
836   // in a single 32-bit register should always be helpful. As currently used,
837   // this is much less general than the name suggests, and is only used in
838   // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
839   // not profitable, and may actually be harmful.
840   return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
841 }
842 
843 //===---------------------------------------------------------------------===//
844 // TargetLowering Callbacks
845 //===---------------------------------------------------------------------===//
846 
847 CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
848                                                   bool IsVarArg) {
849   switch (CC) {
850   case CallingConv::AMDGPU_VS:
851   case CallingConv::AMDGPU_GS:
852   case CallingConv::AMDGPU_PS:
853   case CallingConv::AMDGPU_CS:
854   case CallingConv::AMDGPU_HS:
855   case CallingConv::AMDGPU_ES:
856   case CallingConv::AMDGPU_LS:
857     return CC_AMDGPU;
858   case CallingConv::C:
859   case CallingConv::Fast:
860   case CallingConv::Cold:
861     return CC_AMDGPU_Func;
862   case CallingConv::AMDGPU_Gfx:
863     return CC_SI_Gfx;
864   case CallingConv::AMDGPU_KERNEL:
865   case CallingConv::SPIR_KERNEL:
866   default:
867     report_fatal_error("Unsupported calling convention for call");
868   }
869 }
870 
871 CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
872                                                     bool IsVarArg) {
873   switch (CC) {
874   case CallingConv::AMDGPU_KERNEL:
875   case CallingConv::SPIR_KERNEL:
876     llvm_unreachable("kernels should not be handled here");
877   case CallingConv::AMDGPU_VS:
878   case CallingConv::AMDGPU_GS:
879   case CallingConv::AMDGPU_PS:
880   case CallingConv::AMDGPU_CS:
881   case CallingConv::AMDGPU_HS:
882   case CallingConv::AMDGPU_ES:
883   case CallingConv::AMDGPU_LS:
884     return RetCC_SI_Shader;
885   case CallingConv::AMDGPU_Gfx:
886     return RetCC_SI_Gfx;
887   case CallingConv::C:
888   case CallingConv::Fast:
889   case CallingConv::Cold:
890     return RetCC_AMDGPU_Func;
891   default:
892     report_fatal_error("Unsupported calling convention.");
893   }
894 }
895 
896 /// The SelectionDAGBuilder will automatically promote function arguments
897 /// with illegal types.  However, this does not work for the AMDGPU targets
898 /// since the function arguments are stored in memory as these illegal types.
899 /// In order to handle this properly we need to get the original type sizes
900 /// from the LLVM IR Function and fix up the ISD::InputArg values before
901 /// passing them to AnalyzeFormalArguments().
902 
903 /// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
904 /// input values across multiple registers.  Each item in the Ins array
905 /// represents a single value that will be stored in registers.  Ins[x].VT is
906 /// the value type of the value that will be stored in the register, so
907 /// whatever SDNode we lower the argument to needs to be this type.
908 ///
909 /// In order to correctly lower the arguments we need to know the size of each
910 /// argument.  Since Ins[x].VT gives us the size of the register that will
911 /// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
912 /// for the original function argument so that we can deduce the correct memory
913 /// type to use for Ins[x].  In most cases the correct memory type will be
914 /// Ins[x].ArgVT.  However, this will not always be the case.  If, for example,
915 /// we have a kernel argument of type v8i8, this argument will be split into
916 /// 8 parts and each part will be represented by its own item in the Ins array.
917 /// For each part the Ins[x].ArgVT will be v8i8, which is the full type of
918 /// the argument before it was split.  From this, we deduce that the memory type
919 /// for each individual part is i8.  We pass the memory type as LocVT to the
920 /// calling convention analysis function and the register type (Ins[x].VT) as
921 /// the ValVT.
922 void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
923   CCState &State,
924   const SmallVectorImpl<ISD::InputArg> &Ins) const {
925   const MachineFunction &MF = State.getMachineFunction();
926   const Function &Fn = MF.getFunction();
927   LLVMContext &Ctx = Fn.getParent()->getContext();
928   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
929   const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);
930   CallingConv::ID CC = Fn.getCallingConv();
931 
932   Align MaxAlign = Align(1);
933   uint64_t ExplicitArgOffset = 0;
934   const DataLayout &DL = Fn.getParent()->getDataLayout();
935 
936   unsigned InIndex = 0;
937 
938   for (const Argument &Arg : Fn.args()) {
939     const bool IsByRef = Arg.hasByRefAttr();
940     Type *BaseArgTy = Arg.getType();
941     Type *MemArgTy = IsByRef ? Arg.getParamByRefType() : BaseArgTy;
942     Align Alignment = DL.getValueOrABITypeAlignment(
943         IsByRef ? Arg.getParamAlign() : None, MemArgTy);
944     MaxAlign = std::max(Alignment, MaxAlign);
945     uint64_t AllocSize = DL.getTypeAllocSize(MemArgTy);
946 
947     uint64_t ArgOffset = alignTo(ExplicitArgOffset, Alignment) + ExplicitOffset;
948     ExplicitArgOffset = alignTo(ExplicitArgOffset, Alignment) + AllocSize;
949 
950     // We're basically throwing away everything passed into us and starting over
951     // to get accurate in-memory offsets. The "PartOffset" is completely useless
952     // to us as computed in Ins.
953     //
954     // We also need to figure out what type legalization is trying to do to get
955     // the correct memory offsets.
956 
957     SmallVector<EVT, 16> ValueVTs;
958     SmallVector<uint64_t, 16> Offsets;
959     ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);
960 
961     for (unsigned Value = 0, NumValues = ValueVTs.size();
962          Value != NumValues; ++Value) {
963       uint64_t BasePartOffset = Offsets[Value];
964 
965       EVT ArgVT = ValueVTs[Value];
966       EVT MemVT = ArgVT;
967       MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
968       unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT);
969 
970       if (NumRegs == 1) {
971         // This argument is not split, so the IR type is the memory type.
972         if (ArgVT.isExtended()) {
973           // We have an extended type, like i24, so we should just use the
974           // register type.
975           MemVT = RegisterVT;
976         } else {
977           MemVT = ArgVT;
978         }
979       } else if (ArgVT.isVector() && RegisterVT.isVector() &&
980                  ArgVT.getScalarType() == RegisterVT.getScalarType()) {
981         assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
982         // We have a vector value which has been split into a vector with
983         // the same scalar type, but fewer elements.  This should handle
984         // all the floating-point vector types.
985         MemVT = RegisterVT;
986       } else if (ArgVT.isVector() &&
987                  ArgVT.getVectorNumElements() == NumRegs) {
988         // This arg has been split so that each element is stored in a separate
989         // register.
990         MemVT = ArgVT.getScalarType();
991       } else if (ArgVT.isExtended()) {
992         // We have an extended type, like i65.
993         MemVT = RegisterVT;
994       } else {
995         unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
996         assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
997         if (RegisterVT.isInteger()) {
998           MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
999         } else if (RegisterVT.isVector()) {
1000           assert(!RegisterVT.getScalarType().isFloatingPoint());
1001           unsigned NumElements = RegisterVT.getVectorNumElements();
1002           assert(MemoryBits % NumElements == 0);
1003           // This vector type has been split into another vector type with
1004           // a different element size.
1005           EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
1006                                            MemoryBits / NumElements);
1007           MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
1008         } else {
1009           llvm_unreachable("cannot deduce memory type.");
1010         }
1011       }
1012 
1013       // Convert one-element vectors to scalars.
1014       if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
1015         MemVT = MemVT.getScalarType();
1016 
1017       // Round up vec3/vec5 argument.
1018       if (MemVT.isVector() && !MemVT.isPow2VectorType()) {
1019         assert(MemVT.getVectorNumElements() == 3 ||
1020                MemVT.getVectorNumElements() == 5);
1021         MemVT = MemVT.getPow2VectorType(State.getContext());
1022       } else if (!MemVT.isSimple() && !MemVT.isVector()) {
1023         MemVT = MemVT.getRoundIntegerType(State.getContext());
1024       }
1025 
1026       unsigned PartOffset = 0;
1027       for (unsigned i = 0; i != NumRegs; ++i) {
1028         State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
1029                                                BasePartOffset + PartOffset,
1030                                                MemVT.getSimpleVT(),
1031                                                CCValAssign::Full));
1032         PartOffset += MemVT.getStoreSize();
1033       }
1034     }
1035   }
1036 }
1037 
1038 SDValue AMDGPUTargetLowering::LowerReturn(
1039   SDValue Chain, CallingConv::ID CallConv,
1040   bool isVarArg,
1041   const SmallVectorImpl<ISD::OutputArg> &Outs,
1042   const SmallVectorImpl<SDValue> &OutVals,
1043   const SDLoc &DL, SelectionDAG &DAG) const {
1044   // FIXME: Fails for r600 tests
1045   //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
1046   // "wave terminate should not have return values");
1047   return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
1048 }
1049 
1050 //===---------------------------------------------------------------------===//
1051 // Target specific lowering
1052 //===---------------------------------------------------------------------===//
1053 
1054 /// Selects the correct CCAssignFn for a given CallingConvention value.
1055 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
1056                                                     bool IsVarArg) {
1057   return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
1058 }
1059 
1060 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
1061                                                       bool IsVarArg) {
1062   return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
1063 }
1064 
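// Collect the chains of any loads of incoming stack arguments that overlap the
// object being clobbered, so a store to that slot cannot be reordered before
// the loads that still need its old contents.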
1065 SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
1066                                                   SelectionDAG &DAG,
1067                                                   MachineFrameInfo &MFI,
1068                                                   int ClobberedFI) const {
1069   SmallVector<SDValue, 8> ArgChains;
1070   int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
1071   int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
1072 
1073   // Include the original chain at the beginning of the list. When this is
1074   // used by target LowerCall hooks, this helps legalize find the
1075   // CALLSEQ_BEGIN node.
1076   ArgChains.push_back(Chain);
1077 
1078   // Add a chain value for each stack argument load that overlaps ClobberedFI.
1079   for (SDNode *U : DAG.getEntryNode().getNode()->uses()) {
1080     if (LoadSDNode *L = dyn_cast<LoadSDNode>(U)) {
1081       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
1082         if (FI->getIndex() < 0) {
1083           int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
1084           int64_t InLastByte = InFirstByte;
1085           InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
1086 
1087           if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1088               (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1089             ArgChains.push_back(SDValue(L, 1));
1090         }
1091       }
1092     }
1093   }
1094 
1095   // Build a tokenfactor for all the chains.
1096   return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
1097 }
1098 
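// Diagnose an unsupported call and return UNDEF values of the expected types
// so that lowering can continue.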
1099 SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
1100                                                  SmallVectorImpl<SDValue> &InVals,
1101                                                  StringRef Reason) const {
1102   SDValue Callee = CLI.Callee;
1103   SelectionDAG &DAG = CLI.DAG;
1104 
1105   const Function &Fn = DAG.getMachineFunction().getFunction();
1106 
1107   StringRef FuncName("<unknown>");
1108 
1109   if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
1110     FuncName = G->getSymbol();
1111   else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1112     FuncName = G->getGlobal()->getName();
1113 
1114   DiagnosticInfoUnsupported NoCalls(
1115     Fn, Reason + FuncName, CLI.DL.getDebugLoc());
1116   DAG.getContext()->diagnose(NoCalls);
1117 
1118   if (!CLI.IsTailCall) {
1119     for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
1120       InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
1121   }
1122 
1123   return DAG.getEntryNode();
1124 }
1125 
1126 SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
1127                                         SmallVectorImpl<SDValue> &InVals) const {
1128   return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
1129 }
1130 
1131 SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
1132                                                       SelectionDAG &DAG) const {
1133   const Function &Fn = DAG.getMachineFunction().getFunction();
1134 
1135   DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
1136                                             SDLoc(Op).getDebugLoc());
1137   DAG.getContext()->diagnose(NoDynamicAlloca);
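  // Return a zero value together with the incoming chain so that lowering can
  // proceed after the diagnostic has been emitted.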
1138   auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
1139   return DAG.getMergeValues(Ops, SDLoc());
1140 }
1141 
1142 SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
1143                                              SelectionDAG &DAG) const {
1144   switch (Op.getOpcode()) {
1145   default:
1146     Op->print(errs(), &DAG);
1147     llvm_unreachable("Custom lowering code for this "
1148                      "instruction is not implemented yet!");
1149     break;
1150   case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
1151   case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
1152   case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
1153   case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
1154   case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
1155   case ISD::FREM: return LowerFREM(Op, DAG);
1156   case ISD::FCEIL: return LowerFCEIL(Op, DAG);
1157   case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
1158   case ISD::FRINT: return LowerFRINT(Op, DAG);
1159   case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
1160   case ISD::FROUND: return LowerFROUND(Op, DAG);
1161   case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
1162   case ISD::FLOG:
1163     return LowerFLOG(Op, DAG, numbers::ln2f);
1164   case ISD::FLOG10:
1165     return LowerFLOG(Op, DAG, numbers::ln2f / numbers::ln10f);
1166   case ISD::FEXP:
1167     return lowerFEXP(Op, DAG);
1168   case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
1169   case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
1170   case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
1171   case ISD::FP_TO_SINT:
1172   case ISD::FP_TO_UINT:
1173     return LowerFP_TO_INT(Op, DAG);
1174   case ISD::CTTZ:
1175   case ISD::CTTZ_ZERO_UNDEF:
1176   case ISD::CTLZ:
1177   case ISD::CTLZ_ZERO_UNDEF:
1178     return LowerCTLZ_CTTZ(Op, DAG);
1179   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
1180   }
1181   return Op;
1182 }
1183 
1184 void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
1185                                               SmallVectorImpl<SDValue> &Results,
1186                                               SelectionDAG &DAG) const {
1187   switch (N->getOpcode()) {
1188   case ISD::SIGN_EXTEND_INREG:
1189     // Different parts of legalization seem to interpret which type of
1190     // sign_extend_inreg is the one to check for custom lowering. The extended
1191     // from type is what really matters, but some places check for custom
1192     // lowering of the result type. This results in trying to use
1193     // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
1194     // nothing here and let the illegal result integer be handled normally.
1195     return;
1196   default:
1197     return;
1198   }
1199 }
1200 
1201 SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
1202                                                  SDValue Op,
1203                                                  SelectionDAG &DAG) const {
1204 
1205   const DataLayout &DL = DAG.getDataLayout();
1206   GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
1207   const GlobalValue *GV = G->getGlobal();
1208 
1209   if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
1210       G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) {
1211     if (!MFI->isModuleEntryFunction() &&
1212         !GV->getName().equals("llvm.amdgcn.module.lds")) {
1213       SDLoc DL(Op);
1214       const Function &Fn = DAG.getMachineFunction().getFunction();
1215       DiagnosticInfoUnsupported BadLDSDecl(
1216         Fn, "local memory global used by non-kernel function",
1217         DL.getDebugLoc(), DS_Warning);
1218       DAG.getContext()->diagnose(BadLDSDecl);
1219 
1220       // We currently don't have a way to correctly allocate LDS objects that
1221       // aren't directly associated with a kernel. We do force inlining of
1222       // functions that use local objects. However, if these dead functions are
1223       // not eliminated, we don't want a compile time error. Just emit a warning
1224       // and a trap, since there should be no callable path here.
1225       SDValue Trap = DAG.getNode(ISD::TRAP, DL, MVT::Other, DAG.getEntryNode());
1226       SDValue OutputChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
1227                                         Trap, DAG.getRoot());
1228       DAG.setRoot(OutputChain);
1229       return DAG.getUNDEF(Op.getValueType());
1230     }
1231 
1232     // XXX: What does the value of G->getOffset() mean?
1233     assert(G->getOffset() == 0 &&
1234          "Do not know what to do with an non-zero offset");
1235 
1236     // TODO: We could emit code to handle the initialization somewhere.
1237     // We ignore the initializer for now and legalize it to allow selection.
1238     // The initializer will be rejected during assembly emission anyway.
1239     unsigned Offset = MFI->allocateLDSGlobal(DL, *cast<GlobalVariable>(GV));
1240     return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
1241   }
1242   return SDValue();
1243 }
1244 
1245 SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
1246                                                   SelectionDAG &DAG) const {
1247   SmallVector<SDValue, 8> Args;
1248 
1249   EVT VT = Op.getValueType();
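  // A concat of two 2 x 16-bit vectors is just the two 32-bit source halves
  // packed together, so build a v2i32 from the bitcast operands and bitcast
  // the result back to the 4 x 16-bit type.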
1250   if (VT == MVT::v4i16 || VT == MVT::v4f16) {
1251     SDLoc SL(Op);
1252     SDValue Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(0));
1253     SDValue Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(1));
1254 
1255     SDValue BV = DAG.getBuildVector(MVT::v2i32, SL, { Lo, Hi });
1256     return DAG.getNode(ISD::BITCAST, SL, VT, BV);
1257   }
1258 
1259   for (const SDUse &U : Op->ops())
1260     DAG.ExtractVectorElements(U.get(), Args);
1261 
1262   return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
1263 }
1264 
1265 SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
1266                                                      SelectionDAG &DAG) const {
1267 
1268   SmallVector<SDValue, 8> Args;
1269   unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1270   EVT VT = Op.getValueType();
1271   EVT SrcVT = Op.getOperand(0).getValueType();
1272 
1273   // For these types, we have some TableGen patterns, except when the index is 1.
1274   if (((SrcVT == MVT::v4f16 && VT == MVT::v2f16) ||
1275        (SrcVT == MVT::v4i16 && VT == MVT::v2i16)) &&
1276       Start != 1)
1277     return Op;
1278 
1279   if (((SrcVT == MVT::v8f16 && VT == MVT::v4f16) ||
1280        (SrcVT == MVT::v8i16 && VT == MVT::v4i16)) &&
1281       (Start == 0 || Start == 4))
1282     return Op;
1283 
1284   if (((SrcVT == MVT::v16f16 && VT == MVT::v8f16) ||
1285        (SrcVT == MVT::v16i16 && VT == MVT::v8i16)) &&
1286       (Start == 0 || Start == 8))
1287     return Op;
1288 
1289   DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
1290                             VT.getVectorNumElements());
1291 
1292   return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
1293 }
1294 
1295 /// Generate Min/Max node
1296 SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
1297                                                    SDValue LHS, SDValue RHS,
1298                                                    SDValue True, SDValue False,
1299                                                    SDValue CC,
1300                                                    DAGCombinerInfo &DCI) const {
1301   if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
1302     return SDValue();
1303 
1304   SelectionDAG &DAG = DCI.DAG;
1305   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
1306   switch (CCOpcode) {
1307   case ISD::SETOEQ:
1308   case ISD::SETONE:
1309   case ISD::SETUNE:
1310   case ISD::SETNE:
1311   case ISD::SETUEQ:
1312   case ISD::SETEQ:
1313   case ISD::SETFALSE:
1314   case ISD::SETFALSE2:
1315   case ISD::SETTRUE:
1316   case ISD::SETTRUE2:
1317   case ISD::SETUO:
1318   case ISD::SETO:
1319     break;
1320   case ISD::SETULE:
1321   case ISD::SETULT: {
1322     if (LHS == True)
1323       return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1324     return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1325   }
1326   case ISD::SETOLE:
1327   case ISD::SETOLT:
1328   case ISD::SETLE:
1329   case ISD::SETLT: {
1330     // Ordered. Assume ordered for undefined.
1331 
1332     // Only do this after legalization to avoid interfering with other combines
1333     // which might occur.
1334     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1335         !DCI.isCalledByLegalizer())
1336       return SDValue();
1337 
1338     // We need to permute the operands to get the correct NaN behavior. The
1339     // selected operand is the second one based on the failing compare with NaN,
1340     // so permute it based on the compare type the hardware uses.
1341     if (LHS == True)
1342       return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1343     return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1344   }
1345   case ISD::SETUGE:
1346   case ISD::SETUGT: {
1347     if (LHS == True)
1348       return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1349     return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1350   }
1351   case ISD::SETGT:
1352   case ISD::SETGE:
1353   case ISD::SETOGE:
1354   case ISD::SETOGT: {
1355     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1356         !DCI.isCalledByLegalizer())
1357       return SDValue();
1358 
1359     if (LHS == True)
1360       return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1361     return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1362   }
1363   case ISD::SETCC_INVALID:
1364     llvm_unreachable("Invalid setcc condcode!");
1365   }
1366   return SDValue();
1367 }
1368 
1369 std::pair<SDValue, SDValue>
1370 AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
1371   SDLoc SL(Op);
1372 
1373   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1374 
1375   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1376   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1377 
1378   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1379   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1380 
1381   return std::make_pair(Lo, Hi);
1382 }
1383 
1384 SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
1385   SDLoc SL(Op);
1386 
1387   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1388   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1389   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1390 }
1391 
1392 SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
1393   SDLoc SL(Op);
1394 
1395   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1396   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1397   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1398 }
1399 
1400 // Split a vector type into two parts. The first part is a power of two vector.
1401 // The second part is whatever is left over, and is a scalar if it would
1402 // otherwise be a 1-vector.
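// For example, a v7i32 splits into (v4i32, v3i32) and a v3i32 splits into
// (v2i32, i32).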
1403 std::pair<EVT, EVT>
1404 AMDGPUTargetLowering::getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const {
1405   EVT LoVT, HiVT;
1406   EVT EltVT = VT.getVectorElementType();
1407   unsigned NumElts = VT.getVectorNumElements();
1408   unsigned LoNumElts = PowerOf2Ceil((NumElts + 1) / 2);
1409   LoVT = EVT::getVectorVT(*DAG.getContext(), EltVT, LoNumElts);
1410   HiVT = NumElts - LoNumElts == 1
1411              ? EltVT
1412              : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
1413   return std::make_pair(LoVT, HiVT);
1414 }
1415 
1416 // Split a vector value into two parts of types LoVT and HiVT. HiVT could be
1417 // scalar.
1418 std::pair<SDValue, SDValue>
1419 AMDGPUTargetLowering::splitVector(const SDValue &N, const SDLoc &DL,
1420                                   const EVT &LoVT, const EVT &HiVT,
1421                                   SelectionDAG &DAG) const {
1422   assert(LoVT.getVectorNumElements() +
1423                  (HiVT.isVector() ? HiVT.getVectorNumElements() : 1) <=
1424              N.getValueType().getVectorNumElements() &&
1425          "More vector elements requested than available!");
1426   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
1427                            DAG.getVectorIdxConstant(0, DL));
1428   SDValue Hi = DAG.getNode(
1429       HiVT.isVector() ? ISD::EXTRACT_SUBVECTOR : ISD::EXTRACT_VECTOR_ELT, DL,
1430       HiVT, N, DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), DL));
1431   return std::make_pair(Lo, Hi);
1432 }
1433 
1434 SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
1435                                               SelectionDAG &DAG) const {
1436   LoadSDNode *Load = cast<LoadSDNode>(Op);
1437   EVT VT = Op.getValueType();
  SDLoc SL(Op);

  // If this is a 2 element vector, we really want to scalarize and not create
1442   // weird 1 element vectors.
1443   if (VT.getVectorNumElements() == 2) {
1444     SDValue Ops[2];
1445     std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
1446     return DAG.getMergeValues(Ops, SL);
1447   }
1448 
1449   SDValue BasePtr = Load->getBasePtr();
1450   EVT MemVT = Load->getMemoryVT();
1451 
1452   const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1453 
1454   EVT LoVT, HiVT;
1455   EVT LoMemVT, HiMemVT;
1456   SDValue Lo, Hi;
1457 
1458   std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1459   std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1460   std::tie(Lo, Hi) = splitVector(Op, SL, LoVT, HiVT, DAG);
1461 
1462   unsigned Size = LoMemVT.getStoreSize();
1463   Align BaseAlign = Load->getAlign();
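  // The high half is loaded at offset Size from the base pointer, so its known
  // alignment is the common alignment of BaseAlign and that offset.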
1464   Align HiAlign = commonAlignment(BaseAlign, Size);
1465 
1466   SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
1467                                   Load->getChain(), BasePtr, SrcValue, LoMemVT,
1468                                   BaseAlign, Load->getMemOperand()->getFlags());
1469   SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Size));
1470   SDValue HiLoad =
1471       DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
1472                      HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
1473                      HiMemVT, HiAlign, Load->getMemOperand()->getFlags());
1474 
1475   SDValue Join;
1476   if (LoVT == HiVT) {
1477     // This is the case that the vector is power of two so was evenly split.
1478     Join = DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad);
1479   } else {
1480     Join = DAG.getNode(ISD::INSERT_SUBVECTOR, SL, VT, DAG.getUNDEF(VT), LoLoad,
1481                        DAG.getVectorIdxConstant(0, SL));
1482     Join = DAG.getNode(
1483         HiVT.isVector() ? ISD::INSERT_SUBVECTOR : ISD::INSERT_VECTOR_ELT, SL,
1484         VT, Join, HiLoad,
1485         DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), SL));
1486   }
1487 
1488   SDValue Ops[] = {Join, DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
1489                                      LoLoad.getValue(1), HiLoad.getValue(1))};
1490 
1491   return DAG.getMergeValues(Ops, SL);
1492 }
1493 
1494 SDValue AMDGPUTargetLowering::WidenOrSplitVectorLoad(SDValue Op,
1495                                                      SelectionDAG &DAG) const {
1496   LoadSDNode *Load = cast<LoadSDNode>(Op);
1497   EVT VT = Op.getValueType();
1498   SDValue BasePtr = Load->getBasePtr();
1499   EVT MemVT = Load->getMemoryVT();
1500   SDLoc SL(Op);
1501   const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1502   Align BaseAlign = Load->getAlign();
1503   unsigned NumElements = MemVT.getVectorNumElements();
1504 
1505   // Widen from vec3 to vec4 when the load is at least 8-byte aligned
1506   // or 16-byte fully dereferenceable. Otherwise, split the vector load.
1507   if (NumElements != 3 ||
1508       (BaseAlign < Align(8) &&
1509        !SrcValue.isDereferenceable(16, *DAG.getContext(), DAG.getDataLayout())))
1510     return SplitVectorLoad(Op, DAG);
1511 
1512   assert(NumElements == 3);
1513 
1514   EVT WideVT =
1515       EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
1516   EVT WideMemVT =
1517       EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 4);
1518   SDValue WideLoad = DAG.getExtLoad(
1519       Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue,
1520       WideMemVT, BaseAlign, Load->getMemOperand()->getFlags());
1521   return DAG.getMergeValues(
1522       {DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, VT, WideLoad,
1523                    DAG.getVectorIdxConstant(0, SL)),
1524        WideLoad.getValue(1)},
1525       SL);
1526 }
1527 
1528 SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
1529                                                SelectionDAG &DAG) const {
1530   StoreSDNode *Store = cast<StoreSDNode>(Op);
1531   SDValue Val = Store->getValue();
1532   EVT VT = Val.getValueType();
1533 
1534   // If this is a 2 element vector, we really want to scalarize and not create
1535   // weird 1 element vectors.
1536   if (VT.getVectorNumElements() == 2)
1537     return scalarizeVectorStore(Store, DAG);
1538 
1539   EVT MemVT = Store->getMemoryVT();
1540   SDValue Chain = Store->getChain();
1541   SDValue BasePtr = Store->getBasePtr();
1542   SDLoc SL(Op);
1543 
1544   EVT LoVT, HiVT;
1545   EVT LoMemVT, HiMemVT;
1546   SDValue Lo, Hi;
1547 
1548   std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1549   std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1550   std::tie(Lo, Hi) = splitVector(Val, SL, LoVT, HiVT, DAG);
1551 
1552   SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());
1553 
1554   const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
1555   Align BaseAlign = Store->getAlign();
1556   unsigned Size = LoMemVT.getStoreSize();
1557   Align HiAlign = commonAlignment(BaseAlign, Size);
1558 
1559   SDValue LoStore =
1560       DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
1561                         Store->getMemOperand()->getFlags());
1562   SDValue HiStore =
1563       DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
1564                         HiMemVT, HiAlign, Store->getMemOperand()->getFlags());
1565 
1566   return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
1567 }
1568 
// This is a shortcut for integer division because we have fast i32<->f32
// conversions, and fast f32 reciprocal instructions. The 24-bit mantissa of an
// f32 is enough to accurately represent a 24-bit signed integer.
1572 SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
1573                                             bool Sign) const {
1574   SDLoc DL(Op);
1575   EVT VT = Op.getValueType();
1576   SDValue LHS = Op.getOperand(0);
1577   SDValue RHS = Op.getOperand(1);
1578   MVT IntVT = MVT::i32;
1579   MVT FltVT = MVT::f32;
1580 
1581   unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
1582   if (LHSSignBits < 9)
1583     return SDValue();
1584 
1585   unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
1586   if (RHSSignBits < 9)
1587     return SDValue();
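  // At least 9 sign bits in each 32-bit operand means each value fits in 24
  // bits (including the sign bit), which the f32 mantissa can represent
  // exactly.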
1588 
1589   unsigned BitSize = VT.getSizeInBits();
1590   unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
1591   unsigned DivBits = BitSize - SignBits;
1592   if (Sign)
1593     ++DivBits;
1594 
1595   ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
1596   ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
1597 
1598   SDValue jq = DAG.getConstant(1, DL, IntVT);
1599 
1600   if (Sign) {
1601     // char|short jq = ia ^ ib;
1602     jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
1603 
1604     // jq = jq >> (bitsize - 2)
1605     jq = DAG.getNode(ISD::SRA, DL, VT, jq,
1606                      DAG.getConstant(BitSize - 2, DL, VT));
1607 
1608     // jq = jq | 0x1
1609     jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
1610   }
1611 
1612   // int ia = (int)LHS;
1613   SDValue ia = LHS;
1614 
  // int ib = (int)RHS;
1616   SDValue ib = RHS;
1617 
1618   // float fa = (float)ia;
1619   SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
1620 
1621   // float fb = (float)ib;
1622   SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
1623 
1624   SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
1625                            fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
1626 
1627   // fq = trunc(fq);
1628   fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);
1629 
1630   // float fqneg = -fq;
1631   SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
1632 
1633   MachineFunction &MF = DAG.getMachineFunction();
1634   const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
1635 
1636   // float fr = mad(fqneg, fb, fa);
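  // Use ISD::FMA if the target has no MAD/MAC f32 instructions, ISD::FMAD if
  // f32 denormals are flushed, and AMDGPUISD::FMAD_FTZ otherwise.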
1637   unsigned OpCode = !Subtarget->hasMadMacF32Insts() ?
1638                     (unsigned)ISD::FMA :
1639                     !MFI->getMode().allFP32Denormals() ?
1640                     (unsigned)ISD::FMAD :
1641                     (unsigned)AMDGPUISD::FMAD_FTZ;
1642   SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);
1643 
1644   // int iq = (int)fq;
1645   SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
1646 
1647   // fr = fabs(fr);
1648   fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
1649 
1650   // fb = fabs(fb);
1651   fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
1652 
1653   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1654 
1655   // int cv = fr >= fb;
1656   SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
1657 
1658   // jq = (cv ? jq : 0);
1659   jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
1660 
1661   // dst = iq + jq;
1662   SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
1663 
  // Rem needs compensation; it's easier to recompute it.
1665   SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
1666   Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
1667 
1668   // Truncate to number of bits this divide really is.
1669   if (Sign) {
1670     SDValue InRegSize
1671       = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
1672     Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
1673     Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
1674   } else {
1675     SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
1676     Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
1677     Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
1678   }
1679 
1680   return DAG.getMergeValues({ Div, Rem }, DL);
1681 }
1682 
1683 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
1684                                       SelectionDAG &DAG,
1685                                       SmallVectorImpl<SDValue> &Results) const {
1686   SDLoc DL(Op);
1687   EVT VT = Op.getValueType();
1688 
1689   assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64");
1690 
1691   EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1692 
1693   SDValue One = DAG.getConstant(1, DL, HalfVT);
1694   SDValue Zero = DAG.getConstant(0, DL, HalfVT);
1695 
  // Hi/Lo split.
1697   SDValue LHS = Op.getOperand(0);
1698   SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1699   SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, One);
1700 
1701   SDValue RHS = Op.getOperand(1);
1702   SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1703   SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, One);
1704 
1705   if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
1706       DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
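    // Both operands fit in 32 bits: use a 32-bit UDIVREM and zero-extend the
    // results back to 64 bits.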
1707 
1708     SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1709                               LHS_Lo, RHS_Lo);
1710 
1711     SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero});
1712     SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero});
1713 
1714     Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
1715     Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
1716     return;
1717   }
1718 
1719   if (isTypeLegal(MVT::i64)) {
1720     // The algorithm here is based on ideas from "Software Integer Division",
1721     // Tom Rodeheffer, August 2008.
1722 
1723     MachineFunction &MF = DAG.getMachineFunction();
1724     const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1725 
1726     // Compute denominator reciprocal.
1727     unsigned FMAD = !Subtarget->hasMadMacF32Insts() ?
1728                     (unsigned)ISD::FMA :
1729                     !MFI->getMode().allFP32Denormals() ?
1730                     (unsigned)ISD::FMAD :
1731                     (unsigned)AMDGPUISD::FMAD_FTZ;
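    // The f32 constants below are bit patterns for roughly 2^32 (0x4f800000),
    // 2^64 (0x5f7ffffc, slightly below to stay finite), 2^-32 (0x2f800000) and
    // -2^32 (0xcf800000), used to assemble a 64-bit fixed-point reciprocal
    // estimate of RHS (roughly 2^64 / RHS) from two f32 reciprocal pieces.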
1732 
1733     SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo);
1734     SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi);
1735     SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi,
1736       DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32),
1737       Cvt_Lo);
1738     SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1);
1739     SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp,
1740       DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32));
1741     SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1,
1742       DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32));
1743     SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2);
1744     SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc,
1745       DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32),
1746       Mul1);
1747     SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2);
1748     SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc);
1749     SDValue Rcp64 = DAG.getBitcast(VT,
1750                         DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi}));
1751 
1752     SDValue Zero64 = DAG.getConstant(0, DL, VT);
1753     SDValue One64  = DAG.getConstant(1, DL, VT);
1754     SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1);
1755     SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1);
1756 
1757     // First round of UNR (Unsigned integer Newton-Raphson).
1758     SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS);
1759     SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64);
1760     SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1);
1761     SDValue Mulhi1_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
1762                                     Zero);
1763     SDValue Mulhi1_Hi =
1764         DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1, One);
1765     SDValue Add1_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Lo,
1766                                   Mulhi1_Lo, Zero1);
1767     SDValue Add1_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Hi,
1768                                   Mulhi1_Hi, Add1_Lo.getValue(1));
1769     SDValue Add1 = DAG.getBitcast(VT,
1770                         DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi}));
1771 
1772     // Second round of UNR.
1773     SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1);
1774     SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2);
1775     SDValue Mulhi2_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
1776                                     Zero);
1777     SDValue Mulhi2_Hi =
1778         DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2, One);
1779     SDValue Add2_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Lo,
1780                                   Mulhi2_Lo, Zero1);
1781     SDValue Add2_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Hi,
1782                                   Mulhi2_Hi, Add2_Lo.getValue(1));
1783     SDValue Add2 = DAG.getBitcast(VT,
1784                         DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi}));
1785 
1786     SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2);
1787 
1788     SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3);
1789 
1790     SDValue Mul3_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, Zero);
1791     SDValue Mul3_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, One);
1792     SDValue Sub1_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Lo,
1793                                   Mul3_Lo, Zero1);
1794     SDValue Sub1_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Hi,
1795                                   Mul3_Hi, Sub1_Lo.getValue(1));
1796     SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi);
1797     SDValue Sub1 = DAG.getBitcast(VT,
1798                         DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));
1799 
1800     SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
1801     SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
1802                                  ISD::SETUGE);
1803     SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
1804                                  ISD::SETUGE);
1805     SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);
1806 
    // TODO: Here and below, portions of the code could be enclosed in if/endif
    // blocks. Currently the control flow is unconditional and we have 4
    // selects after the potential endif to substitute for PHIs.
1810 
1811     // if C3 != 0 ...
1812     SDValue Sub2_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Lo,
1813                                   RHS_Lo, Zero1);
1814     SDValue Sub2_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Mi,
1815                                   RHS_Hi, Sub1_Lo.getValue(1));
1816     SDValue Sub2_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1817                                   Zero, Sub2_Lo.getValue(1));
1818     SDValue Sub2 = DAG.getBitcast(VT,
1819                         DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));
1820 
1821     SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);
1822 
1823     SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
1824                                  ISD::SETUGE);
1825     SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
1826                                  ISD::SETUGE);
1827     SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);
1828 
1829     // if (C6 != 0)
1830     SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);
1831 
1832     SDValue Sub3_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Lo,
1833                                   RHS_Lo, Zero1);
1834     SDValue Sub3_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1835                                   RHS_Hi, Sub2_Lo.getValue(1));
1836     SDValue Sub3_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub3_Mi,
1837                                   Zero, Sub3_Lo.getValue(1));
1838     SDValue Sub3 = DAG.getBitcast(VT,
1839                         DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));
1840 
1841     // endif C6
1842     // endif C3
1843 
1844     SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
1845     SDValue Div  = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);
1846 
1847     SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
1848     SDValue Rem  = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);
1849 
1850     Results.push_back(Div);
1851     Results.push_back(Rem);
1852 
1853     return;
1854   }
1855 
  // R600 expansion.
  // Get speculative values.
1858   SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
1859   SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
1860 
1861   SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
1862   SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
1863   REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
1864 
1865   SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
1866   SDValue DIV_Lo = Zero;
1867 
1868   const unsigned halfBitWidth = HalfVT.getSizeInBits();
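  // The loop below performs restoring long division over the low half: shift
  // one bit of LHS_Lo into REM each iteration and subtract RHS whenever
  // REM >= RHS, setting the corresponding bit of the quotient.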
1869 
1870   for (unsigned i = 0; i < halfBitWidth; ++i) {
1871     const unsigned bitPos = halfBitWidth - i - 1;
1872     SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
1873     // Get value of high bit
1874     SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
1875     HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
1876     HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
1877 
1878     // Shift
1879     REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
1880     // Add LHS high bit
1881     REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
1882 
1883     SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
1884     SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);
1885 
1886     DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
1887 
1888     // Update REM
1889     SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
1890     REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
1891   }
1892 
1893   SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
1894   DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
1895   Results.push_back(DIV);
1896   Results.push_back(REM);
1897 }
1898 
1899 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
1900                                            SelectionDAG &DAG) const {
1901   SDLoc DL(Op);
1902   EVT VT = Op.getValueType();
1903 
1904   if (VT == MVT::i64) {
1905     SmallVector<SDValue, 2> Results;
1906     LowerUDIVREM64(Op, DAG, Results);
1907     return DAG.getMergeValues(Results, DL);
1908   }
1909 
1910   if (VT == MVT::i32) {
1911     if (SDValue Res = LowerDIVREM24(Op, DAG, false))
1912       return Res;
1913   }
1914 
1915   SDValue X = Op.getOperand(0);
1916   SDValue Y = Op.getOperand(1);
1917 
1918   // See AMDGPUCodeGenPrepare::expandDivRem32 for a description of the
1919   // algorithm used here.
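  // Roughly: z is an estimate of 2^32 / y, refined with one Newton-Raphson
  // step; then q = mulhi(x, z) and r = x - q * y, followed by two conditional
  // corrections of q and r.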
1920 
1921   // Initial estimate of inv(y).
1922   SDValue Z = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Y);
1923 
1924   // One round of UNR.
1925   SDValue NegY = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Y);
1926   SDValue NegYZ = DAG.getNode(ISD::MUL, DL, VT, NegY, Z);
1927   Z = DAG.getNode(ISD::ADD, DL, VT, Z,
1928                   DAG.getNode(ISD::MULHU, DL, VT, Z, NegYZ));
1929 
1930   // Quotient/remainder estimate.
1931   SDValue Q = DAG.getNode(ISD::MULHU, DL, VT, X, Z);
1932   SDValue R =
1933       DAG.getNode(ISD::SUB, DL, VT, X, DAG.getNode(ISD::MUL, DL, VT, Q, Y));
1934 
1935   // First quotient/remainder refinement.
1936   EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1937   SDValue One = DAG.getConstant(1, DL, VT);
1938   SDValue Cond = DAG.getSetCC(DL, CCVT, R, Y, ISD::SETUGE);
1939   Q = DAG.getNode(ISD::SELECT, DL, VT, Cond,
1940                   DAG.getNode(ISD::ADD, DL, VT, Q, One), Q);
1941   R = DAG.getNode(ISD::SELECT, DL, VT, Cond,
1942                   DAG.getNode(ISD::SUB, DL, VT, R, Y), R);
1943 
1944   // Second quotient/remainder refinement.
1945   Cond = DAG.getSetCC(DL, CCVT, R, Y, ISD::SETUGE);
1946   Q = DAG.getNode(ISD::SELECT, DL, VT, Cond,
1947                   DAG.getNode(ISD::ADD, DL, VT, Q, One), Q);
1948   R = DAG.getNode(ISD::SELECT, DL, VT, Cond,
1949                   DAG.getNode(ISD::SUB, DL, VT, R, Y), R);
1950 
1951   return DAG.getMergeValues({Q, R}, DL);
1952 }
1953 
1954 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
1955                                            SelectionDAG &DAG) const {
1956   SDLoc DL(Op);
1957   EVT VT = Op.getValueType();
1958 
1959   SDValue LHS = Op.getOperand(0);
1960   SDValue RHS = Op.getOperand(1);
1961 
1962   SDValue Zero = DAG.getConstant(0, DL, VT);
1963   SDValue NegOne = DAG.getConstant(-1, DL, VT);
1964 
1965   if (VT == MVT::i32) {
1966     if (SDValue Res = LowerDIVREM24(Op, DAG, true))
1967       return Res;
1968   }
1969 
1970   if (VT == MVT::i64 &&
1971       DAG.ComputeNumSignBits(LHS) > 32 &&
1972       DAG.ComputeNumSignBits(RHS) > 32) {
1973     EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1974 
    // Hi/Lo split.
1976     SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1977     SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1978     SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1979                                  LHS_Lo, RHS_Lo);
1980     SDValue Res[2] = {
1981       DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
1982       DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
1983     };
1984     return DAG.getMergeValues(Res, DL);
1985   }
1986 
1987   SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
1988   SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
1989   SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
1990   SDValue RSign = LHSign; // Remainder sign is the same as LHS
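  // Below, |LHS| and |RHS| are computed as (x + sign) ^ sign, where sign is
  // all ones for a negative value and zero otherwise.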
1991 
1992   LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
1993   RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
1994 
1995   LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
1996   RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
1997 
1998   SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
1999   SDValue Rem = Div.getValue(1);
2000 
2001   Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
2002   Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
2003 
2004   Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
2005   Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
2006 
2007   SDValue Res[2] = {
2008     Div,
2009     Rem
2010   };
2011   return DAG.getMergeValues(Res, DL);
2012 }
2013 
2014 // (frem x, y) -> (fma (fneg (ftrunc (fdiv x, y))), y, x)
2015 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
2016   SDLoc SL(Op);
2017   EVT VT = Op.getValueType();
2018   auto Flags = Op->getFlags();
2019   SDValue X = Op.getOperand(0);
2020   SDValue Y = Op.getOperand(1);
2021 
2022   SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y, Flags);
2023   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, VT, Div, Flags);
2024   SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Trunc, Flags);
2025   // TODO: For f32 use FMAD instead if !hasFastFMA32?
2026   return DAG.getNode(ISD::FMA, SL, VT, Neg, Y, X, Flags);
2027 }
2028 
2029 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
2030   SDLoc SL(Op);
2031   SDValue Src = Op.getOperand(0);
2032 
2033   // result = trunc(src)
2034   // if (src > 0.0 && src != result)
2035   //   result += 1.0
2036 
2037   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2038 
2039   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2040   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
2041 
2042   EVT SetCCVT =
2043       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2044 
2045   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
2046   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2047   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2048 
2049   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
2050   // TODO: Should this propagate fast-math-flags?
2051   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2052 }
2053 
2054 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
2055                                   SelectionDAG &DAG) {
2056   const unsigned FractBits = 52;
2057   const unsigned ExpBits = 11;
2058 
2059   SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
2060                                 Hi,
2061                                 DAG.getConstant(FractBits - 32, SL, MVT::i32),
2062                                 DAG.getConstant(ExpBits, SL, MVT::i32));
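  // Remove the f64 exponent bias (1023) to get the unbiased exponent.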
2063   SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
2064                             DAG.getConstant(1023, SL, MVT::i32));
2065 
2066   return Exp;
2067 }
2068 
2069 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
2070   SDLoc SL(Op);
2071   SDValue Src = Op.getOperand(0);
2072 
2073   assert(Op.getValueType() == MVT::f64);
2074 
2075   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2076 
2077   // Extract the upper half, since this is where we will find the sign and
2078   // exponent.
2079   SDValue Hi = getHiHalf64(Src, DAG);
2080 
2081   SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2082 
2083   const unsigned FractBits = 52;
2084 
2085   // Extract the sign bit.
2086   const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
2087   SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
2088 
2089   // Extend back to 64-bits.
2090   SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
2091   SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
2092 
2093   SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
2094   const SDValue FractMask
2095     = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
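  // FractMask shifted right by Exp has exactly the fractional bits of a value
  // with unbiased exponent Exp set; clearing those bits truncates toward zero.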
2096 
2097   SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
2098   SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
2099   SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
2100 
2101   EVT SetCCVT =
2102       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2103 
2104   const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
2105 
2106   SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2107   SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2108 
2109   SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
2110   SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
2111 
2112   return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
2113 }
2114 
2115 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
2116   SDLoc SL(Op);
2117   SDValue Src = Op.getOperand(0);
2118 
2119   assert(Op.getValueType() == MVT::f64);
2120 
2121   APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
2122   SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
2123   SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
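  // Adding and then subtracting 2^52 (with Src's sign) rounds to the nearest
  // integer, since any f64 with magnitude >= 2^52 has no fractional bits.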
2124 
2125   // TODO: Should this propagate fast-math-flags?
2126 
2127   SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
2128   SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
2129 
2130   SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
2131 
2132   APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
2133   SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
2134 
2135   EVT SetCCVT =
2136       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2137   SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
2138 
2139   return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
2140 }
2141 
2142 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
2143   // FNEARBYINT and FRINT are the same, except in their handling of FP
2144   // exceptions. Those aren't really meaningful for us, and OpenCL only has
2145   // rint, so just treat them as equivalent.
2146   return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
2147 }
2148 
2149 // XXX - May require not supporting f32 denormals?
2150 
2151 // Don't handle v2f16. The extra instructions to scalarize and repack around the
2152 // compare and vselect end up producing worse code than scalarizing the whole
2153 // operation.
2154 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2155   SDLoc SL(Op);
2156   SDValue X = Op.getOperand(0);
2157   EVT VT = Op.getValueType();
2158 
2159   SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X);
2160 
2161   // TODO: Should this propagate fast-math-flags?
2162 
2163   SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T);
2164 
2165   SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff);
2166 
2167   const SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2168   const SDValue One = DAG.getConstantFP(1.0, SL, VT);
2169   const SDValue Half = DAG.getConstantFP(0.5, SL, VT);
2170 
2171   SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X);
2172 
2173   EVT SetCCVT =
2174       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2175 
2176   SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
2177 
2178   SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero);
2179 
2180   return DAG.getNode(ISD::FADD, SL, VT, T, Sel);
2181 }
2182 
2183 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
2184   SDLoc SL(Op);
2185   SDValue Src = Op.getOperand(0);
2186 
2187   // result = trunc(src);
2188   // if (src < 0.0 && src != result)
2189   //   result += -1.0.
2190 
2191   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2192 
2193   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2194   const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
2195 
2196   EVT SetCCVT =
2197       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2198 
2199   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
2200   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2201   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2202 
2203   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
2204   // TODO: Should this propagate fast-math-flags?
2205   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2206 }
2207 
2208 SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG,
2209                                         double Log2BaseInverted) const {
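  // log_b(x) = log2(x) * (1 / log2(b)); the caller passes 1 / log2(b) as
  // Log2BaseInverted.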
2210   EVT VT = Op.getValueType();
2211 
2212   SDLoc SL(Op);
2213   SDValue Operand = Op.getOperand(0);
2214   SDValue Log2Operand = DAG.getNode(ISD::FLOG2, SL, VT, Operand);
2215   SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT);
2216 
2217   return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand);
2218 }
2219 
2220 // exp2(M_LOG2E_F * f);
2221 SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const {
2222   EVT VT = Op.getValueType();
2223   SDLoc SL(Op);
2224   SDValue Src = Op.getOperand(0);
2225 
2226   const SDValue K = DAG.getConstantFP(numbers::log2e, SL, VT);
2227   SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Src, K, Op->getFlags());
2228   return DAG.getNode(ISD::FEXP2, SL, VT, Mul, Op->getFlags());
2229 }
2230 
2231 static bool isCtlzOpc(unsigned Opc) {
2232   return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
2233 }
2234 
2235 static bool isCttzOpc(unsigned Opc) {
2236   return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF;
2237 }
2238 
2239 SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const {
2240   SDLoc SL(Op);
2241   SDValue Src = Op.getOperand(0);
2242 
2243   assert(isCtlzOpc(Op.getOpcode()) || isCttzOpc(Op.getOpcode()));
2244   bool Ctlz = isCtlzOpc(Op.getOpcode());
2245   unsigned NewOpc = Ctlz ? AMDGPUISD::FFBH_U32 : AMDGPUISD::FFBL_B32;
2246 
2247   bool ZeroUndef = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF ||
2248                    Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF;
2249 
2250   if (Src.getValueType() == MVT::i32) {
    // (ctlz src) -> (umin (ffbh src), 32)
    // (cttz src) -> (umin (ffbl src), 32)
    // (ctlz_zero_undef src) -> (ffbh src)
    // (cttz_zero_undef src) -> (ffbl src)
2255     SDValue NewOpr = DAG.getNode(NewOpc, SL, MVT::i32, Src);
2256     if (!ZeroUndef) {
2257       const SDValue Const32 = DAG.getConstant(32, SL, MVT::i32);
2258       NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, NewOpr, Const32);
2259     }
2260     return NewOpr;
2261   }
2262 
2263   SDValue Lo, Hi;
2264   std::tie(Lo, Hi) = split64BitValue(Src, DAG);
2265 
2266   SDValue OprLo = DAG.getNode(NewOpc, SL, MVT::i32, Lo);
2267   SDValue OprHi = DAG.getNode(NewOpc, SL, MVT::i32, Hi);
2268 
2269   // (ctlz hi:lo) -> (umin3 (ffbh hi), (uaddsat (ffbh lo), 32), 64)
2270   // (cttz hi:lo) -> (umin3 (uaddsat (ffbl hi), 32), (ffbl lo), 64)
2271   // (ctlz_zero_undef hi:lo) -> (umin (ffbh hi), (add (ffbh lo), 32))
2272   // (cttz_zero_undef hi:lo) -> (umin (add (ffbl hi), 32), (ffbl lo))
2273 
2274   unsigned AddOpc = ZeroUndef ? ISD::ADD : ISD::UADDSAT;
2275   const SDValue Const32 = DAG.getConstant(32, SL, MVT::i32);
2276   if (Ctlz)
2277     OprLo = DAG.getNode(AddOpc, SL, MVT::i32, OprLo, Const32);
2278   else
2279     OprHi = DAG.getNode(AddOpc, SL, MVT::i32, OprHi, Const32);
2280 
2281   SDValue NewOpr;
2282   NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, OprLo, OprHi);
2283   if (!ZeroUndef) {
2284     const SDValue Const64 = DAG.getConstant(64, SL, MVT::i32);
2285     NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, NewOpr, Const64);
2286   }
2287 
2288   return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr);
2289 }
2290 
2291 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
2292                                                bool Signed) const {
  // The regular method of converting a 64-bit integer to float consists
  // roughly of two steps: normalization and rounding. After normalization, the
2295   // conversion from a 64-bit integer to a float is essentially the same as the
2296   // one from a 32-bit integer. The only difference is that it has more
2297   // trailing bits to be rounded. To leverage the native 32-bit conversion, a
2298   // 64-bit integer could be preprocessed and fit into a 32-bit integer then
2299   // converted into the correct float number. The basic steps for the unsigned
2300   // conversion are illustrated in the following pseudo code:
2301   //
2302   // f32 uitofp(i64 u) {
2303   //   i32 hi, lo = split(u);
2304   //   // Only count the leading zeros in hi as we have native support of the
2305   //   // conversion from i32 to f32. If hi is all 0s, the conversion is
2306   //   // reduced to a 32-bit one automatically.
2307   //   i32 shamt = clz(hi); // Return 32 if hi is all 0s.
2308   //   u <<= shamt;
2309   //   hi, lo = split(u);
2310   //   hi |= (lo != 0) ? 1 : 0; // Adjust rounding bit in hi based on lo.
2311   //   // convert it as a 32-bit integer and scale the result back.
2312   //   return uitofp(hi) * 2^(32 - shamt);
2313   // }
2314   //
2315   // The signed one follows the same principle but uses 'ffbh_i32' to count its
  // sign bits instead. If 'ffbh_i32' is not available, the absolute value is
  // converted instead, followed by negation based on the original sign bit.
2318 
2319   SDLoc SL(Op);
2320   SDValue Src = Op.getOperand(0);
2321 
2322   SDValue Lo, Hi;
2323   std::tie(Lo, Hi) = split64BitValue(Src, DAG);
2324   SDValue Sign;
2325   SDValue ShAmt;
2326   if (Signed && Subtarget->isGCN()) {
2327     // We also need to consider the sign bit in Lo if Hi has just sign bits,
2328     // i.e. Hi is 0 or -1. However, that only needs to take the MSB into
2329     // account. That is, the maximal shift is
2330     // - 32 if Lo and Hi have opposite signs;
2331     // - 33 if Lo and Hi have the same sign.
2332     //
2333     // Or, MaxShAmt = 33 + OppositeSign, where
2334     //
2335     // OppositeSign is defined as ((Lo ^ Hi) >> 31), which is
2336     // - -1 if Lo and Hi have opposite signs; and
2337     // -  0 otherwise.
2338     //
2339     // All in all, ShAmt is calculated as
2340     //
2341     //  umin(sffbh(Hi), 33 + (Lo^Hi)>>31) - 1.
2342     //
2343     // or
2344     //
2345     //  umin(sffbh(Hi) - 1, 32 + (Lo^Hi)>>31).
2346     //
2347     // to reduce the critical path.
2348     SDValue OppositeSign = DAG.getNode(
2349         ISD::SRA, SL, MVT::i32, DAG.getNode(ISD::XOR, SL, MVT::i32, Lo, Hi),
2350         DAG.getConstant(31, SL, MVT::i32));
2351     SDValue MaxShAmt =
2352         DAG.getNode(ISD::ADD, SL, MVT::i32, DAG.getConstant(32, SL, MVT::i32),
2353                     OppositeSign);
2354     // Count the leading sign bits.
2355     ShAmt = DAG.getNode(AMDGPUISD::FFBH_I32, SL, MVT::i32, Hi);
    // Unlike the unsigned conversion, the shift should be one bit less to
    // preserve the sign bit.
2358     ShAmt = DAG.getNode(ISD::SUB, SL, MVT::i32, ShAmt,
2359                         DAG.getConstant(1, SL, MVT::i32));
2360     ShAmt = DAG.getNode(ISD::UMIN, SL, MVT::i32, ShAmt, MaxShAmt);
2361   } else {
2362     if (Signed) {
      // Without 'ffbh_i32', only leading zeros can be counted, so take the
      // absolute value first.
2365       Sign = DAG.getNode(ISD::SRA, SL, MVT::i64, Src,
2366                          DAG.getConstant(63, SL, MVT::i64));
2367       SDValue Abs =
2368           DAG.getNode(ISD::XOR, SL, MVT::i64,
2369                       DAG.getNode(ISD::ADD, SL, MVT::i64, Src, Sign), Sign);
2370       std::tie(Lo, Hi) = split64BitValue(Abs, DAG);
2371     }
2372     // Count the leading zeros.
2373     ShAmt = DAG.getNode(ISD::CTLZ, SL, MVT::i32, Hi);
2374     // The shift amount for signed integers is [0, 32].
2375   }
2376   // Normalize the given 64-bit integer.
2377   SDValue Norm = DAG.getNode(ISD::SHL, SL, MVT::i64, Src, ShAmt);
2378   // Split it again.
2379   std::tie(Lo, Hi) = split64BitValue(Norm, DAG);
2380   // Calculate the adjust bit for rounding.
2381   // (lo != 0) ? 1 : 0 => (lo >= 1) ? 1 : 0 => umin(1, lo)
2382   SDValue Adjust = DAG.getNode(ISD::UMIN, SL, MVT::i32,
2383                                DAG.getConstant(1, SL, MVT::i32), Lo);
2384   // Get the 32-bit normalized integer.
2385   Norm = DAG.getNode(ISD::OR, SL, MVT::i32, Hi, Adjust);
2386   // Convert the normalized 32-bit integer into f32.
2387   unsigned Opc =
2388       (Signed && Subtarget->isGCN()) ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
2389   SDValue FVal = DAG.getNode(Opc, SL, MVT::f32, Norm);
2390 
2391   // Finally, need to scale back the converted floating number as the original
2392   // 64-bit integer is converted as a 32-bit one.
2393   ShAmt = DAG.getNode(ISD::SUB, SL, MVT::i32, DAG.getConstant(32, SL, MVT::i32),
2394                       ShAmt);
2395   // On GCN, use LDEXP directly.
2396   if (Subtarget->isGCN())
2397     return DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f32, FVal, ShAmt);
2398 
  // Otherwise, shift 'ShAmt' into the exponent position and add it directly to
  // the exponent field to emulate multiplication by 2^ShAmt. The 8-bit
  // exponent is enough to avoid overflowing into the sign bit.
2402   SDValue Exp = DAG.getNode(ISD::SHL, SL, MVT::i32, ShAmt,
2403                             DAG.getConstant(23, SL, MVT::i32));
2404   SDValue IVal =
2405       DAG.getNode(ISD::ADD, SL, MVT::i32,
2406                   DAG.getNode(ISD::BITCAST, SL, MVT::i32, FVal), Exp);
2407   if (Signed) {
2408     // Set the sign bit.
2409     Sign = DAG.getNode(ISD::SHL, SL, MVT::i32,
2410                        DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Sign),
2411                        DAG.getConstant(31, SL, MVT::i32));
2412     IVal = DAG.getNode(ISD::OR, SL, MVT::i32, IVal, Sign);
2413   }
2414   return DAG.getNode(ISD::BITCAST, SL, MVT::f32, IVal);
2415 }
2416 
2417 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
2418                                                bool Signed) const {
2419   SDLoc SL(Op);
2420   SDValue Src = Op.getOperand(0);
2421 
2422   SDValue Lo, Hi;
2423   std::tie(Lo, Hi) = split64BitValue(Src, DAG);
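  // Convert the halves separately and recombine:
  //   result = (double)Hi * 2^32 + (double)Lo, with the scaling done by LDEXP.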
2424 
2425   SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
2426                               SL, MVT::f64, Hi);
2427 
2428   SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
2429 
2430   SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
2431                               DAG.getConstant(32, SL, MVT::i32));
2432   // TODO: Should this propagate fast-math-flags?
2433   return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
2434 }
2435 
2436 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
2437                                                SelectionDAG &DAG) const {
2438   // TODO: Factor out code common with LowerSINT_TO_FP.
2439   EVT DestVT = Op.getValueType();
2440   SDValue Src = Op.getOperand(0);
2441   EVT SrcVT = Src.getValueType();
2442 
2443   if (SrcVT == MVT::i16) {
2444     if (DestVT == MVT::f16)
2445       return Op;
2446     SDLoc DL(Op);
2447 
2448     // Promote src to i32
2449     SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Src);
2450     return DAG.getNode(ISD::UINT_TO_FP, DL, DestVT, Ext);
2451   }
2452 
2453   assert(SrcVT == MVT::i64 && "operation should be legal");
2454 
2455   if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2456     SDLoc DL(Op);
2457 
2458     SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
    SDValue FPRoundFlag = DAG.getIntPtrConstant(0, DL);
2460     SDValue FPRound =
2461         DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2462 
2463     return FPRound;
2464   }
2465 
2466   if (DestVT == MVT::f32)
2467     return LowerINT_TO_FP32(Op, DAG, false);
2468 
2469   assert(DestVT == MVT::f64);
2470   return LowerINT_TO_FP64(Op, DAG, false);
2471 }
2472 
2473 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
2474                                               SelectionDAG &DAG) const {
2475   EVT DestVT = Op.getValueType();
2476 
2477   SDValue Src = Op.getOperand(0);
2478   EVT SrcVT = Src.getValueType();
2479 
2480   if (SrcVT == MVT::i16) {
2481     if (DestVT == MVT::f16)
2482       return Op;
2483 
2484     SDLoc DL(Op);
2485     // Promote src to i32
2486     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32, Src);
2487     return DAG.getNode(ISD::SINT_TO_FP, DL, DestVT, Ext);
2488   }
2489 
2490   assert(SrcVT == MVT::i64 && "operation should be legal");
2491 
2492   // TODO: Factor out code common with LowerUINT_TO_FP.
2493 
2494   if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2495     SDLoc DL(Op);
2496     SDValue Src = Op.getOperand(0);
2497 
2498     SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
    SDValue FPRoundFlag = DAG.getIntPtrConstant(0, DL);
2500     SDValue FPRound =
2501         DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2502 
2503     return FPRound;
2504   }
2505 
2506   if (DestVT == MVT::f32)
2507     return LowerINT_TO_FP32(Op, DAG, true);
2508 
2509   assert(DestVT == MVT::f64);
2510   return LowerINT_TO_FP64(Op, DAG, true);
2511 }
2512 
2513 SDValue AMDGPUTargetLowering::LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG,
2514                                                bool Signed) const {
2515   SDLoc SL(Op);
2516 
2517   SDValue Src = Op.getOperand(0);
2518   EVT SrcVT = Src.getValueType();
2519 
2520   assert(SrcVT == MVT::f32 || SrcVT == MVT::f64);
2521 
2522   // The basic idea of converting a floating point number into a pair of 32-bit
2523   // integers is illustrated as follows:
2524   //
2525   //     tf := trunc(val);
2526   //    hif := floor(tf * 2^-32);
2527   //    lof := tf - hif * 2^32; // lof is always positive due to floor.
2528   //     hi := fptoi(hif);
2529   //     lo := fptoi(lof);
2530   //
2531   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, SrcVT, Src);
2532   SDValue Sign;
2533   if (Signed && SrcVT == MVT::f32) {
    // However, a 32-bit floating point number has only a 23-bit mantissa,
    // which is not enough to hold all the significant bits of `lof` if val is
    // negative. To avoid the loss of precision, we take the absolute value
    // after truncating and flip the result back based on the original
    // signedness.
2539     Sign = DAG.getNode(ISD::SRA, SL, MVT::i32,
2540                        DAG.getNode(ISD::BITCAST, SL, MVT::i32, Trunc),
2541                        DAG.getConstant(31, SL, MVT::i32));
2542     Trunc = DAG.getNode(ISD::FABS, SL, SrcVT, Trunc);
2543   }
2544 
2545   SDValue K0, K1;
2546   if (SrcVT == MVT::f64) {
2547     K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(/*2^-32*/ 0x3df0000000000000)),
2548                            SL, SrcVT);
2549     K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(/*-2^32*/ 0xc1f0000000000000)),
2550                            SL, SrcVT);
2551   } else {
2552     K0 = DAG.getConstantFP(BitsToFloat(UINT32_C(/*2^-32*/ 0x2f800000)), SL,
2553                            SrcVT);
2554     K1 = DAG.getConstantFP(BitsToFloat(UINT32_C(/*-2^32*/ 0xcf800000)), SL,
2555                            SrcVT);
2556   }
2557   // TODO: Should this propagate fast-math-flags?
2558   SDValue Mul = DAG.getNode(ISD::FMUL, SL, SrcVT, Trunc, K0);
2559 
2560   SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, SrcVT, Mul);
2561 
2562   SDValue Fma = DAG.getNode(ISD::FMA, SL, SrcVT, FloorMul, K1, Trunc);
2563 
2564   SDValue Hi = DAG.getNode((Signed && SrcVT == MVT::f64) ? ISD::FP_TO_SINT
2565                                                          : ISD::FP_TO_UINT,
2566                            SL, MVT::i32, FloorMul);
2567   SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
2568 
2569   SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i64,
2570                                DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi}));
2571 
2572   if (Signed && SrcVT == MVT::f32) {
2573     assert(Sign);
2574     // Flip the result based on the signedness, which is either all 0s or 1s.
2575     Sign = DAG.getNode(ISD::BITCAST, SL, MVT::i64,
2576                        DAG.getBuildVector(MVT::v2i32, SL, {Sign, Sign}));
2577     // r := xor(r, sign) - sign;
2578     Result =
2579         DAG.getNode(ISD::SUB, SL, MVT::i64,
2580                     DAG.getNode(ISD::XOR, SL, MVT::i64, Result, Sign), Sign);
2581   }
2582 
2583   return Result;
2584 }
2585 
2586 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const {
2587   SDLoc DL(Op);
2588   SDValue N0 = Op.getOperand(0);
2589 
2590   // Convert to target node to get known bits
2591   if (N0.getValueType() == MVT::f32)
2592     return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
2593 
2594   if (getTargetMachine().Options.UnsafeFPMath) {
2595     // There is a generic expand for FP_TO_FP16 with unsafe fast math.
2596     return SDValue();
2597   }
2598 
2599   assert(N0.getSimpleValueType() == MVT::f64);
2600 
2601   // f64 -> f16 conversion using round-to-nearest-even rounding mode.
2602   const unsigned ExpMask = 0x7ff;
2603   const unsigned ExpBiasf64 = 1023;
2604   const unsigned ExpBiasf16 = 15;
2605   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
2606   SDValue One = DAG.getConstant(1, DL, MVT::i32);
2607   SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0);
2608   SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U,
2609                            DAG.getConstant(32, DL, MVT::i64));
2610   UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32);
2611   U = DAG.getZExtOrTrunc(U, DL, MVT::i32);
2612   SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2613                           DAG.getConstant(20, DL, MVT::i64));
2614   E = DAG.getNode(ISD::AND, DL, MVT::i32, E,
2615                   DAG.getConstant(ExpMask, DL, MVT::i32));
2616   // Subtract the fp64 exponent bias (1023) to get the real exponent and
2617   // add the f16 bias (15) to get the biased exponent for the f16 format.
2618   E = DAG.getNode(ISD::ADD, DL, MVT::i32, E,
2619                   DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32));
2620 
2621   SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2622                           DAG.getConstant(8, DL, MVT::i32));
2623   M = DAG.getNode(ISD::AND, DL, MVT::i32, M,
2624                   DAG.getConstant(0xffe, DL, MVT::i32));
2625 
2626   SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH,
2627                                   DAG.getConstant(0x1ff, DL, MVT::i32));
2628   MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U);
2629 
2630   SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ);
2631   M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set);
2632 
2633   // (M != 0 ? 0x0200 : 0) | 0x7c00;
2634   SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32,
2635       DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32),
2636                       Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32));
2637 
2638   // N = M | (E << 12);
2639   SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2640       DAG.getNode(ISD::SHL, DL, MVT::i32, E,
2641                   DAG.getConstant(12, DL, MVT::i32)));
2642 
2643   // B = clamp(1-E, 0, 13);
2644   SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32,
2645                                   One, E);
2646   SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero);
2647   B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B,
2648                   DAG.getConstant(13, DL, MVT::i32));
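  // For a denormal result (E < 1), the block below shifts the significand
  // (with the implicit leading bit, 0x1000, set) right by B and ORs in a
  // sticky bit if any bits were shifted out, so rounding still sees them.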
2649 
2650   SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2651                                    DAG.getConstant(0x1000, DL, MVT::i32));
2652 
2653   SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B);
2654   SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B);
2655   SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE);
2656   D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1);
2657 
2658   SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT);
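  // Round to nearest even: drop the two low guard bits and increment when they
  // are above the halfway point, or exactly halfway with an odd result LSB.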
2659   SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V,
2660                               DAG.getConstant(0x7, DL, MVT::i32));
2661   V = DAG.getNode(ISD::SRL, DL, MVT::i32, V,
2662                   DAG.getConstant(2, DL, MVT::i32));
2663   SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32),
2664                                One, Zero, ISD::SETEQ);
2665   SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32),
2666                                One, Zero, ISD::SETGT);
2667   V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1);
2668   V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1);
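  // Overflow (biased E > 30) becomes infinity (0x7c00); a source exponent
  // field of 0x7ff (biased E == 1039) means Inf/NaN, so use I instead.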
2669 
2670   V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32),
2671                       DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT);
2672   V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32),
2673                       I, V, ISD::SETEQ);
2674 
2675   // Extract the sign bit.
2676   SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2677                             DAG.getConstant(16, DL, MVT::i32));
2678   Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign,
2679                      DAG.getConstant(0x8000, DL, MVT::i32));
2680 
2681   V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V);
2682   return DAG.getZExtOrTrunc(V, DL, Op.getValueType());
2683 }
2684 
2685 SDValue AMDGPUTargetLowering::LowerFP_TO_INT(SDValue Op,
2686                                              SelectionDAG &DAG) const {
2687   SDValue Src = Op.getOperand(0);
2688   unsigned OpOpcode = Op.getOpcode();
2689   EVT SrcVT = Src.getValueType();
2690   EVT DestVT = Op.getValueType();
2691 
2692   // Will be selected natively
2693   if (SrcVT == MVT::f16 && DestVT == MVT::i16)
2694     return Op;
2695 
2696   // Promote i16 to i32
2697   if (DestVT == MVT::i16 && (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
2698     SDLoc DL(Op);
2699 
2700     SDValue FpToInt32 = DAG.getNode(OpOpcode, DL, MVT::i32, Src);
2701     return DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToInt32);
2702   }
2703 
2704   if (SrcVT == MVT::f16 ||
2705       (SrcVT == MVT::f32 && Src.getOpcode() == ISD::FP16_TO_FP)) {
2706     SDLoc DL(Op);
2707 
2708     SDValue FpToInt32 = DAG.getNode(OpOpcode, DL, MVT::i32, Src);
2709     unsigned Ext =
2710         OpOpcode == ISD::FP_TO_SINT ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
2711     return DAG.getNode(Ext, DL, MVT::i64, FpToInt32);
2712   }
2713 
2714   if (DestVT == MVT::i64 && (SrcVT == MVT::f32 || SrcVT == MVT::f64))
2715     return LowerFP_TO_INT64(Op, DAG, OpOpcode == ISD::FP_TO_SINT);
2716 
2717   return SDValue();
2718 }
2719 
2720 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
2721                                                      SelectionDAG &DAG) const {
2722   EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2723   MVT VT = Op.getSimpleValueType();
2724   MVT ScalarVT = VT.getScalarType();
2725 
2726   assert(VT.isVector());
2727 
2728   SDValue Src = Op.getOperand(0);
2729   SDLoc DL(Op);
2730 
2731   // TODO: Don't scalarize on Evergreen?
2732   unsigned NElts = VT.getVectorNumElements();
2733   SmallVector<SDValue, 8> Args;
2734   DAG.ExtractVectorElements(Src, Args, 0, NElts);
2735 
2736   SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
2737   for (unsigned I = 0; I < NElts; ++I)
2738     Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
2739 
2740   return DAG.getBuildVector(VT, DL, Args);
2741 }
2742 
2743 //===----------------------------------------------------------------------===//
2744 // Custom DAG optimizations
2745 //===----------------------------------------------------------------------===//
2746 
2747 static bool isU24(SDValue Op, SelectionDAG &DAG) {
2748   return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
2749 }
2750 
2751 static bool isI24(SDValue Op, SelectionDAG &DAG) {
2752   EVT VT = Op.getValueType();
2753   return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
2754                                      // as unsigned 24-bit values.
2755          AMDGPUTargetLowering::numBitsSigned(Op, DAG) <= 24;
2756 }
2757 
2758 static SDValue simplifyMul24(SDNode *Node24,
2759                              TargetLowering::DAGCombinerInfo &DCI) {
2760   SelectionDAG &DAG = DCI.DAG;
2761   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2762   bool IsIntrin = Node24->getOpcode() == ISD::INTRINSIC_WO_CHAIN;
2763 
2764   SDValue LHS = IsIntrin ? Node24->getOperand(1) : Node24->getOperand(0);
2765   SDValue RHS = IsIntrin ? Node24->getOperand(2) : Node24->getOperand(1);
2766   unsigned NewOpcode = Node24->getOpcode();
2767   if (IsIntrin) {
2768     unsigned IID = cast<ConstantSDNode>(Node24->getOperand(0))->getZExtValue();
2769     switch (IID) {
2770     case Intrinsic::amdgcn_mul_i24:
2771       NewOpcode = AMDGPUISD::MUL_I24;
2772       break;
2773     case Intrinsic::amdgcn_mul_u24:
2774       NewOpcode = AMDGPUISD::MUL_U24;
2775       break;
2776     case Intrinsic::amdgcn_mulhi_i24:
2777       NewOpcode = AMDGPUISD::MULHI_I24;
2778       break;
2779     case Intrinsic::amdgcn_mulhi_u24:
2780       NewOpcode = AMDGPUISD::MULHI_U24;
2781       break;
2782     default:
2783       llvm_unreachable("Expected 24-bit mul intrinsic");
2784     }
2785   }
2786 
2787   APInt Demanded = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 24);
2788 
2789   // First try to simplify using SimplifyMultipleUseDemandedBits which allows
2790   // the operands to have other uses, but will only perform simplifications that
2791   // involve bypassing some nodes for this user.
2792   SDValue DemandedLHS = TLI.SimplifyMultipleUseDemandedBits(LHS, Demanded, DAG);
2793   SDValue DemandedRHS = TLI.SimplifyMultipleUseDemandedBits(RHS, Demanded, DAG);
2794   if (DemandedLHS || DemandedRHS)
2795     return DAG.getNode(NewOpcode, SDLoc(Node24), Node24->getVTList(),
2796                        DemandedLHS ? DemandedLHS : LHS,
2797                        DemandedRHS ? DemandedRHS : RHS);
2798 
2799   // Now try SimplifyDemandedBits which can simplify the nodes used by our
2800   // operands if this node is the only user.
2801   if (TLI.SimplifyDemandedBits(LHS, Demanded, DCI))
2802     return SDValue(Node24, 0);
2803   if (TLI.SimplifyDemandedBits(RHS, Demanded, DCI))
2804     return SDValue(Node24, 0);
2805 
2806   return SDValue();
2807 }
2808 
2809 template <typename IntTy>
2810 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
2811                                uint32_t Width, const SDLoc &DL) {
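  // For example, extracting Width = 8 bits at Offset = 4 from 0x12345678
  // yields 0x67: shift left by 20 to discard the upper bits, then shift right
  // by 24 (arithmetic for the signed variant, logical for the unsigned one).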
2812   if (Width + Offset < 32) {
2813     uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
2814     IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
2815     return DAG.getConstant(Result, DL, MVT::i32);
2816   }
2817 
2818   return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
2819 }
2820 
2821 static bool hasVolatileUser(SDNode *Val) {
2822   for (SDNode *U : Val->uses()) {
2823     if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
2824       if (M->isVolatile())
2825         return true;
2826     }
2827   }
2828 
2829   return false;
2830 }
2831 
2832 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
2833   // i32 vectors are the canonical memory type.
2834   if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT))
2835     return false;
2836 
2837   if (!VT.isByteSized())
2838     return false;
2839 
2840   unsigned Size = VT.getStoreSize();
2841 
2842   if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector())
2843     return false;
2844 
2845   if (Size == 3 || (Size > 4 && (Size % 4 != 0)))
2846     return false;
2847 
2848   return true;
2849 }
2850 
2851 // Replace load of an illegal type with a load of a bitcast to a friendlier
2852 // type.
2853 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
2854                                                  DAGCombinerInfo &DCI) const {
2855   if (!DCI.isBeforeLegalize())
2856     return SDValue();
2857 
2858   LoadSDNode *LN = cast<LoadSDNode>(N);
2859   if (!LN->isSimple() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
2860     return SDValue();
2861 
2862   SDLoc SL(N);
2863   SelectionDAG &DAG = DCI.DAG;
2864   EVT VT = LN->getMemoryVT();
2865 
2866   unsigned Size = VT.getStoreSize();
2867   Align Alignment = LN->getAlign();
2868   if (Alignment < Size && isTypeLegal(VT)) {
2869     bool IsFast;
2870     unsigned AS = LN->getAddressSpace();
2871 
2872     // Expand unaligned loads earlier than legalization. Due to visitation order
2873     // problems during legalization, the emitted instructions to pack and unpack
2874     // the bytes again are not eliminated in the case of an unaligned copy.
2875     if (!allowsMisalignedMemoryAccesses(
2876             VT, AS, Alignment, LN->getMemOperand()->getFlags(), &IsFast)) {
2877       if (VT.isVector())
2878         return SplitVectorLoad(SDValue(LN, 0), DAG);
2879 
2880       SDValue Ops[2];
2881       std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
2882 
2883       return DAG.getMergeValues(Ops, SDLoc(N));
2884     }
2885 
2886     if (!IsFast)
2887       return SDValue();
2888   }
2889 
2890   if (!shouldCombineMemoryType(VT))
2891     return SDValue();
2892 
2893   EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2894 
2895   SDValue NewLoad
2896     = DAG.getLoad(NewVT, SL, LN->getChain(),
2897                   LN->getBasePtr(), LN->getMemOperand());
2898 
2899   SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
2900   DCI.CombineTo(N, BC, NewLoad.getValue(1));
2901   return SDValue(N, 0);
2902 }
2903 
2904 // Replace store of an illegal type with a store of a bitcast to a friendlier
2905 // type.
2906 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
2907                                                   DAGCombinerInfo &DCI) const {
2908   if (!DCI.isBeforeLegalize())
2909     return SDValue();
2910 
2911   StoreSDNode *SN = cast<StoreSDNode>(N);
2912   if (!SN->isSimple() || !ISD::isNormalStore(SN))
2913     return SDValue();
2914 
2915   EVT VT = SN->getMemoryVT();
2916   unsigned Size = VT.getStoreSize();
2917 
2918   SDLoc SL(N);
2919   SelectionDAG &DAG = DCI.DAG;
2920   Align Alignment = SN->getAlign();
2921   if (Alignment < Size && isTypeLegal(VT)) {
2922     bool IsFast;
2923     unsigned AS = SN->getAddressSpace();
2924 
2925     // Expand unaligned stores earlier than legalization. Due to visitation
2926     // order problems during legalization, the emitted instructions to pack and
2927     // unpack the bytes again are not eliminated in the case of an unaligned
2928     // copy.
2929     if (!allowsMisalignedMemoryAccesses(
2930             VT, AS, Alignment, SN->getMemOperand()->getFlags(), &IsFast)) {
2931       if (VT.isVector())
2932         return SplitVectorStore(SDValue(SN, 0), DAG);
2933 
2934       return expandUnalignedStore(SN, DAG);
2935     }
2936 
2937     if (!IsFast)
2938       return SDValue();
2939   }
2940 
2941   if (!shouldCombineMemoryType(VT))
2942     return SDValue();
2943 
2944   EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2945   SDValue Val = SN->getValue();
2946 
2947   //DCI.AddToWorklist(Val.getNode());
2948 
2949   bool OtherUses = !Val.hasOneUse();
2950   SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
2951   if (OtherUses) {
2952     SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
2953     DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
2954   }
2955 
2956   return DAG.getStore(SN->getChain(), SL, CastVal,
2957                       SN->getBasePtr(), SN->getMemOperand());
2958 }
2959 
2960 // FIXME: This should go in generic DAG combiner with an isTruncateFree check,
2961 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU
2962 // issues.
2963 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N,
2964                                                         DAGCombinerInfo &DCI) const {
2965   SelectionDAG &DAG = DCI.DAG;
2966   SDValue N0 = N->getOperand(0);
2967 
2968   // (vt2 (assertzext (truncate vt0:x), vt1)) ->
2969   //     (vt2 (truncate (assertzext vt0:x, vt1)))
2970   if (N0.getOpcode() == ISD::TRUNCATE) {
2971     SDValue N1 = N->getOperand(1);
2972     EVT ExtVT = cast<VTSDNode>(N1)->getVT();
2973     SDLoc SL(N);
2974 
2975     SDValue Src = N0.getOperand(0);
2976     EVT SrcVT = Src.getValueType();
2977     if (SrcVT.bitsGE(ExtVT)) {
2978       SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1);
2979       return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg);
2980     }
2981   }
2982 
2983   return SDValue();
2984 }
2985 
2986 SDValue AMDGPUTargetLowering::performIntrinsicWOChainCombine(
2987   SDNode *N, DAGCombinerInfo &DCI) const {
2988   unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
2989   switch (IID) {
2990   case Intrinsic::amdgcn_mul_i24:
2991   case Intrinsic::amdgcn_mul_u24:
2992   case Intrinsic::amdgcn_mulhi_i24:
2993   case Intrinsic::amdgcn_mulhi_u24:
2994     return simplifyMul24(N, DCI);
2995   case Intrinsic::amdgcn_fract:
2996   case Intrinsic::amdgcn_rsq:
2997   case Intrinsic::amdgcn_rcp_legacy:
2998   case Intrinsic::amdgcn_rsq_legacy:
2999   case Intrinsic::amdgcn_rsq_clamp:
3000   case Intrinsic::amdgcn_ldexp: {
3001     // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted
3002     SDValue Src = N->getOperand(1);
3003     return Src.isUndef() ? Src : SDValue();
3004   }
3005   default:
3006     return SDValue();
3007   }
3008 }
3009 
3010 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the
3011 /// binary operation \p Opc on each half with the matching constant operand.
3012 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
3013   DAGCombinerInfo &DCI, const SDLoc &SL,
3014   unsigned Opc, SDValue LHS,
3015   uint32_t ValLo, uint32_t ValHi) const {
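  // For example, with Opc = ISD::AND and a 64-bit constant 0x0000ffff00000000
  // split into ValLo/ValHi, this produces (and lo_32(x), 0) and
  // (and hi_32(x), 0x0000ffff), rebuilt as a v2i32 and bitcast back to i64.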
3016   SelectionDAG &DAG = DCI.DAG;
3017   SDValue Lo, Hi;
3018   std::tie(Lo, Hi) = split64BitValue(LHS, DAG);
3019 
3020   SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32);
3021   SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32);
3022 
3023   SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS);
3024   SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS);
3025 
3026   // Re-visit the ands. It's possible we eliminated one of them and it could
3027   // simplify the vector.
3028   DCI.AddToWorklist(Lo.getNode());
3029   DCI.AddToWorklist(Hi.getNode());
3030 
3031   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
3032   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3033 }
3034 
3035 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
3036                                                 DAGCombinerInfo &DCI) const {
3037   EVT VT = N->getValueType(0);
3038 
3039   ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3040   if (!RHS)
3041     return SDValue();
3042 
3043   SDValue LHS = N->getOperand(0);
3044   unsigned RHSVal = RHS->getZExtValue();
3045   if (!RHSVal)
3046     return LHS;
3047 
3048   SDLoc SL(N);
3049   SelectionDAG &DAG = DCI.DAG;
3050 
3051   switch (LHS->getOpcode()) {
3052   default:
3053     break;
3054   case ISD::ZERO_EXTEND:
3055   case ISD::SIGN_EXTEND:
3056   case ISD::ANY_EXTEND: {
3057     SDValue X = LHS->getOperand(0);
3058 
3059     if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
3060         isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
3061       // Prefer build_vector as the canonical form if packed types are legal.
3062       // (shl ([asz]ext i16:x), 16) -> build_vector 0, x
3063       SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
3064        { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
3065       return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3066     }
3067 
3068     // shl (ext x) => zext (shl x), if shift does not overflow int
3069     if (VT != MVT::i64)
3070       break;
3071     KnownBits Known = DAG.computeKnownBits(X);
3072     unsigned LZ = Known.countMinLeadingZeros();
3073     if (LZ < RHSVal)
3074       break;
3075     EVT XVT = X.getValueType();
3076     SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
3077     return DAG.getZExtOrTrunc(Shl, SL, VT);
3078   }
3079   }
3080 
3081   if (VT != MVT::i64)
3082     return SDValue();
3083 
3084   // i64 (shl x, C) -> (build_pair 0, (shl x, C - 32))
3085 
3086   // On some subtargets, 64-bit shift is a quarter rate instruction. In the
3087   // common case, splitting this into a move and a 32-bit shift is faster and
3088   // the same code size.
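  // For example, (shl i64:x, 40) becomes a bitcast of
  // (build_vector 0, (shl (i32 (trunc x)), 8)).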
3089   if (RHSVal < 32)
3090     return SDValue();
3091 
3092   SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
3093 
3094   SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
3095   SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
3096 
3097   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3098 
3099   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
3100   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3101 }
3102 
3103 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
3104                                                 DAGCombinerInfo &DCI) const {
3105   if (N->getValueType(0) != MVT::i64)
3106     return SDValue();
3107 
3108   const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3109   if (!RHS)
3110     return SDValue();
3111 
3112   SelectionDAG &DAG = DCI.DAG;
3113   SDLoc SL(N);
3114   unsigned RHSVal = RHS->getZExtValue();
3115 
3116   // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
3117   if (RHSVal == 32) {
3118     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3119     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3120                                    DAG.getConstant(31, SL, MVT::i32));
3121 
3122     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
3123     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3124   }
3125 
3126   // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
3127   if (RHSVal == 63) {
3128     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3129     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3130                                    DAG.getConstant(31, SL, MVT::i32));
3131     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
3132     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3133   }
3134 
3135   return SDValue();
3136 }
3137 
3138 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
3139                                                 DAGCombinerInfo &DCI) const {
3140   auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3141   if (!RHS)
3142     return SDValue();
3143 
3144   EVT VT = N->getValueType(0);
3145   SDValue LHS = N->getOperand(0);
3146   unsigned ShiftAmt = RHS->getZExtValue();
3147   SelectionDAG &DAG = DCI.DAG;
3148   SDLoc SL(N);
3149 
3150   // Fold (srl (and x, c1 << c2), c2) -> (and (srl x, c2), c1).
3151   // This improves the ability to match BFE patterns in isel.
3152   if (LHS.getOpcode() == ISD::AND) {
3153     if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
3154       unsigned MaskIdx, MaskLen;
3155       if (Mask->getAPIntValue().isShiftedMask(MaskIdx, MaskLen) &&
3156           MaskIdx == ShiftAmt) {
3157         return DAG.getNode(
3158             ISD::AND, SL, VT,
3159             DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)),
3160             DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1)));
3161       }
3162     }
3163   }
3164 
3165   if (VT != MVT::i64)
3166     return SDValue();
3167 
3168   if (ShiftAmt < 32)
3169     return SDValue();
3170 
3171   // srl i64:x, C for C >= 32
3172   // =>
3173   //   build_pair (srl hi_32(x), C - 32), 0
3174   SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3175 
3176   SDValue Hi = getHiHalf64(LHS, DAG);
3177 
3178   SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
3179   SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
3180 
3181   SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
3182 
3183   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
3184 }
3185 
3186 SDValue AMDGPUTargetLowering::performTruncateCombine(
3187   SDNode *N, DAGCombinerInfo &DCI) const {
3188   SDLoc SL(N);
3189   SelectionDAG &DAG = DCI.DAG;
3190   EVT VT = N->getValueType(0);
3191   SDValue Src = N->getOperand(0);
3192 
3193   // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x)
3194   if (Src.getOpcode() == ISD::BITCAST && !VT.isVector()) {
3195     SDValue Vec = Src.getOperand(0);
3196     if (Vec.getOpcode() == ISD::BUILD_VECTOR) {
3197       SDValue Elt0 = Vec.getOperand(0);
3198       EVT EltVT = Elt0.getValueType();
3199       if (VT.getFixedSizeInBits() <= EltVT.getFixedSizeInBits()) {
3200         if (EltVT.isFloatingPoint()) {
3201           Elt0 = DAG.getNode(ISD::BITCAST, SL,
3202                              EltVT.changeTypeToInteger(), Elt0);
3203         }
3204 
3205         return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0);
3206       }
3207     }
3208   }
3209 
3210   // Equivalent of above for accessing the high element of a vector as an
3211   // integer operation.
3212   // trunc (srl (bitcast (build_vector x, y)), 16) -> trunc (bitcast y)
3213   if (Src.getOpcode() == ISD::SRL && !VT.isVector()) {
3214     if (auto K = isConstOrConstSplat(Src.getOperand(1))) {
3215       if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) {
3216         SDValue BV = stripBitcast(Src.getOperand(0));
3217         if (BV.getOpcode() == ISD::BUILD_VECTOR &&
3218             BV.getValueType().getVectorNumElements() == 2) {
3219           SDValue SrcElt = BV.getOperand(1);
3220           EVT SrcEltVT = SrcElt.getValueType();
3221           if (SrcEltVT.isFloatingPoint()) {
3222             SrcElt = DAG.getNode(ISD::BITCAST, SL,
3223                                  SrcEltVT.changeTypeToInteger(), SrcElt);
3224           }
3225 
3226           return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt);
3227         }
3228       }
3229     }
3230   }
3231 
3232   // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit.
3233   //
3234   // i16 (trunc (srl i64:x, K)), K <= 16 ->
3235   //     i16 (trunc (srl (i32 (trunc x)), K))
3236   if (VT.getScalarSizeInBits() < 32) {
3237     EVT SrcVT = Src.getValueType();
3238     if (SrcVT.getScalarSizeInBits() > 32 &&
3239         (Src.getOpcode() == ISD::SRL ||
3240          Src.getOpcode() == ISD::SRA ||
3241          Src.getOpcode() == ISD::SHL)) {
3242       SDValue Amt = Src.getOperand(1);
3243       KnownBits Known = DAG.computeKnownBits(Amt);
3244       unsigned Size = VT.getScalarSizeInBits();
3245       if ((Known.isConstant() && Known.getConstant().ule(Size)) ||
3246           (Known.countMaxActiveBits() <= Log2_32(Size))) {
3247         EVT MidVT = VT.isVector() ?
3248           EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3249                            VT.getVectorNumElements()) : MVT::i32;
3250 
3251         EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout());
3252         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT,
3253                                     Src.getOperand(0));
3254         DCI.AddToWorklist(Trunc.getNode());
3255 
3256         if (Amt.getValueType() != NewShiftVT) {
3257           Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT);
3258           DCI.AddToWorklist(Amt.getNode());
3259         }
3260 
3261         SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT,
3262                                           Trunc, Amt);
3263         return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift);
3264       }
3265     }
3266   }
3267 
3268   return SDValue();
3269 }
3270 
3271 // We need to specifically handle i64 mul here to avoid unnecessary conversion
3272 // instructions. If we only match on the legalized i64 mul expansion,
3273 // SimplifyDemandedBits will be unable to remove them because there will be
3274 // multiple uses due to the separate mul + mulh[su].
3275 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
3276                         SDValue N0, SDValue N1, unsigned Size, bool Signed) {
3277   if (Size <= 32) {
3278     unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3279     return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
3280   }
3281 
3282   unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3283   unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
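  // The (at most 48-bit) product is assembled from two 32-bit halves: MUL_*24
  // produces the low 32 bits and MULHI_*24 the remaining high bits.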
3284 
3285   SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
3286   SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
3287 
3288   return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64, MulLo, MulHi);
3289 }
3290 
3291 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
3292                                                 DAGCombinerInfo &DCI) const {
3293   EVT VT = N->getValueType(0);
3294 
3295   // Don't generate 24-bit multiplies on values that are in SGPRs, since
3296   // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
3297   // unnecessarily). isDivergent() is used as an approximation of whether the
3298   // value is in an SGPR.
3299   if (!N->isDivergent())
3300     return SDValue();
3301 
3302   unsigned Size = VT.getSizeInBits();
3303   if (VT.isVector() || Size > 64)
3304     return SDValue();
3305 
3306   // There are i16 integer mul/mad.
3307   if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
3308     return SDValue();
3309 
3310   SelectionDAG &DAG = DCI.DAG;
3311   SDLoc DL(N);
3312 
3313   SDValue N0 = N->getOperand(0);
3314   SDValue N1 = N->getOperand(1);
3315 
3316   // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
3317   // in the source into any_extends if the result of the mul is truncated. Since
3318   // we can assume the high bits are whatever we want, use the underlying value
3319   // to avoid the unknown high bits from interfering.
3320   if (N0.getOpcode() == ISD::ANY_EXTEND)
3321     N0 = N0.getOperand(0);
3322 
3323   if (N1.getOpcode() == ISD::ANY_EXTEND)
3324     N1 = N1.getOperand(0);
3325 
3326   SDValue Mul;
3327 
3328   if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
3329     N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3330     N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3331     Mul = getMul24(DAG, DL, N0, N1, Size, false);
3332   } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
3333     N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3334     N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3335     Mul = getMul24(DAG, DL, N0, N1, Size, true);
3336   } else {
3337     return SDValue();
3338   }
3339 
3340   // We need to use sext even for MUL_U24, because MUL_U24 is used
3341   // for signed multiply of 8 and 16-bit types.
3342   return DAG.getSExtOrTrunc(Mul, DL, VT);
3343 }
3344 
3345 SDValue
3346 AMDGPUTargetLowering::performMulLoHiCombine(SDNode *N,
3347                                             DAGCombinerInfo &DCI) const {
3348   if (N->getValueType(0) != MVT::i32)
3349     return SDValue();
3350 
3351   SelectionDAG &DAG = DCI.DAG;
3352   SDLoc DL(N);
3353 
3354   SDValue N0 = N->getOperand(0);
3355   SDValue N1 = N->getOperand(1);
3356 
3357   // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
3358   // in the source into any_extends if the result of the mul is truncated. Since
3359   // we can assume the high bits are whatever we want, use the underlying value
3360   // to avoid the unknown high bits from interfering.
3361   if (N0.getOpcode() == ISD::ANY_EXTEND)
3362     N0 = N0.getOperand(0);
3363   if (N1.getOpcode() == ISD::ANY_EXTEND)
3364     N1 = N1.getOperand(0);
3365 
3366   // Try to use two fast 24-bit multiplies (one for each half of the result)
3367   // instead of one slow extending multiply.
3368   unsigned LoOpcode, HiOpcode;
3369   if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
3370     N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3371     N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3372     LoOpcode = AMDGPUISD::MUL_U24;
3373     HiOpcode = AMDGPUISD::MULHI_U24;
3374   } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
3375     N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3376     N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3377     LoOpcode = AMDGPUISD::MUL_I24;
3378     HiOpcode = AMDGPUISD::MULHI_I24;
3379   } else {
3380     return SDValue();
3381   }
3382 
3383   SDValue Lo = DAG.getNode(LoOpcode, DL, MVT::i32, N0, N1);
3384   SDValue Hi = DAG.getNode(HiOpcode, DL, MVT::i32, N0, N1);
3385   DCI.CombineTo(N, Lo, Hi);
3386   return SDValue(N, 0);
3387 }
3388 
3389 SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
3390                                                   DAGCombinerInfo &DCI) const {
3391   EVT VT = N->getValueType(0);
3392 
3393   if (!Subtarget->hasMulI24() || VT.isVector())
3394     return SDValue();
3395 
3396   // Don't generate 24-bit multiplies on values that are in SGPRs, since
3397   // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
3398   // unnecessarily). isDivergent() is used as an approximation of whether the
3399   // value is in an SGPR.
3400   // This doesn't apply if no s_mul_hi is available (since we'll end up with a
3401   // valu op anyway)
3402   if (Subtarget->hasSMulHi() && !N->isDivergent())
3403     return SDValue();
3404 
3405   SelectionDAG &DAG = DCI.DAG;
3406   SDLoc DL(N);
3407 
3408   SDValue N0 = N->getOperand(0);
3409   SDValue N1 = N->getOperand(1);
3410 
3411   if (!isI24(N0, DAG) || !isI24(N1, DAG))
3412     return SDValue();
3413 
3414   N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3415   N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3416 
3417   SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
3418   DCI.AddToWorklist(Mulhi.getNode());
3419   return DAG.getSExtOrTrunc(Mulhi, DL, VT);
3420 }
3421 
3422 SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
3423                                                   DAGCombinerInfo &DCI) const {
3424   EVT VT = N->getValueType(0);
3425 
3426   if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
3427     return SDValue();
3428 
3429   // Don't generate 24-bit multiplies on values that are in SGPRs, since
3430   // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
3431   // unnecessarily). isDivergent() is used as an approximation of whether the
3432   // value is in an SGPR.
3433   // This doesn't apply if no s_mul_hi is available (since we'll end up with a
3434   // valu op anyway)
3435   if (Subtarget->hasSMulHi() && !N->isDivergent())
3436     return SDValue();
3437 
3438   SelectionDAG &DAG = DCI.DAG;
3439   SDLoc DL(N);
3440 
3441   SDValue N0 = N->getOperand(0);
3442   SDValue N1 = N->getOperand(1);
3443 
3444   if (!isU24(N0, DAG) || !isU24(N1, DAG))
3445     return SDValue();
3446 
3447   N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3448   N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3449 
3450   SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
3451   DCI.AddToWorklist(Mulhi.getNode());
3452   return DAG.getZExtOrTrunc(Mulhi, DL, VT);
3453 }
3454 
3455 static bool isNegativeOne(SDValue Val) {
3456   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
3457     return C->isAllOnes();
3458   return false;
3459 }
3460 
3461 SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG,
3462                                           SDValue Op,
3463                                           const SDLoc &DL,
3464                                           unsigned Opc) const {
3465   EVT VT = Op.getValueType();
3466   EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
3467   if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
3468                               LegalVT != MVT::i16))
3469     return SDValue();
3470 
3471   if (VT != MVT::i32)
3472     Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);
3473 
3474   SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op);
3475   if (VT != MVT::i32)
3476     FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX);
3477 
3478   return FFBX;
3479 }
3480 
3481 // The native instructions return -1 on 0 input. Optimize out a select that
3482 // produces -1 on 0.
3483 //
3484 // TODO: If zero is not undef, we could also do this if the output is compared
3485 // against the bitwidth.
3486 //
3487 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
3488 SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond,
3489                                                  SDValue LHS, SDValue RHS,
3490                                                  DAGCombinerInfo &DCI) const {
3491   ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
3492   if (!CmpRhs || !CmpRhs->isZero())
3493     return SDValue();
3494 
3495   SelectionDAG &DAG = DCI.DAG;
3496   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
3497   SDValue CmpLHS = Cond.getOperand(0);
3498 
3499   // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
3500   // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x
3501   if (CCOpcode == ISD::SETEQ &&
3502       (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
3503       RHS.getOperand(0) == CmpLHS && isNegativeOne(LHS)) {
3504     unsigned Opc =
3505         isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
3506     return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3507   }
3508 
3509   // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
3510   // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x
3511   if (CCOpcode == ISD::SETNE &&
3512       (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) &&
3513       LHS.getOperand(0) == CmpLHS && isNegativeOne(RHS)) {
3514     unsigned Opc =
3515         isCttzOpc(LHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
3516 
3517     return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3518   }
3519 
3520   return SDValue();
3521 }
3522 
3523 static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
3524                                          unsigned Op,
3525                                          const SDLoc &SL,
3526                                          SDValue Cond,
3527                                          SDValue N1,
3528                                          SDValue N2) {
3529   SelectionDAG &DAG = DCI.DAG;
3530   EVT VT = N1.getValueType();
3531 
3532   SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
3533                                   N1.getOperand(0), N2.getOperand(0));
3534   DCI.AddToWorklist(NewSelect.getNode());
3535   return DAG.getNode(Op, SL, VT, NewSelect);
3536 }
3537 
3538 // Pull a free FP operation out of a select so it may fold into uses.
3539 //
3540 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
3541 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
3542 //
3543 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
3544 // select c, (fabs x), +k -> fabs (select c, x, k)
3545 static SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
3546                                     SDValue N) {
3547   SelectionDAG &DAG = DCI.DAG;
3548   SDValue Cond = N.getOperand(0);
3549   SDValue LHS = N.getOperand(1);
3550   SDValue RHS = N.getOperand(2);
3551 
3552   EVT VT = N.getValueType();
3553   if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
3554       (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
3555     return distributeOpThroughSelect(DCI, LHS.getOpcode(),
3556                                      SDLoc(N), Cond, LHS, RHS);
3557   }
3558 
3559   bool Inv = false;
3560   if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
3561     std::swap(LHS, RHS);
3562     Inv = true;
3563   }
3564 
3565   // TODO: Support vector constants.
3566   ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
3567   if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS) {
3568     SDLoc SL(N);
3569     // If one side is an fneg/fabs and the other is a constant, we can push the
3570     // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
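    // For example, select c, (fneg x), 2.0 -> fneg (select c, x, -2.0).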
3571     SDValue NewLHS = LHS.getOperand(0);
3572     SDValue NewRHS = RHS;
3573 
3574     // Careful: if the neg can be folded up, don't try to pull it back down.
3575     bool ShouldFoldNeg = true;
3576 
3577     if (NewLHS.hasOneUse()) {
3578       unsigned Opc = NewLHS.getOpcode();
3579       if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(Opc))
3580         ShouldFoldNeg = false;
3581       if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
3582         ShouldFoldNeg = false;
3583     }
3584 
3585     if (ShouldFoldNeg) {
3586       if (LHS.getOpcode() == ISD::FNEG)
3587         NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3588       else if (CRHS->isNegative())
3589         return SDValue();
3590 
3591       if (Inv)
3592         std::swap(NewLHS, NewRHS);
3593 
3594       SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
3595                                       Cond, NewLHS, NewRHS);
3596       DCI.AddToWorklist(NewSelect.getNode());
3597       return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
3598     }
3599   }
3600 
3601   return SDValue();
3602 }
3603 
3604 
3605 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
3606                                                    DAGCombinerInfo &DCI) const {
3607   if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
3608     return Folded;
3609 
3610   SDValue Cond = N->getOperand(0);
3611   if (Cond.getOpcode() != ISD::SETCC)
3612     return SDValue();
3613 
3614   EVT VT = N->getValueType(0);
3615   SDValue LHS = Cond.getOperand(0);
3616   SDValue RHS = Cond.getOperand(1);
3617   SDValue CC = Cond.getOperand(2);
3618 
3619   SDValue True = N->getOperand(1);
3620   SDValue False = N->getOperand(2);
3621 
3622   if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
3623     SelectionDAG &DAG = DCI.DAG;
3624     if (DAG.isConstantValueOfAnyType(True) &&
3625         !DAG.isConstantValueOfAnyType(False)) {
3626       // Swap cmp + select pair to move constant to false input.
3627       // This will allow using VOPC cndmasks more often.
3628       // select (setcc x, y), k, x -> select (setccinv x, y), x, k
3629 
3630       SDLoc SL(N);
3631       ISD::CondCode NewCC =
3632           getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), LHS.getValueType());
3633 
3634       SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
3635       return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
3636     }
3637 
3638     if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
3639       SDValue MinMax
3640         = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
3641       // Revisit this node so we can catch min3/max3/med3 patterns.
3642       //DCI.AddToWorklist(MinMax.getNode());
3643       return MinMax;
3644     }
3645   }
3646 
3647   // There's no reason to not do this if the condition has other uses.
3648   return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
3649 }
3650 
3651 static bool isInv2Pi(const APFloat &APF) {
3652   static const APFloat KF16(APFloat::IEEEhalf(), APInt(16, 0x3118));
3653   static const APFloat KF32(APFloat::IEEEsingle(), APInt(32, 0x3e22f983));
3654   static const APFloat KF64(APFloat::IEEEdouble(), APInt(64, 0x3fc45f306dc9c882));
3655 
3656   return APF.bitwiseIsEqual(KF16) ||
3657          APF.bitwiseIsEqual(KF32) ||
3658          APF.bitwiseIsEqual(KF64);
3659 }
3660 
3661 // The negated forms of 0 and 1.0 / (2.0 * pi) do not have inline immediates,
3662 // so there is an additional cost to negate these constants.
3663 bool AMDGPUTargetLowering::isConstantCostlierToNegate(SDValue N) const {
3664   if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N)) {
3665     if (C->isZero() && !C->isNegative())
3666       return true;
3667 
3668     if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF()))
3669       return true;
3670   }
3671 
3672   return false;
3673 }
3674 
3675 static unsigned inverseMinMax(unsigned Opc) {
3676   switch (Opc) {
3677   case ISD::FMAXNUM:
3678     return ISD::FMINNUM;
3679   case ISD::FMINNUM:
3680     return ISD::FMAXNUM;
3681   case ISD::FMAXNUM_IEEE:
3682     return ISD::FMINNUM_IEEE;
3683   case ISD::FMINNUM_IEEE:
3684     return ISD::FMAXNUM_IEEE;
3685   case AMDGPUISD::FMAX_LEGACY:
3686     return AMDGPUISD::FMIN_LEGACY;
3687   case AMDGPUISD::FMIN_LEGACY:
3688     return  AMDGPUISD::FMAX_LEGACY;
3689   default:
3690     llvm_unreachable("invalid min/max opcode");
3691   }
3692 }
3693 
3694 SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
3695                                                  DAGCombinerInfo &DCI) const {
3696   SelectionDAG &DAG = DCI.DAG;
3697   SDValue N0 = N->getOperand(0);
3698   EVT VT = N->getValueType(0);
3699 
3700   unsigned Opc = N0.getOpcode();
3701 
3702   // If the input has multiple uses and we can either fold the negate down, or
3703   // the other uses cannot, give up. This both prevents unprofitable
3704   // transformations and infinite loops: we won't repeatedly try to fold around
3705   // a negate that has no 'good' form.
3706   if (N0.hasOneUse()) {
3707     // This may be able to fold into the source, but at a code size cost. Don't
3708     // fold if the fold into the user is free.
3709     if (allUsesHaveSourceMods(N, 0))
3710       return SDValue();
3711   } else {
3712     if (fnegFoldsIntoOp(Opc) &&
3713         (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode())))
3714       return SDValue();
3715   }
3716 
3717   SDLoc SL(N);
3718   switch (Opc) {
3719   case ISD::FADD: {
3720     if (!mayIgnoreSignedZero(N0))
3721       return SDValue();
3722 
3723     // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
3724     SDValue LHS = N0.getOperand(0);
3725     SDValue RHS = N0.getOperand(1);
3726 
3727     if (LHS.getOpcode() != ISD::FNEG)
3728       LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3729     else
3730       LHS = LHS.getOperand(0);
3731 
3732     if (RHS.getOpcode() != ISD::FNEG)
3733       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3734     else
3735       RHS = RHS.getOperand(0);
3736 
3737     SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
3738     if (Res.getOpcode() != ISD::FADD)
3739       return SDValue(); // Op got folded away.
3740     if (!N0.hasOneUse())
3741       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3742     return Res;
3743   }
3744   case ISD::FMUL:
3745   case AMDGPUISD::FMUL_LEGACY: {
3746     // (fneg (fmul x, y)) -> (fmul x, (fneg y))
3747     // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
3748     SDValue LHS = N0.getOperand(0);
3749     SDValue RHS = N0.getOperand(1);
3750 
3751     if (LHS.getOpcode() == ISD::FNEG)
3752       LHS = LHS.getOperand(0);
3753     else if (RHS.getOpcode() == ISD::FNEG)
3754       RHS = RHS.getOperand(0);
3755     else
3756       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3757 
3758     SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
3759     if (Res.getOpcode() != Opc)
3760       return SDValue(); // Op got folded away.
3761     if (!N0.hasOneUse())
3762       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3763     return Res;
3764   }
3765   case ISD::FMA:
3766   case ISD::FMAD: {
3767     // TODO: handle llvm.amdgcn.fma.legacy
3768     if (!mayIgnoreSignedZero(N0))
3769       return SDValue();
3770 
3771     // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
3772     SDValue LHS = N0.getOperand(0);
3773     SDValue MHS = N0.getOperand(1);
3774     SDValue RHS = N0.getOperand(2);
3775 
3776     if (LHS.getOpcode() == ISD::FNEG)
3777       LHS = LHS.getOperand(0);
3778     else if (MHS.getOpcode() == ISD::FNEG)
3779       MHS = MHS.getOperand(0);
3780     else
3781       MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS);
3782 
3783     if (RHS.getOpcode() != ISD::FNEG)
3784       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3785     else
3786       RHS = RHS.getOperand(0);
3787 
3788     SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS);
3789     if (Res.getOpcode() != Opc)
3790       return SDValue(); // Op got folded away.
3791     if (!N0.hasOneUse())
3792       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3793     return Res;
3794   }
3795   case ISD::FMAXNUM:
3796   case ISD::FMINNUM:
3797   case ISD::FMAXNUM_IEEE:
3798   case ISD::FMINNUM_IEEE:
3799   case AMDGPUISD::FMAX_LEGACY:
3800   case AMDGPUISD::FMIN_LEGACY: {
3801     // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
3802     // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
3803     // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
3804     // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)
3805 
3806     SDValue LHS = N0.getOperand(0);
3807     SDValue RHS = N0.getOperand(1);
3808 
3809     // 0 doesn't have a negated inline immediate.
3810     // TODO: This constant check should be generalized to other operations.
3811     if (isConstantCostlierToNegate(RHS))
3812       return SDValue();
3813 
3814     SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3815     SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3816     unsigned Opposite = inverseMinMax(Opc);
3817 
3818     SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
3819     if (Res.getOpcode() != Opposite)
3820       return SDValue(); // Op got folded away.
3821     if (!N0.hasOneUse())
3822       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3823     return Res;
3824   }
3825   case AMDGPUISD::FMED3: {
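    // fneg (fmed3 x, y, z) -> fmed3 (fneg x), (fneg y), (fneg z)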
3826     SDValue Ops[3];
3827     for (unsigned I = 0; I < 3; ++I)
3828       Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I), N0->getFlags());
3829 
3830     SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags());
3831     if (Res.getOpcode() != AMDGPUISD::FMED3)
3832       return SDValue(); // Op got folded away.
3833 
3834     if (!N0.hasOneUse()) {
3835       SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Res);
3836       DAG.ReplaceAllUsesWith(N0, Neg);
3837 
3838       for (SDNode *U : Neg->uses())
3839         DCI.AddToWorklist(U);
3840     }
3841 
3842     return Res;
3843   }
3844   case ISD::FP_EXTEND:
3845   case ISD::FTRUNC:
3846   case ISD::FRINT:
3847   case ISD::FNEARBYINT: // XXX - Should fround be handled?
3848   case ISD::FSIN:
3849   case ISD::FCANONICALIZE:
3850   case AMDGPUISD::RCP:
3851   case AMDGPUISD::RCP_LEGACY:
3852   case AMDGPUISD::RCP_IFLAG:
3853   case AMDGPUISD::SIN_HW: {
3854     SDValue CvtSrc = N0.getOperand(0);
3855     if (CvtSrc.getOpcode() == ISD::FNEG) {
3856       // (fneg (fp_extend (fneg x))) -> (fp_extend x)
3857       // (fneg (rcp (fneg x))) -> (rcp x)
3858       return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0));
3859     }
3860 
3861     if (!N0.hasOneUse())
3862       return SDValue();
3863 
3864     // (fneg (fp_extend x)) -> (fp_extend (fneg x))
3865     // (fneg (rcp x)) -> (rcp (fneg x))
3866     SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
3867     return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
3868   }
3869   case ISD::FP_ROUND: {
3870     SDValue CvtSrc = N0.getOperand(0);
3871 
3872     if (CvtSrc.getOpcode() == ISD::FNEG) {
3873       // (fneg (fp_round (fneg x))) -> (fp_round x)
3874       return DAG.getNode(ISD::FP_ROUND, SL, VT,
3875                          CvtSrc.getOperand(0), N0.getOperand(1));
3876     }
3877 
3878     if (!N0.hasOneUse())
3879       return SDValue();
3880 
3881     // (fneg (fp_round x)) -> (fp_round (fneg x))
3882     SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
3883     return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1));
3884   }
3885   case ISD::FP16_TO_FP: {
3886     // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
3887     // f16, but legalization of f16 fneg ends up pulling it out of the source.
3888     // Put the fneg back as a legal source operation that can be matched later.
3889     SDLoc SL(N);
3890 
3891     SDValue Src = N0.getOperand(0);
3892     EVT SrcVT = Src.getValueType();
3893 
3894     // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
3895     SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src,
3896                                   DAG.getConstant(0x8000, SL, SrcVT));
3897     return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
3898   }
3899   default:
3900     return SDValue();
3901   }
3902 }
3903 
3904 SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N,
3905                                                  DAGCombinerInfo &DCI) const {
3906   SelectionDAG &DAG = DCI.DAG;
3907   SDValue N0 = N->getOperand(0);
3908 
3909   if (!N0.hasOneUse())
3910     return SDValue();
3911 
3912   switch (N0.getOpcode()) {
3913   case ISD::FP16_TO_FP: {
3914     assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
3915     SDLoc SL(N);
3916     SDValue Src = N0.getOperand(0);
3917     EVT SrcVT = Src.getValueType();
3918 
3919     // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
3920     SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src,
3921                                   DAG.getConstant(0x7fff, SL, SrcVT));
3922     return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
3923   }
3924   default:
3925     return SDValue();
3926   }
3927 }
3928 
3929 SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
3930                                                 DAGCombinerInfo &DCI) const {
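  // Fold rcp of a floating-point constant to the reciprocal constant.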
3931   const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
3932   if (!CFP)
3933     return SDValue();
3934 
3935   // XXX - Should this flush denormals?
3936   const APFloat &Val = CFP->getValueAPF();
3937   APFloat One(Val.getSemantics(), "1.0");
3938   return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
3939 }
3940 
3941 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
3942                                                 DAGCombinerInfo &DCI) const {
3943   SelectionDAG &DAG = DCI.DAG;
3944   SDLoc DL(N);
3945 
3946   switch(N->getOpcode()) {
3947   default:
3948     break;
3949   case ISD::BITCAST: {
3950     EVT DestVT = N->getValueType(0);
3951 
3952     // Push casts through vector builds. This helps avoid emitting a large
3953     // number of copies when materializing floating point vector constants.
3954     //
3955     // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
3956   //   vNt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
3957     if (DestVT.isVector()) {
3958       SDValue Src = N->getOperand(0);
3959       if (Src.getOpcode() == ISD::BUILD_VECTOR) {
3960         EVT SrcVT = Src.getValueType();
3961         unsigned NElts = DestVT.getVectorNumElements();
3962 
3963         if (SrcVT.getVectorNumElements() == NElts) {
3964           EVT DestEltVT = DestVT.getVectorElementType();
3965 
3966           SmallVector<SDValue, 8> CastedElts;
3967           SDLoc SL(N);
3968           for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
3969             SDValue Elt = Src.getOperand(I);
3970             CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt));
3971           }
3972 
3973           return DAG.getBuildVector(DestVT, SL, CastedElts);
3974         }
3975       }
3976     }
3977 
3978     if (DestVT.getSizeInBits() != 64 || !DestVT.isVector())
3979       break;
3980 
3981     // Fold bitcasts of constants.
3982     //
3983     // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
3984     // TODO: Generalize and move to DAGCombiner
3985     SDValue Src = N->getOperand(0);
3986     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
3987       SDLoc SL(N);
3988       uint64_t CVal = C->getZExtValue();
3989       SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
3990                                DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3991                                DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3992       return DAG.getNode(ISD::BITCAST, SL, DestVT, BV);
3993     }
3994 
3995     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
3996       const APInt &Val = C->getValueAPF().bitcastToAPInt();
3997       SDLoc SL(N);
3998       uint64_t CVal = Val.getZExtValue();
3999       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
4000                                 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
4001                                 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
4002 
4003       return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
4004     }
4005 
4006     break;
4007   }
4008   case ISD::SHL: {
4009     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
4010       break;
4011 
4012     return performShlCombine(N, DCI);
4013   }
4014   case ISD::SRL: {
4015     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
4016       break;
4017 
4018     return performSrlCombine(N, DCI);
4019   }
4020   case ISD::SRA: {
4021     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
4022       break;
4023 
4024     return performSraCombine(N, DCI);
4025   }
4026   case ISD::TRUNCATE:
4027     return performTruncateCombine(N, DCI);
4028   case ISD::MUL:
4029     return performMulCombine(N, DCI);
4030   case ISD::SMUL_LOHI:
4031   case ISD::UMUL_LOHI:
4032     return performMulLoHiCombine(N, DCI);
4033   case ISD::MULHS:
4034     return performMulhsCombine(N, DCI);
4035   case ISD::MULHU:
4036     return performMulhuCombine(N, DCI);
4037   case AMDGPUISD::MUL_I24:
4038   case AMDGPUISD::MUL_U24:
4039   case AMDGPUISD::MULHI_I24:
4040   case AMDGPUISD::MULHI_U24:
4041     return simplifyMul24(N, DCI);
4042   case ISD::SELECT:
4043     return performSelectCombine(N, DCI);
4044   case ISD::FNEG:
4045     return performFNegCombine(N, DCI);
4046   case ISD::FABS:
4047     return performFAbsCombine(N, DCI);
4048   case AMDGPUISD::BFE_I32:
4049   case AMDGPUISD::BFE_U32: {
4050     assert(!N->getValueType(0).isVector() &&
4051            "Vector handling of BFE not implemented");
4052     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
4053     if (!Width)
4054       break;
4055 
4056     uint32_t WidthVal = Width->getZExtValue() & 0x1f;
4057     if (WidthVal == 0)
4058       return DAG.getConstant(0, DL, MVT::i32);
4059 
4060     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
4061     if (!Offset)
4062       break;
4063 
4064     SDValue BitsFrom = N->getOperand(0);
4065     uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
4066 
4067     bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
4068 
4069     if (OffsetVal == 0) {
4070       // This is already sign / zero extended, so try to fold away extra BFEs.
4071       unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
4072 
4073       unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
4074       if (OpSignBits >= SignBits)
4075         return BitsFrom;
4076 
4077       EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
4078       if (Signed) {
4079         // This is a sign_extend_inreg. Replace it to take advantage of existing
4080         // DAG Combines. If not eliminated, we will match back to BFE during
4081         // selection.
4082 
4083         // TODO: The sext_inreg of extended types ends up being expanded here,
4084         // although we could handle them in a single BFE.
4085         return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
4086                            DAG.getValueType(SmallVT));
4087       }
4088 
4089       return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
4090     }
4091 
4092     if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
4093       if (Signed) {
4094         return constantFoldBFE<int32_t>(DAG,
4095                                         CVal->getSExtValue(),
4096                                         OffsetVal,
4097                                         WidthVal,
4098                                         DL);
4099       }
4100 
4101       return constantFoldBFE<uint32_t>(DAG,
4102                                        CVal->getZExtValue(),
4103                                        OffsetVal,
4104                                        WidthVal,
4105                                        DL);
4106     }
4107 
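         // If the extracted field reaches bit 31, the BFE reduces to a single
         // shift right by the offset; keep the 16/16 upper-half case when SDWA
         // can select it directly.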
4108     if ((OffsetVal + WidthVal) >= 32 &&
4109         !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
4110       SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
4111       return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
4112                          BitsFrom, ShiftVal);
4113     }
4114 
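         // Only bits [Offset, Offset + Width) of the source are read, so try to
         // shrink or simplify the source based on those demanded bits.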
4115     if (BitsFrom.hasOneUse()) {
4116       APInt Demanded = APInt::getBitsSet(32,
4117                                          OffsetVal,
4118                                          OffsetVal + WidthVal);
4119 
4120       KnownBits Known;
4121       TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
4122                                             !DCI.isBeforeLegalizeOps());
4123       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4124       if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
4125           TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
4126         DCI.CommitTargetLoweringOpt(TLO);
4127       }
4128     }
4129 
4130     break;
4131   }
4132   case ISD::LOAD:
4133     return performLoadCombine(N, DCI);
4134   case ISD::STORE:
4135     return performStoreCombine(N, DCI);
4136   case AMDGPUISD::RCP:
4137   case AMDGPUISD::RCP_IFLAG:
4138     return performRcpCombine(N, DCI);
4139   case ISD::AssertZext:
4140   case ISD::AssertSext:
4141     return performAssertSZExtCombine(N, DCI);
4142   case ISD::INTRINSIC_WO_CHAIN:
4143     return performIntrinsicWOChainCombine(N, DCI);
4144   }
4145   return SDValue();
4146 }
4147 
4148 //===----------------------------------------------------------------------===//
4149 // Helper functions
4150 //===----------------------------------------------------------------------===//
4151 
4152 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
4153                                                    const TargetRegisterClass *RC,
4154                                                    Register Reg, EVT VT,
4155                                                    const SDLoc &SL,
4156                                                    bool RawReg) const {
4157   MachineFunction &MF = DAG.getMachineFunction();
4158   MachineRegisterInfo &MRI = MF.getRegInfo();
4159   Register VReg;
4160 
4161   if (!MRI.isLiveIn(Reg)) {
4162     VReg = MRI.createVirtualRegister(RC);
4163     MRI.addLiveIn(Reg, VReg);
4164   } else {
4165     VReg = MRI.getLiveInVirtReg(Reg);
4166   }
4167 
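       // With RawReg, hand back the virtual register directly; otherwise emit a
       // copy from the entry node so the live-in value can be used anywhere.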
4168   if (RawReg)
4169     return DAG.getRegister(VReg, VT);
4170 
4171   return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT);
4172 }
4173 
4174 // This may be called multiple times, and nothing prevents creating multiple
4175 // objects at the same offset. See if we already defined this object.
4176 static int getOrCreateFixedStackObject(MachineFrameInfo &MFI, unsigned Size,
4177                                        int64_t Offset) {
4178   for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
4179     if (MFI.getObjectOffset(I) == Offset) {
4180       assert(MFI.getObjectSize(I) == Size);
4181       return I;
4182     }
4183   }
4184 
4185   return MFI.CreateFixedObject(Size, Offset, true);
4186 }
4187 
4188 SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG,
4189                                                   EVT VT,
4190                                                   const SDLoc &SL,
4191                                                   int64_t Offset) const {
4192   MachineFunction &MF = DAG.getMachineFunction();
4193   MachineFrameInfo &MFI = MF.getFrameInfo();
4194   int FI = getOrCreateFixedStackObject(MFI, VT.getStoreSize(), Offset);
4195 
4196   auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset);
4197   SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32);
4198 
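       // The fixed incoming-argument slot is assumed not to change underneath us,
       // so the load can be marked dereferenceable and invariant.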
4199   return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, Align(4),
4200                      MachineMemOperand::MODereferenceable |
4201                          MachineMemOperand::MOInvariant);
4202 }
4203 
4204 SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG,
4205                                                    const SDLoc &SL,
4206                                                    SDValue Chain,
4207                                                    SDValue ArgVal,
4208                                                    int64_t Offset) const {
4209   MachineFunction &MF = DAG.getMachineFunction();
4210   MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset);
4211   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4212 
4213   SDValue Ptr = DAG.getConstant(Offset, SL, MVT::i32);
4214   // Stores to the argument stack area are relative to the stack pointer.
4215   SDValue SP =
4216       DAG.getCopyFromReg(Chain, SL, Info->getStackPtrOffsetReg(), MVT::i32);
4217   Ptr = DAG.getNode(ISD::ADD, SL, MVT::i32, SP, Ptr);
4218   SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, Align(4),
4219                                MachineMemOperand::MODereferenceable);
4220   return Store;
4221 }
4222 
4223 SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG,
4224                                              const TargetRegisterClass *RC,
4225                                              EVT VT, const SDLoc &SL,
4226                                              const ArgDescriptor &Arg) const {
4227   assert(Arg && "Attempting to load missing argument");
4228 
4229   SDValue V = Arg.isRegister() ?
4230     CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL) :
4231     loadStackInputValue(DAG, VT, SL, Arg.getStackOffset());
4232 
4233   if (!Arg.isMasked())
4234     return V;
4235 
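       // A masked argument shares its register with other values: shift the field
       // down to bit 0 and mask off the unrelated high bits.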
4236   unsigned Mask = Arg.getMask();
4237   unsigned Shift = countTrailingZeros<unsigned>(Mask);
4238   V = DAG.getNode(ISD::SRL, SL, VT, V,
4239                   DAG.getShiftAmountConstant(Shift, VT, SL));
4240   return DAG.getNode(ISD::AND, SL, VT, V,
4241                      DAG.getConstant(Mask >> Shift, SL, VT));
4242 }
4243 
4244 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
4245     const MachineFunction &MF, const ImplicitParameter Param) const {
4246   const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
4247   const AMDGPUSubtarget &ST =
4248       AMDGPUSubtarget::get(getTargetMachine(), MF.getFunction());
4249   unsigned ExplicitArgOffset = ST.getExplicitKernelArgOffset(MF.getFunction());
4250   const Align Alignment = ST.getAlignmentForImplicitArgPtr();
4251   uint64_t ArgOffset = alignTo(MFI->getExplicitKernArgSize(), Alignment) +
4252                        ExplicitArgOffset;
4253   switch (Param) {
4254   case FIRST_IMPLICIT:
4255     return ArgOffset;
4256   case PRIVATE_BASE:
4257     return ArgOffset + AMDGPU::ImplicitArg::PRIVATE_BASE_OFFSET;
4258   case SHARED_BASE:
4259     return ArgOffset + AMDGPU::ImplicitArg::SHARED_BASE_OFFSET;
4260   case QUEUE_PTR:
4261     return ArgOffset + AMDGPU::ImplicitArg::QUEUE_PTR_OFFSET;
4262   }
4263   llvm_unreachable("unexpected implicit parameter type");
4264 }
4265 
4266 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
4267 
4268 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
4269   switch ((AMDGPUISD::NodeType)Opcode) {
4270   case AMDGPUISD::FIRST_NUMBER: break;
4271   // AMDIL DAG nodes
4272   NODE_NAME_CASE(UMUL)
4273   NODE_NAME_CASE(BRANCH_COND)
4274 
4275   // AMDGPU DAG nodes
4276   NODE_NAME_CASE(IF)
4277   NODE_NAME_CASE(ELSE)
4278   NODE_NAME_CASE(LOOP)
4279   NODE_NAME_CASE(CALL)
4280   NODE_NAME_CASE(TC_RETURN)
4281   NODE_NAME_CASE(TRAP)
4282   NODE_NAME_CASE(RET_FLAG)
4283   NODE_NAME_CASE(RETURN_TO_EPILOG)
4284   NODE_NAME_CASE(ENDPGM)
4285   NODE_NAME_CASE(DWORDADDR)
4286   NODE_NAME_CASE(FRACT)
4287   NODE_NAME_CASE(SETCC)
4288   NODE_NAME_CASE(SETREG)
4289   NODE_NAME_CASE(DENORM_MODE)
4290   NODE_NAME_CASE(FMA_W_CHAIN)
4291   NODE_NAME_CASE(FMUL_W_CHAIN)
4292   NODE_NAME_CASE(CLAMP)
4293   NODE_NAME_CASE(COS_HW)
4294   NODE_NAME_CASE(SIN_HW)
4295   NODE_NAME_CASE(FMAX_LEGACY)
4296   NODE_NAME_CASE(FMIN_LEGACY)
4297   NODE_NAME_CASE(FMAX3)
4298   NODE_NAME_CASE(SMAX3)
4299   NODE_NAME_CASE(UMAX3)
4300   NODE_NAME_CASE(FMIN3)
4301   NODE_NAME_CASE(SMIN3)
4302   NODE_NAME_CASE(UMIN3)
4303   NODE_NAME_CASE(FMED3)
4304   NODE_NAME_CASE(SMED3)
4305   NODE_NAME_CASE(UMED3)
4306   NODE_NAME_CASE(FDOT2)
4307   NODE_NAME_CASE(URECIP)
4308   NODE_NAME_CASE(DIV_SCALE)
4309   NODE_NAME_CASE(DIV_FMAS)
4310   NODE_NAME_CASE(DIV_FIXUP)
4311   NODE_NAME_CASE(FMAD_FTZ)
4312   NODE_NAME_CASE(RCP)
4313   NODE_NAME_CASE(RSQ)
4314   NODE_NAME_CASE(RCP_LEGACY)
4315   NODE_NAME_CASE(RCP_IFLAG)
4316   NODE_NAME_CASE(FMUL_LEGACY)
4317   NODE_NAME_CASE(RSQ_CLAMP)
4318   NODE_NAME_CASE(LDEXP)
4319   NODE_NAME_CASE(FP_CLASS)
4320   NODE_NAME_CASE(DOT4)
4321   NODE_NAME_CASE(CARRY)
4322   NODE_NAME_CASE(BORROW)
4323   NODE_NAME_CASE(BFE_U32)
4324   NODE_NAME_CASE(BFE_I32)
4325   NODE_NAME_CASE(BFI)
4326   NODE_NAME_CASE(BFM)
4327   NODE_NAME_CASE(FFBH_U32)
4328   NODE_NAME_CASE(FFBH_I32)
4329   NODE_NAME_CASE(FFBL_B32)
4330   NODE_NAME_CASE(MUL_U24)
4331   NODE_NAME_CASE(MUL_I24)
4332   NODE_NAME_CASE(MULHI_U24)
4333   NODE_NAME_CASE(MULHI_I24)
4334   NODE_NAME_CASE(MAD_U24)
4335   NODE_NAME_CASE(MAD_I24)
4336   NODE_NAME_CASE(MAD_I64_I32)
4337   NODE_NAME_CASE(MAD_U64_U32)
4338   NODE_NAME_CASE(PERM)
4339   NODE_NAME_CASE(TEXTURE_FETCH)
4340   NODE_NAME_CASE(R600_EXPORT)
4341   NODE_NAME_CASE(CONST_ADDRESS)
4342   NODE_NAME_CASE(REGISTER_LOAD)
4343   NODE_NAME_CASE(REGISTER_STORE)
4344   NODE_NAME_CASE(SAMPLE)
4345   NODE_NAME_CASE(SAMPLEB)
4346   NODE_NAME_CASE(SAMPLED)
4347   NODE_NAME_CASE(SAMPLEL)
4348   NODE_NAME_CASE(CVT_F32_UBYTE0)
4349   NODE_NAME_CASE(CVT_F32_UBYTE1)
4350   NODE_NAME_CASE(CVT_F32_UBYTE2)
4351   NODE_NAME_CASE(CVT_F32_UBYTE3)
4352   NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
4353   NODE_NAME_CASE(CVT_PKNORM_I16_F32)
4354   NODE_NAME_CASE(CVT_PKNORM_U16_F32)
4355   NODE_NAME_CASE(CVT_PK_I16_I32)
4356   NODE_NAME_CASE(CVT_PK_U16_U32)
4357   NODE_NAME_CASE(FP_TO_FP16)
4358   NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
4359   NODE_NAME_CASE(CONST_DATA_PTR)
4360   NODE_NAME_CASE(PC_ADD_REL_OFFSET)
4361   NODE_NAME_CASE(LDS)
4362   NODE_NAME_CASE(FPTRUNC_ROUND_UPWARD)
4363   NODE_NAME_CASE(FPTRUNC_ROUND_DOWNWARD)
4364   NODE_NAME_CASE(DUMMY_CHAIN)
4365   case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
4366   NODE_NAME_CASE(LOAD_D16_HI)
4367   NODE_NAME_CASE(LOAD_D16_LO)
4368   NODE_NAME_CASE(LOAD_D16_HI_I8)
4369   NODE_NAME_CASE(LOAD_D16_HI_U8)
4370   NODE_NAME_CASE(LOAD_D16_LO_I8)
4371   NODE_NAME_CASE(LOAD_D16_LO_U8)
4372   NODE_NAME_CASE(STORE_MSKOR)
4373   NODE_NAME_CASE(LOAD_CONSTANT)
4374   NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
4375   NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
4376   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
4377   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
4378   NODE_NAME_CASE(DS_ORDERED_COUNT)
4379   NODE_NAME_CASE(ATOMIC_CMP_SWAP)
4380   NODE_NAME_CASE(ATOMIC_INC)
4381   NODE_NAME_CASE(ATOMIC_DEC)
4382   NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
4383   NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
4384   NODE_NAME_CASE(BUFFER_LOAD)
4385   NODE_NAME_CASE(BUFFER_LOAD_UBYTE)
4386   NODE_NAME_CASE(BUFFER_LOAD_USHORT)
4387   NODE_NAME_CASE(BUFFER_LOAD_BYTE)
4388   NODE_NAME_CASE(BUFFER_LOAD_SHORT)
4389   NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
4390   NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
4391   NODE_NAME_CASE(SBUFFER_LOAD)
4392   NODE_NAME_CASE(BUFFER_STORE)
4393   NODE_NAME_CASE(BUFFER_STORE_BYTE)
4394   NODE_NAME_CASE(BUFFER_STORE_SHORT)
4395   NODE_NAME_CASE(BUFFER_STORE_FORMAT)
4396   NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16)
4397   NODE_NAME_CASE(BUFFER_ATOMIC_SWAP)
4398   NODE_NAME_CASE(BUFFER_ATOMIC_ADD)
4399   NODE_NAME_CASE(BUFFER_ATOMIC_SUB)
4400   NODE_NAME_CASE(BUFFER_ATOMIC_SMIN)
4401   NODE_NAME_CASE(BUFFER_ATOMIC_UMIN)
4402   NODE_NAME_CASE(BUFFER_ATOMIC_SMAX)
4403   NODE_NAME_CASE(BUFFER_ATOMIC_UMAX)
4404   NODE_NAME_CASE(BUFFER_ATOMIC_AND)
4405   NODE_NAME_CASE(BUFFER_ATOMIC_OR)
4406   NODE_NAME_CASE(BUFFER_ATOMIC_XOR)
4407   NODE_NAME_CASE(BUFFER_ATOMIC_INC)
4408   NODE_NAME_CASE(BUFFER_ATOMIC_DEC)
4409   NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP)
4410   NODE_NAME_CASE(BUFFER_ATOMIC_CSUB)
4411   NODE_NAME_CASE(BUFFER_ATOMIC_FADD)
4412   NODE_NAME_CASE(BUFFER_ATOMIC_FMIN)
4413   NODE_NAME_CASE(BUFFER_ATOMIC_FMAX)
4414 
4415   case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
4416   }
4417   return nullptr;
4418 }
4419 
4420 SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
4421                                               SelectionDAG &DAG, int Enabled,
4422                                               int &RefinementSteps,
4423                                               bool &UseOneConstNR,
4424                                               bool Reciprocal) const {
4425   EVT VT = Operand.getValueType();
4426 
4427   if (VT == MVT::f32) {
4428     RefinementSteps = 0;
4429     return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
4430   }
4431 
4432   // TODO: There is also an f64 rsq instruction, but the documentation is less
4433   // clear on its precision.
4434 
4435   return SDValue();
4436 }
4437 
4438 SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
4439                                                SelectionDAG &DAG, int Enabled,
4440                                                int &RefinementSteps) const {
4441   EVT VT = Operand.getValueType();
4442 
4443   if (VT == MVT::f32) {
4444     // Reciprocal, < 1 ulp error.
4445     //
4446     // This reciprocal approximation converges to < 0.5 ulp error with one
4447     // Newton-Raphson iteration performed with two fused multiply-adds (FMAs).
4448 
4449     RefinementSteps = 0;
4450     return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
4451   }
4452 
4453   // TODO: There is also an f64 rcp instruction, but the documentation is less
4454   // clear on its precision.
4455 
4456   return SDValue();
4457 }
4458 
4459 static unsigned workitemIntrinsicDim(unsigned ID) {
4460   switch (ID) {
4461   case Intrinsic::amdgcn_workitem_id_x:
4462     return 0;
4463   case Intrinsic::amdgcn_workitem_id_y:
4464     return 1;
4465   case Intrinsic::amdgcn_workitem_id_z:
4466     return 2;
4467   default:
4468     llvm_unreachable("not a workitem intrinsic");
4469   }
4470 }
4471 
4472 void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
4473     const SDValue Op, KnownBits &Known,
4474     const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
4475 
4476   Known.resetAll(); // Don't know anything.
4477 
4478   unsigned Opc = Op.getOpcode();
4479 
4480   switch (Opc) {
4481   default:
4482     break;
4483   case AMDGPUISD::CARRY:
4484   case AMDGPUISD::BORROW: {
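         // CARRY and BORROW produce a 0 or 1 result, so all but the low bit are
         // known zero.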
4485     Known.Zero = APInt::getHighBitsSet(32, 31);
4486     break;
4487   }
4488 
4489   case AMDGPUISD::BFE_I32:
4490   case AMDGPUISD::BFE_U32: {
4491     ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4492     if (!CWidth)
4493       return;
4494 
4495     uint32_t Width = CWidth->getZExtValue() & 0x1f;
4496 
4497     if (Opc == AMDGPUISD::BFE_U32)
4498       Known.Zero = APInt::getHighBitsSet(32, 32 - Width);
4499 
4500     break;
4501   }
4502   case AMDGPUISD::FP_TO_FP16: {
4503     unsigned BitWidth = Known.getBitWidth();
4504 
4505     // High bits are zero.
4506     Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
4507     break;
4508   }
4509   case AMDGPUISD::MUL_U24:
4510   case AMDGPUISD::MUL_I24: {
4511     KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
4512     KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
4513     unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
4514                       RHSKnown.countMinTrailingZeros();
4515     Known.Zero.setLowBits(std::min(TrailZ, 32u));
4516     // Skip the extra check if all bits are known to be zero.
4517     if (TrailZ >= 32)
4518       break;
4519 
4520     // Truncate to 24 bits.
4521     LHSKnown = LHSKnown.trunc(24);
4522     RHSKnown = RHSKnown.trunc(24);
4523 
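         // The number of significant bits the truncated operands can produce
         // bounds the product: for MUL_I24 the bits above that are copies of the
         // sign (when the operand signs are known), for MUL_U24 they are zero.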
4524     if (Opc == AMDGPUISD::MUL_I24) {
4525       unsigned LHSValBits = LHSKnown.countMaxSignificantBits();
4526       unsigned RHSValBits = RHSKnown.countMaxSignificantBits();
4527       unsigned MaxValBits = LHSValBits + RHSValBits;
4528       if (MaxValBits > 32)
4529         break;
4530       unsigned SignBits = 32 - MaxValBits + 1;
4531       bool LHSNegative = LHSKnown.isNegative();
4532       bool LHSNonNegative = LHSKnown.isNonNegative();
4533       bool LHSPositive = LHSKnown.isStrictlyPositive();
4534       bool RHSNegative = RHSKnown.isNegative();
4535       bool RHSNonNegative = RHSKnown.isNonNegative();
4536       bool RHSPositive = RHSKnown.isStrictlyPositive();
4537 
4538       if ((LHSNonNegative && RHSNonNegative) || (LHSNegative && RHSNegative))
4539         Known.Zero.setHighBits(SignBits);
4540       else if ((LHSNegative && RHSPositive) || (LHSPositive && RHSNegative))
4541         Known.One.setHighBits(SignBits);
4542     } else {
4543       unsigned LHSValBits = LHSKnown.countMaxActiveBits();
4544       unsigned RHSValBits = RHSKnown.countMaxActiveBits();
4545       unsigned MaxValBits = LHSValBits + RHSValBits;
4546       if (MaxValBits >= 32)
4547         break;
4548       Known.Zero.setBitsFrom(MaxValBits);
4549     }
4550     break;
4551   }
4552   case AMDGPUISD::PERM: {
4553     ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4554     if (!CMask)
4555       return;
4556 
4557     KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
4558     KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
4559     unsigned Sel = CMask->getZExtValue();
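         // Combine the two sources byte by byte as directed by the selector: each
         // selector byte either copies a byte of known bits from one source or
         // produces a constant 0x00 or 0xff byte.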
4560 
4561     for (unsigned I = 0; I < 32; I += 8) {
4562       unsigned SelBits = Sel & 0xff;
4563       if (SelBits < 4) {
4564         SelBits *= 8;
4565         Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
4566         Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
4567       } else if (SelBits < 7) {
4568         SelBits = (SelBits & 3) * 8;
4569         Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
4570         Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
4571       } else if (SelBits == 0x0c) {
4572         Known.Zero |= 0xFFull << I;
4573       } else if (SelBits > 0x0c) {
4574         Known.One |= 0xFFull << I;
4575       }
4576       Sel >>= 8;
4577     }
4578     break;
4579   }
4580   case AMDGPUISD::BUFFER_LOAD_UBYTE: {
4581     Known.Zero.setHighBits(24);
4582     break;
4583   }
4584   case AMDGPUISD::BUFFER_LOAD_USHORT: {
4585     Known.Zero.setHighBits(16);
4586     break;
4587   }
4588   case AMDGPUISD::LDS: {
4589     auto GA = cast<GlobalAddressSDNode>(Op.getOperand(0).getNode());
4590     Align Alignment = GA->getGlobal()->getPointerAlignment(DAG.getDataLayout());
4591 
4592     Known.Zero.setHighBits(16);
4593     Known.Zero.setLowBits(Log2(Alignment));
4594     break;
4595   }
4596   case ISD::INTRINSIC_WO_CHAIN: {
4597     unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4598     switch (IID) {
4599     case Intrinsic::amdgcn_mbcnt_lo:
4600     case Intrinsic::amdgcn_mbcnt_hi: {
4601       const GCNSubtarget &ST =
4602           DAG.getMachineFunction().getSubtarget<GCNSubtarget>();
4603       // These return at most the (wavefront size - 1) + src1.
4604       // As long as src1 is an immediate we can calculate the known bits.
4605       KnownBits Src1Known = DAG.computeKnownBits(Op.getOperand(2), Depth + 1);
4606       unsigned Src1ValBits = Src1Known.countMaxActiveBits();
4607       unsigned MaxActiveBits = std::max(Src1ValBits, ST.getWavefrontSizeLog2());
4608       // Cater for potential carry
4609       MaxActiveBits += Src1ValBits ? 1 : 0;
4610       unsigned Size = Op.getValueType().getSizeInBits();
4611       if (MaxActiveBits < Size)
4612         Known.Zero.setHighBits(Size - MaxActiveBits);
4613       break;
4614     }
4615     case Intrinsic::amdgcn_workitem_id_x:
4616     case Intrinsic::amdgcn_workitem_id_y:
4617     case Intrinsic::amdgcn_workitem_id_z: {
4618       unsigned MaxValue = Subtarget->getMaxWorkitemID(
4619           DAG.getMachineFunction().getFunction(), workitemIntrinsicDim(IID));
4620       Known.Zero.setHighBits(countLeadingZeros(MaxValue));
4621       break;
4622     }
4623     default:
4624       break;
4625     }
4626   }
4627   }
4628 }
4629 
4630 unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
4631     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
4632     unsigned Depth) const {
4633   switch (Op.getOpcode()) {
4634   case AMDGPUISD::BFE_I32: {
4635     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4636     if (!Width)
4637       return 1;
4638 
4639     unsigned SignBits = 32 - Width->getZExtValue() + 1;
4640     if (!isNullConstant(Op.getOperand(1)))
4641       return SignBits;
4642 
4643     // TODO: Could probably figure something out with non-0 offsets.
4644     unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4645     return std::max(SignBits, Op0SignBits);
4646   }
4647 
4648   case AMDGPUISD::BFE_U32: {
4649     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4650     return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
4651   }
4652 
4653   case AMDGPUISD::CARRY:
4654   case AMDGPUISD::BORROW:
4655     return 31;
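       // Sub-dword buffer loads are extended to 32 bits, so the signed byte and
       // short forms have at least 25 and 17 sign bits, and the unsigned forms at
       // least 24 and 16.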
4656   case AMDGPUISD::BUFFER_LOAD_BYTE:
4657     return 25;
4658   case AMDGPUISD::BUFFER_LOAD_SHORT:
4659     return 17;
4660   case AMDGPUISD::BUFFER_LOAD_UBYTE:
4661     return 24;
4662   case AMDGPUISD::BUFFER_LOAD_USHORT:
4663     return 16;
4664   case AMDGPUISD::FP_TO_FP16:
4665     return 16;
4666   default:
4667     return 1;
4668   }
4669 }
4670 
4671 unsigned AMDGPUTargetLowering::computeNumSignBitsForTargetInstr(
4672   GISelKnownBits &Analysis, Register R,
4673   const APInt &DemandedElts, const MachineRegisterInfo &MRI,
4674   unsigned Depth) const {
4675   const MachineInstr *MI = MRI.getVRegDef(R);
4676   if (!MI)
4677     return 1;
4678 
4679   // TODO: Check range metadata on MMO.
4680   switch (MI->getOpcode()) {
4681   case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
4682     return 25;
4683   case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
4684     return 17;
4685   case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
4686     return 24;
4687   case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
4688     return 16;
4689   default:
4690     return 1;
4691   }
4692 }
4693 
4694 bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
4695                                                         const SelectionDAG &DAG,
4696                                                         bool SNaN,
4697                                                         unsigned Depth) const {
4698   unsigned Opcode = Op.getOpcode();
4699   switch (Opcode) {
4700   case AMDGPUISD::FMIN_LEGACY:
4701   case AMDGPUISD::FMAX_LEGACY: {
4702     if (SNaN)
4703       return true;
4704 
4705     // TODO: We could check that one of the operands is never NaN, but it is
4706     // not clear which one.
4707     return false;
4708   }
4709   case AMDGPUISD::FMUL_LEGACY:
4710   case AMDGPUISD::CVT_PKRTZ_F16_F32: {
4711     if (SNaN)
4712       return true;
4713     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4714            DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4715   }
4716   case AMDGPUISD::FMED3:
4717   case AMDGPUISD::FMIN3:
4718   case AMDGPUISD::FMAX3:
4719   case AMDGPUISD::FMAD_FTZ: {
4720     if (SNaN)
4721       return true;
4722     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4723            DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4724            DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4725   }
4726   case AMDGPUISD::CVT_F32_UBYTE0:
4727   case AMDGPUISD::CVT_F32_UBYTE1:
4728   case AMDGPUISD::CVT_F32_UBYTE2:
4729   case AMDGPUISD::CVT_F32_UBYTE3:
4730     return true;
4731 
4732   case AMDGPUISD::RCP:
4733   case AMDGPUISD::RSQ:
4734   case AMDGPUISD::RCP_LEGACY:
4735   case AMDGPUISD::RSQ_CLAMP: {
4736     if (SNaN)
4737       return true;
4738 
4739     // TODO: Need an is-known-positive check.
4740     return false;
4741   }
4742   case AMDGPUISD::LDEXP:
4743   case AMDGPUISD::FRACT: {
4744     if (SNaN)
4745       return true;
4746     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4747   }
4748   case AMDGPUISD::DIV_SCALE:
4749   case AMDGPUISD::DIV_FMAS:
4750   case AMDGPUISD::DIV_FIXUP:
4751     // TODO: Refine on operands.
4752     return SNaN;
4753   case AMDGPUISD::SIN_HW:
4754   case AMDGPUISD::COS_HW: {
4755     // TODO: Need a check for infinity.
4756     return SNaN;
4757   }
4758   case ISD::INTRINSIC_WO_CHAIN: {
4759     unsigned IntrinsicID
4760       = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4761     // TODO: Handle more intrinsics
4762     switch (IntrinsicID) {
4763     case Intrinsic::amdgcn_cubeid:
4764       return true;
4765 
4766     case Intrinsic::amdgcn_frexp_mant: {
4767       if (SNaN)
4768         return true;
4769       return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4770     }
4771     case Intrinsic::amdgcn_cvt_pkrtz: {
4772       if (SNaN)
4773         return true;
4774       return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4775              DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4776     }
4777     case Intrinsic::amdgcn_rcp:
4778     case Intrinsic::amdgcn_rsq:
4779     case Intrinsic::amdgcn_rcp_legacy:
4780     case Intrinsic::amdgcn_rsq_legacy:
4781     case Intrinsic::amdgcn_rsq_clamp: {
4782       if (SNaN)
4783         return true;
4784 
4785       // TODO: Need an is-known-positive check.
4786       return false;
4787     }
4788     case Intrinsic::amdgcn_trig_preop:
4789     case Intrinsic::amdgcn_fdot2:
4790       // TODO: Refine on operand
4791       return SNaN;
4792     case Intrinsic::amdgcn_fma_legacy:
4793       if (SNaN)
4794         return true;
4795       return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4796              DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1) &&
4797              DAG.isKnownNeverNaN(Op.getOperand(3), SNaN, Depth + 1);
4798     default:
4799       return false;
4800     }
4801   }
4802   default:
4803     return false;
4804   }
4805 }
4806 
4807 TargetLowering::AtomicExpansionKind
4808 AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
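       // By default these operations are expanded to a compare-and-swap loop; the
       // subtarget lowering may override this for forms it can select natively.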
4809   switch (RMW->getOperation()) {
4810   case AtomicRMWInst::Nand:
4811   case AtomicRMWInst::FAdd:
4812   case AtomicRMWInst::FSub:
4813   case AtomicRMWInst::FMax:
4814   case AtomicRMWInst::FMin:
4815     return AtomicExpansionKind::CmpXChg;
4816   default:
4817     return AtomicExpansionKind::None;
4818   }
4819 }
4820 
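     // The extract is only treated as legal when the wide type is 32 or 64 bits
     // and the offset/width type is 32 bits.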
4821 bool AMDGPUTargetLowering::isConstantUnsignedBitfieldExtractLegal(
4822     unsigned Opc, LLT Ty1, LLT Ty2) const {
4823   return (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64)) &&
4824          Ty2 == LLT::scalar(32);
4825 }
4826