1 //===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This is the parent TargetLowering class for hardware code gen
11 /// targets.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "AMDGPUISelLowering.h"
16 #include "AMDGPU.h"
17 #include "AMDGPUCallLowering.h"
18 #include "AMDGPUFrameLowering.h"
19 #include "AMDGPUSubtarget.h"
20 #include "AMDGPUTargetMachine.h"
21 #include "Utils/AMDGPUBaseInfo.h"
22 #include "R600MachineFunctionInfo.h"
23 #include "SIInstrInfo.h"
24 #include "SIMachineFunctionInfo.h"
25 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
26 #include "llvm/CodeGen/Analysis.h"
27 #include "llvm/CodeGen/CallingConvLower.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/SelectionDAG.h"
31 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/DiagnosticInfo.h"
34 #include "llvm/Support/KnownBits.h"
35 #include "llvm/Support/MathExtras.h"
36 using namespace llvm;
37
38 #include "AMDGPUGenCallingConv.inc"
39
40 static cl::opt<bool> AMDGPUBypassSlowDiv(
41 "amdgpu-bypass-slow-div",
42 cl::desc("Skip 64-bit divide for dynamic 32-bit values"),
43 cl::init(true));
44
45 // Find a larger type to do a load / store of a vector with.
46 EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
47 unsigned StoreSize = VT.getStoreSizeInBits();
48 if (StoreSize <= 32)
49 return EVT::getIntegerVT(Ctx, StoreSize);
50
51 assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
52 return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
53 }
54
55 unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
56 EVT VT = Op.getValueType();
57 KnownBits Known = DAG.computeKnownBits(Op);
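  // The number of bits actually needed is the full width minus the high bits
  // known to be zero; e.g. an i32 whose top 8 bits are known zero fits in 24
  // bits.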
58 return VT.getSizeInBits() - Known.countMinLeadingZeros();
59 }
60
61 unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
62 EVT VT = Op.getValueType();
63
64 // In order for this to be a signed 24-bit value, bit 23 must
65 // be a sign bit.
66 return VT.getSizeInBits() - DAG.ComputeNumSignBits(Op);
67 }
68
69 AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
70 const AMDGPUSubtarget &STI)
71 : TargetLowering(TM), Subtarget(&STI) {
72 // Lower floating point store/load to integer store/load to reduce the number
73 // of patterns in tablegen.
74 setOperationAction(ISD::LOAD, MVT::f32, Promote);
75 AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
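  // i.e. an f32 load is selected as if it were an i32 load and the result is
  // bitcast back to f32; the same promotion is applied to the wider FP and
  // 64-bit integer types below.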
76
77 setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
78 AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
79
80 setOperationAction(ISD::LOAD, MVT::v3f32, Promote);
81 AddPromotedToType(ISD::LOAD, MVT::v3f32, MVT::v3i32);
82
83 setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
84 AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
85
86 setOperationAction(ISD::LOAD, MVT::v5f32, Promote);
87 AddPromotedToType(ISD::LOAD, MVT::v5f32, MVT::v5i32);
88
89 setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
90 AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
91
92 setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
93 AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
94
95 setOperationAction(ISD::LOAD, MVT::v32f32, Promote);
96 AddPromotedToType(ISD::LOAD, MVT::v32f32, MVT::v32i32);
97
98 setOperationAction(ISD::LOAD, MVT::i64, Promote);
99 AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
100
101 setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
102 AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);
103
104 setOperationAction(ISD::LOAD, MVT::f64, Promote);
105 AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);
106
107 setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
108 AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);
109
110 setOperationAction(ISD::LOAD, MVT::v4i64, Promote);
111 AddPromotedToType(ISD::LOAD, MVT::v4i64, MVT::v8i32);
112
113 setOperationAction(ISD::LOAD, MVT::v4f64, Promote);
114 AddPromotedToType(ISD::LOAD, MVT::v4f64, MVT::v8i32);
115
116 setOperationAction(ISD::LOAD, MVT::v8i64, Promote);
117 AddPromotedToType(ISD::LOAD, MVT::v8i64, MVT::v16i32);
118
119 setOperationAction(ISD::LOAD, MVT::v8f64, Promote);
120 AddPromotedToType(ISD::LOAD, MVT::v8f64, MVT::v16i32);
121
122 setOperationAction(ISD::LOAD, MVT::v16i64, Promote);
123 AddPromotedToType(ISD::LOAD, MVT::v16i64, MVT::v32i32);
124
125 setOperationAction(ISD::LOAD, MVT::v16f64, Promote);
126 AddPromotedToType(ISD::LOAD, MVT::v16f64, MVT::v32i32);
127
128 // There are no 64-bit extloads. These should be done as a 32-bit extload and
129 // an extension to 64-bit.
130 for (MVT VT : MVT::integer_valuetypes()) {
131 setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
132 setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
133 setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
134 }
135
136 for (MVT VT : MVT::integer_valuetypes()) {
137 if (VT == MVT::i64)
138 continue;
139
140 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
141 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
142 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
143 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
144
145 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
146 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
147 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
148 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
149
150 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
151 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
152 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
153 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
154 }
155
156 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
157 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
158 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
159 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
160 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
161 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
162 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
163 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
164 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
165 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
166 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v3i16, Expand);
167 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v3i16, Expand);
168 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v3i16, Expand);
169 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
170 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
171 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
172 }
173
174 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
175 setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
176 setLoadExtAction(ISD::EXTLOAD, MVT::v3f32, MVT::v3f16, Expand);
177 setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
178 setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
179 setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Expand);
180 setLoadExtAction(ISD::EXTLOAD, MVT::v32f32, MVT::v32f16, Expand);
181
182 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
183 setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
184 setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
185 setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);
186 setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16f32, Expand);
187
188 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
189 setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
190 setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
191 setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
192 setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16f16, Expand);
193
194 setOperationAction(ISD::STORE, MVT::f32, Promote);
195 AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);
196
197 setOperationAction(ISD::STORE, MVT::v2f32, Promote);
198 AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
199
200 setOperationAction(ISD::STORE, MVT::v3f32, Promote);
201 AddPromotedToType(ISD::STORE, MVT::v3f32, MVT::v3i32);
202
203 setOperationAction(ISD::STORE, MVT::v4f32, Promote);
204 AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
205
206 setOperationAction(ISD::STORE, MVT::v5f32, Promote);
207 AddPromotedToType(ISD::STORE, MVT::v5f32, MVT::v5i32);
208
209 setOperationAction(ISD::STORE, MVT::v8f32, Promote);
210 AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);
211
212 setOperationAction(ISD::STORE, MVT::v16f32, Promote);
213 AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
214
215 setOperationAction(ISD::STORE, MVT::v32f32, Promote);
216 AddPromotedToType(ISD::STORE, MVT::v32f32, MVT::v32i32);
217
218 setOperationAction(ISD::STORE, MVT::i64, Promote);
219 AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
220
221 setOperationAction(ISD::STORE, MVT::v2i64, Promote);
222 AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);
223
224 setOperationAction(ISD::STORE, MVT::f64, Promote);
225 AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);
226
227 setOperationAction(ISD::STORE, MVT::v2f64, Promote);
228 AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);
229
230 setOperationAction(ISD::STORE, MVT::v4i64, Promote);
231 AddPromotedToType(ISD::STORE, MVT::v4i64, MVT::v8i32);
232
233 setOperationAction(ISD::STORE, MVT::v4f64, Promote);
234 AddPromotedToType(ISD::STORE, MVT::v4f64, MVT::v8i32);
235
236 setOperationAction(ISD::STORE, MVT::v8i64, Promote);
237 AddPromotedToType(ISD::STORE, MVT::v8i64, MVT::v16i32);
238
239 setOperationAction(ISD::STORE, MVT::v8f64, Promote);
240 AddPromotedToType(ISD::STORE, MVT::v8f64, MVT::v16i32);
241
242 setOperationAction(ISD::STORE, MVT::v16i64, Promote);
243 AddPromotedToType(ISD::STORE, MVT::v16i64, MVT::v32i32);
244
245 setOperationAction(ISD::STORE, MVT::v16f64, Promote);
246 AddPromotedToType(ISD::STORE, MVT::v16f64, MVT::v32i32);
247
248 setTruncStoreAction(MVT::i64, MVT::i1, Expand);
249 setTruncStoreAction(MVT::i64, MVT::i8, Expand);
250 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
251 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
252
253 setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
254 setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
255 setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
256 setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);
257
258 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
259 setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
260 setTruncStoreAction(MVT::v3f32, MVT::v3f16, Expand);
261 setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
262 setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
263 setTruncStoreAction(MVT::v16f32, MVT::v16f16, Expand);
264 setTruncStoreAction(MVT::v32f32, MVT::v32f16, Expand);
265
266 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
267 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
268
269 setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
270 setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);
271
272 setTruncStoreAction(MVT::v4i64, MVT::v4i32, Expand);
273 setTruncStoreAction(MVT::v4i64, MVT::v4i16, Expand);
274 setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
275 setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);
276
277 setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
278 setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);
279
280 setTruncStoreAction(MVT::v16f64, MVT::v16f32, Expand);
281 setTruncStoreAction(MVT::v16f64, MVT::v16f16, Expand);
282 setTruncStoreAction(MVT::v16i64, MVT::v16i16, Expand);
283 setTruncStoreAction(MVT::v16i64, MVT::v16i16, Expand);
284 setTruncStoreAction(MVT::v16i64, MVT::v16i8, Expand);
285 setTruncStoreAction(MVT::v16i64, MVT::v16i8, Expand);
286 setTruncStoreAction(MVT::v16i64, MVT::v16i1, Expand);
287
288 setOperationAction(ISD::Constant, MVT::i32, Legal);
289 setOperationAction(ISD::Constant, MVT::i64, Legal);
290 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
291 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
292
293 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
294 setOperationAction(ISD::BRIND, MVT::Other, Expand);
295
296 // This is totally unsupported; just custom lower it to produce an error.
297 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
298
299 // Library functions. These default to Expand, but we have instructions
300 // for them.
301 setOperationAction(ISD::FCEIL, MVT::f32, Legal);
302 setOperationAction(ISD::FEXP2, MVT::f32, Legal);
303 setOperationAction(ISD::FPOW, MVT::f32, Legal);
304 setOperationAction(ISD::FLOG2, MVT::f32, Legal);
305 setOperationAction(ISD::FABS, MVT::f32, Legal);
306 setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
307 setOperationAction(ISD::FRINT, MVT::f32, Legal);
308 setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
309 setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
310 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
311
312 setOperationAction(ISD::FROUND, MVT::f32, Custom);
313 setOperationAction(ISD::FROUND, MVT::f64, Custom);
314
315 setOperationAction(ISD::FLOG, MVT::f32, Custom);
316 setOperationAction(ISD::FLOG10, MVT::f32, Custom);
317 setOperationAction(ISD::FEXP, MVT::f32, Custom);
318
319
320 setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
321 setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);
322
323 setOperationAction(ISD::FREM, MVT::f16, Custom);
324 setOperationAction(ISD::FREM, MVT::f32, Custom);
325 setOperationAction(ISD::FREM, MVT::f64, Custom);
326
327 // Expand to fneg + fadd.
328 setOperationAction(ISD::FSUB, MVT::f64, Expand);
329
330 setOperationAction(ISD::CONCAT_VECTORS, MVT::v3i32, Custom);
331 setOperationAction(ISD::CONCAT_VECTORS, MVT::v3f32, Custom);
332 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
333 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
334 setOperationAction(ISD::CONCAT_VECTORS, MVT::v5i32, Custom);
335 setOperationAction(ISD::CONCAT_VECTORS, MVT::v5f32, Custom);
336 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
337 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
338 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
339 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
340 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3f32, Custom);
341 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3i32, Custom);
342 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
343 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
344 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5f32, Custom);
345 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5i32, Custom);
346 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
347 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);
348 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16f32, Custom);
349 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i32, Custom);
350 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32f32, Custom);
351 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32i32, Custom);
352 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f64, Custom);
353 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i64, Custom);
354 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Custom);
355 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i64, Custom);
356 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f64, Custom);
357 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i64, Custom);
358 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16f64, Custom);
359 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i64, Custom);
360
361 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
362 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
363 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);
364
365 const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
366 for (MVT VT : ScalarIntVTs) {
367 // These should use [SU]DIVREM, so set them to expand
368 setOperationAction(ISD::SDIV, VT, Expand);
369 setOperationAction(ISD::UDIV, VT, Expand);
370 setOperationAction(ISD::SREM, VT, Expand);
371 setOperationAction(ISD::UREM, VT, Expand);
372
373 // The GPU does not have a divrem instruction for signed or unsigned values.
374 setOperationAction(ISD::SDIVREM, VT, Custom);
375 setOperationAction(ISD::UDIVREM, VT, Custom);
376
377 // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
378 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
379 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
380
381 setOperationAction(ISD::BSWAP, VT, Expand);
382 setOperationAction(ISD::CTTZ, VT, Expand);
383 setOperationAction(ISD::CTLZ, VT, Expand);
384
385 // AMDGPU uses ADDC/SUBC/ADDE/SUBE
386 setOperationAction(ISD::ADDC, VT, Legal);
387 setOperationAction(ISD::SUBC, VT, Legal);
388 setOperationAction(ISD::ADDE, VT, Legal);
389 setOperationAction(ISD::SUBE, VT, Legal);
390 }
391
392 // The hardware supports 32-bit FSHR, but not FSHL.
393 setOperationAction(ISD::FSHR, MVT::i32, Legal);
394
395 // The hardware supports 32-bit ROTR, but not ROTL.
396 setOperationAction(ISD::ROTL, MVT::i32, Expand);
397 setOperationAction(ISD::ROTL, MVT::i64, Expand);
398 setOperationAction(ISD::ROTR, MVT::i64, Expand);
399
400 setOperationAction(ISD::MULHU, MVT::i16, Expand);
401 setOperationAction(ISD::MULHS, MVT::i16, Expand);
402
403 setOperationAction(ISD::MUL, MVT::i64, Expand);
404 setOperationAction(ISD::MULHU, MVT::i64, Expand);
405 setOperationAction(ISD::MULHS, MVT::i64, Expand);
406 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
407 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
408 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
409 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
410 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
411
412 setOperationAction(ISD::SMIN, MVT::i32, Legal);
413 setOperationAction(ISD::UMIN, MVT::i32, Legal);
414 setOperationAction(ISD::SMAX, MVT::i32, Legal);
415 setOperationAction(ISD::UMAX, MVT::i32, Legal);
416
417 setOperationAction(ISD::CTTZ, MVT::i64, Custom);
418 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
419 setOperationAction(ISD::CTLZ, MVT::i64, Custom);
420 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
421
422 static const MVT::SimpleValueType VectorIntTypes[] = {
423 MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32
424 };
425
426 for (MVT VT : VectorIntTypes) {
427 // Expand the following operations for the current type by default.
428 setOperationAction(ISD::ADD, VT, Expand);
429 setOperationAction(ISD::AND, VT, Expand);
430 setOperationAction(ISD::FP_TO_SINT, VT, Expand);
431 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
432 setOperationAction(ISD::MUL, VT, Expand);
433 setOperationAction(ISD::MULHU, VT, Expand);
434 setOperationAction(ISD::MULHS, VT, Expand);
435 setOperationAction(ISD::OR, VT, Expand);
436 setOperationAction(ISD::SHL, VT, Expand);
437 setOperationAction(ISD::SRA, VT, Expand);
438 setOperationAction(ISD::SRL, VT, Expand);
439 setOperationAction(ISD::ROTL, VT, Expand);
440 setOperationAction(ISD::ROTR, VT, Expand);
441 setOperationAction(ISD::SUB, VT, Expand);
442 setOperationAction(ISD::SINT_TO_FP, VT, Expand);
443 setOperationAction(ISD::UINT_TO_FP, VT, Expand);
444 setOperationAction(ISD::SDIV, VT, Expand);
445 setOperationAction(ISD::UDIV, VT, Expand);
446 setOperationAction(ISD::SREM, VT, Expand);
447 setOperationAction(ISD::UREM, VT, Expand);
448 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
449 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
450 setOperationAction(ISD::SDIVREM, VT, Expand);
451 setOperationAction(ISD::UDIVREM, VT, Expand);
452 setOperationAction(ISD::SELECT, VT, Expand);
453 setOperationAction(ISD::VSELECT, VT, Expand);
454 setOperationAction(ISD::SELECT_CC, VT, Expand);
455 setOperationAction(ISD::XOR, VT, Expand);
456 setOperationAction(ISD::BSWAP, VT, Expand);
457 setOperationAction(ISD::CTPOP, VT, Expand);
458 setOperationAction(ISD::CTTZ, VT, Expand);
459 setOperationAction(ISD::CTLZ, VT, Expand);
460 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
461 setOperationAction(ISD::SETCC, VT, Expand);
462 }
463
464 static const MVT::SimpleValueType FloatVectorTypes[] = {
465 MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32
466 };
467
468 for (MVT VT : FloatVectorTypes) {
469 setOperationAction(ISD::FABS, VT, Expand);
470 setOperationAction(ISD::FMINNUM, VT, Expand);
471 setOperationAction(ISD::FMAXNUM, VT, Expand);
472 setOperationAction(ISD::FADD, VT, Expand);
473 setOperationAction(ISD::FCEIL, VT, Expand);
474 setOperationAction(ISD::FCOS, VT, Expand);
475 setOperationAction(ISD::FDIV, VT, Expand);
476 setOperationAction(ISD::FEXP2, VT, Expand);
477 setOperationAction(ISD::FEXP, VT, Expand);
478 setOperationAction(ISD::FLOG2, VT, Expand);
479 setOperationAction(ISD::FREM, VT, Expand);
480 setOperationAction(ISD::FLOG, VT, Expand);
481 setOperationAction(ISD::FLOG10, VT, Expand);
482 setOperationAction(ISD::FPOW, VT, Expand);
483 setOperationAction(ISD::FFLOOR, VT, Expand);
484 setOperationAction(ISD::FTRUNC, VT, Expand);
485 setOperationAction(ISD::FMUL, VT, Expand);
486 setOperationAction(ISD::FMA, VT, Expand);
487 setOperationAction(ISD::FRINT, VT, Expand);
488 setOperationAction(ISD::FNEARBYINT, VT, Expand);
489 setOperationAction(ISD::FSQRT, VT, Expand);
490 setOperationAction(ISD::FSIN, VT, Expand);
491 setOperationAction(ISD::FSUB, VT, Expand);
492 setOperationAction(ISD::FNEG, VT, Expand);
493 setOperationAction(ISD::VSELECT, VT, Expand);
494 setOperationAction(ISD::SELECT_CC, VT, Expand);
495 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
496 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
497 setOperationAction(ISD::SETCC, VT, Expand);
498 setOperationAction(ISD::FCANONICALIZE, VT, Expand);
499 }
500
501 // This causes using an unrolled select operation rather than expansion with
502 // bit operations. This is in general better, but the alternative using BFI
503 // instructions may be better if the select sources are SGPRs.
504 setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
505 AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);
506
507 setOperationAction(ISD::SELECT, MVT::v3f32, Promote);
508 AddPromotedToType(ISD::SELECT, MVT::v3f32, MVT::v3i32);
509
510 setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
511 AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);
512
513 setOperationAction(ISD::SELECT, MVT::v5f32, Promote);
514 AddPromotedToType(ISD::SELECT, MVT::v5f32, MVT::v5i32);
515
516 // There are no libcalls of any kind.
517 for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
518 setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);
519
520 setSchedulingPreference(Sched::RegPressure);
521 setJumpIsExpensive(true);
522
523 // FIXME: This is only partially true. If we have to do vector compares, any
524 // SGPR pair can be a condition register. If we have a uniform condition, we
525 // are better off doing SALU operations, where there is only one SCC. For now,
526 // we don't have a way of knowing during instruction selection if a condition
527 // will be uniform and we always use vector compares. Assume we are using
528 // vector compares until that is fixed.
529 setHasMultipleConditionRegisters(true);
530
531 setMinCmpXchgSizeInBits(32);
532 setSupportsUnalignedAtomics(false);
533
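  // Selects are cheap on AMDGPU (typically a single v_cndmask), so there is no
  // reason to prefer branches over them.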
534 PredictableSelectIsExpensive = false;
535
536 // We want to find all load dependencies for long chains of stores to enable
537 // merging into very wide vectors. The problem is with vectors with > 4
538 // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
539 // vectors are a legal type, even though we have to split the loads
540 // usually. When we can more precisely specify load legality per address
541 // space, we should be able to make FindBetterChain/MergeConsecutiveStores
542 // smarter so that they can figure out what to do in 2 iterations without all
543 // N > 4 stores on the same chain.
544 GatherAllAliasesMaxDepth = 16;
545
546 // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
547 // about these during lowering.
548 MaxStoresPerMemcpy = 0xffffffff;
549 MaxStoresPerMemmove = 0xffffffff;
550 MaxStoresPerMemset = 0xffffffff;
551
552 // The expansion for 64-bit division is enormous.
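  // When enabled, addBypassSlowDiv(64, 32) inserts a run-time check: if both
  // 64-bit operands happen to fit in 32 bits, the much cheaper 32-bit division
  // is used instead.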
553 if (AMDGPUBypassSlowDiv)
554 addBypassSlowDiv(64, 32);
555
556 setTargetDAGCombine(ISD::BITCAST);
557 setTargetDAGCombine(ISD::SHL);
558 setTargetDAGCombine(ISD::SRA);
559 setTargetDAGCombine(ISD::SRL);
560 setTargetDAGCombine(ISD::TRUNCATE);
561 setTargetDAGCombine(ISD::MUL);
562 setTargetDAGCombine(ISD::MULHU);
563 setTargetDAGCombine(ISD::MULHS);
564 setTargetDAGCombine(ISD::SELECT);
565 setTargetDAGCombine(ISD::SELECT_CC);
566 setTargetDAGCombine(ISD::STORE);
567 setTargetDAGCombine(ISD::FADD);
568 setTargetDAGCombine(ISD::FSUB);
569 setTargetDAGCombine(ISD::FNEG);
570 setTargetDAGCombine(ISD::FABS);
571 setTargetDAGCombine(ISD::AssertZext);
572 setTargetDAGCombine(ISD::AssertSext);
573 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
574 }
575
576 //===----------------------------------------------------------------------===//
577 // Target Information
578 //===----------------------------------------------------------------------===//
579
580 LLVM_READNONE
581 static bool fnegFoldsIntoOp(unsigned Opc) {
582 switch (Opc) {
583 case ISD::FADD:
584 case ISD::FSUB:
585 case ISD::FMUL:
586 case ISD::FMA:
587 case ISD::FMAD:
588 case ISD::FMINNUM:
589 case ISD::FMAXNUM:
590 case ISD::FMINNUM_IEEE:
591 case ISD::FMAXNUM_IEEE:
592 case ISD::FSIN:
593 case ISD::FTRUNC:
594 case ISD::FRINT:
595 case ISD::FNEARBYINT:
596 case ISD::FCANONICALIZE:
597 case AMDGPUISD::RCP:
598 case AMDGPUISD::RCP_LEGACY:
599 case AMDGPUISD::RCP_IFLAG:
600 case AMDGPUISD::SIN_HW:
601 case AMDGPUISD::FMUL_LEGACY:
602 case AMDGPUISD::FMIN_LEGACY:
603 case AMDGPUISD::FMAX_LEGACY:
604 case AMDGPUISD::FMED3:
605 // TODO: handle llvm.amdgcn.fma.legacy
606 return true;
607 default:
608 return false;
609 }
610 }
611
612 /// \returns true if the operation will definitely need to use a 64-bit
613 /// encoding, and thus will use a VOP3 encoding regardless of the source
614 /// modifiers.
615 LLVM_READONLY
616 static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
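  // Operations with more than two operands (e.g. FMA/FMAD) or f64 operations
  // only have VOP3 encodings, where source modifiers are always available.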
617 return N->getNumOperands() > 2 || VT == MVT::f64;
618 }
619
620 // Most FP instructions support source modifiers, but this could be refined
621 // slightly.
622 LLVM_READONLY
623 static bool hasSourceMods(const SDNode *N) {
624 if (isa<MemSDNode>(N))
625 return false;
626
627 switch (N->getOpcode()) {
628 case ISD::CopyToReg:
629 case ISD::SELECT:
630 case ISD::FDIV:
631 case ISD::FREM:
632 case ISD::INLINEASM:
633 case ISD::INLINEASM_BR:
634 case AMDGPUISD::DIV_SCALE:
635 case ISD::INTRINSIC_W_CHAIN:
636
637 // TODO: Should really be looking at the users of the bitcast. These are
638 // problematic because bitcasts are used to legalize all stores to integer
639 // types.
640 case ISD::BITCAST:
641 return false;
642 case ISD::INTRINSIC_WO_CHAIN: {
643 switch (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()) {
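    // The interpolation intrinsics do not accept source modifiers on their
    // inputs, so an fneg/fabs cannot be folded into them.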
644 case Intrinsic::amdgcn_interp_p1:
645 case Intrinsic::amdgcn_interp_p2:
646 case Intrinsic::amdgcn_interp_mov:
647 case Intrinsic::amdgcn_interp_p1_f16:
648 case Intrinsic::amdgcn_interp_p2_f16:
649 return false;
650 default:
651 return true;
652 }
653 }
654 default:
655 return true;
656 }
657 }
658
659 bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
660 unsigned CostThreshold) {
661 // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
662 // a source modifier is truly free for them. For other users, a source
663 // modifier forces the instruction into a VOP3 encoding, so with multiple such
664 // users there can be a code size increase. Try to avoid increasing code size
665 // unless we know it will save on the instruction count.
666 unsigned NumMayIncreaseSize = 0;
667 MVT VT = N->getValueType(0).getScalarType().getSimpleVT();
668
669 // XXX - Should this limit number of uses to check?
670 for (const SDNode *U : N->uses()) {
671 if (!hasSourceMods(U))
672 return false;
673
674 if (!opMustUseVOP3Encoding(U, VT)) {
675 if (++NumMayIncreaseSize > CostThreshold)
676 return false;
677 }
678 }
679
680 return true;
681 }
682
683 EVT AMDGPUTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
684 ISD::NodeType ExtendKind) const {
685 assert(!VT.isVector() && "only scalar expected");
686
687 // Round to the next multiple of 32-bits.
688 unsigned Size = VT.getSizeInBits();
689 if (Size <= 32)
690 return MVT::i32;
691 return EVT::getIntegerVT(Context, 32 * ((Size + 31) / 32));
692 }
693
694 MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
695 return MVT::i32;
696 }
697
698 bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
699 return true;
700 }
701
702 // The backend supports 32 and 64 bit floating point immediates.
703 // FIXME: Why are we reporting vectors of FP immediates as legal?
704 bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
705 bool ForCodeSize) const {
706 EVT ScalarVT = VT.getScalarType();
707 return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
708 (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
709 }
710
711 // We don't want to shrink f64 / f32 constants.
712 bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
713 EVT ScalarVT = VT.getScalarType();
714 return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
715 }
716
717 bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
718 ISD::LoadExtType ExtTy,
719 EVT NewVT) const {
720 // TODO: This may be worth removing. Check regression tests for diffs.
721 if (!TargetLoweringBase::shouldReduceLoadWidth(N, ExtTy, NewVT))
722 return false;
723
724 unsigned NewSize = NewVT.getStoreSizeInBits();
725
726 // If we are reducing to a 32-bit load or a smaller multi-dword load,
727 // this is always better.
728 if (NewSize >= 32)
729 return true;
730
731 EVT OldVT = N->getValueType(0);
732 unsigned OldSize = OldVT.getStoreSizeInBits();
733
734 MemSDNode *MN = cast<MemSDNode>(N);
735 unsigned AS = MN->getAddressSpace();
736 // Do not shrink an aligned scalar load to sub-dword.
737 // Scalar engine cannot do sub-dword loads.
738 if (OldSize >= 32 && NewSize < 32 && MN->getAlignment() >= 4 &&
739 (AS == AMDGPUAS::CONSTANT_ADDRESS ||
740 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
741 (isa<LoadSDNode>(N) &&
742 AS == AMDGPUAS::GLOBAL_ADDRESS && MN->isInvariant())) &&
743 AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand()))
744 return false;
745
746 // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
747 // extloads, so doing one requires using a buffer_load. In cases where we
748 // still couldn't use a scalar load, using the wider load shouldn't really
749 // hurt anything.
750
751 // If the old size already had to be an extload, there's no harm in continuing
752 // to reduce the width.
753 return (OldSize < 32);
754 }
755
756 bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
757 const SelectionDAG &DAG,
758 const MachineMemOperand &MMO) const {
759
760 assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());
761
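  // Loads of 32-bit elements are already in the shape the hardware handles
  // best; bitcasting such a load to another type cannot make the load itself
  // cheaper.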
762 if (LoadTy.getScalarType() == MVT::i32)
763 return false;
764
765 unsigned LScalarSize = LoadTy.getScalarSizeInBits();
766 unsigned CastScalarSize = CastTy.getScalarSizeInBits();
767
768 if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
769 return false;
770
771 bool Fast = false;
772 return allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
773 CastTy, MMO, &Fast) &&
774 Fast;
775 }
776
777 // SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
778 // profitable with the expansion for 64-bit since it's generally good to
779 // speculate things.
780 // FIXME: These should really have the size as a parameter.
781 bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
782 return true;
783 }
784
785 bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
786 return true;
787 }
788
789 bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
790 switch (N->getOpcode()) {
791 case ISD::EntryToken:
792 case ISD::TokenFactor:
793 return true;
794 case ISD::INTRINSIC_WO_CHAIN: {
795 unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
796 switch (IntrID) {
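    // readfirstlane/readlane return a single scalar taken from one lane, so
    // the result is uniform by construction.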
797 case Intrinsic::amdgcn_readfirstlane:
798 case Intrinsic::amdgcn_readlane:
799 return true;
800 }
801 return false;
802 }
803 case ISD::LOAD:
804 if (cast<LoadSDNode>(N)->getMemOperand()->getAddrSpace() ==
805 AMDGPUAS::CONSTANT_ADDRESS_32BIT)
806 return true;
807 return false;
808 }
809 return false;
810 }
811
812 SDValue AMDGPUTargetLowering::getNegatedExpression(
813 SDValue Op, SelectionDAG &DAG, bool LegalOperations, bool ForCodeSize,
814 NegatibleCost &Cost, unsigned Depth) const {
815
816 switch (Op.getOpcode()) {
817 case ISD::FMA:
818 case ISD::FMAD: {
819 // Negating a fma is not free if it has users without source mods.
820 if (!allUsesHaveSourceMods(Op.getNode()))
821 return SDValue();
822 break;
823 }
824 default:
825 break;
826 }
827
828 return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
829 ForCodeSize, Cost, Depth);
830 }
831
832 //===---------------------------------------------------------------------===//
833 // Target Properties
834 //===---------------------------------------------------------------------===//
835
836 bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
837 assert(VT.isFloatingPoint());
838
839 // Packed operations do not have a fabs modifier.
840 return VT == MVT::f32 || VT == MVT::f64 ||
841 (Subtarget->has16BitInsts() && VT == MVT::f16);
842 }
843
844 bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
845 assert(VT.isFloatingPoint());
846 return VT == MVT::f32 || VT == MVT::f64 ||
847 (Subtarget->has16BitInsts() && VT == MVT::f16) ||
848 (Subtarget->hasVOP3PInsts() && VT == MVT::v2f16);
849 }
850
851 bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
852 unsigned NumElem,
853 unsigned AS) const {
854 return true;
855 }
856
857 bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
858 // There are few operations which truly have vector input operands. Any vector
859 // operation is going to involve operations on each component, and a
860 // build_vector will be a copy per element, so it always makes sense to use a
861 // build_vector input in place of the extracted element to avoid a copy into a
862 // super register.
863 //
864 // We should probably only do this if all users are extracts only, but this
865 // should be the common case.
866 return true;
867 }
868
869 bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
870 // Truncate is just accessing a subregister.
871
872 unsigned SrcSize = Source.getSizeInBits();
873 unsigned DestSize = Dest.getSizeInBits();
874
875 return DestSize < SrcSize && DestSize % 32 == 0;
876 }
877
878 bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
879 // Truncate is just accessing a subregister.
880
881 unsigned SrcSize = Source->getScalarSizeInBits();
882 unsigned DestSize = Dest->getScalarSizeInBits();
883
884 if (DestSize == 16 && Subtarget->has16BitInsts())
885 return SrcSize >= 32;
886
887 return DestSize < SrcSize && DestSize % 32 == 0;
888 }
889
890 bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
891 unsigned SrcSize = Src->getScalarSizeInBits();
892 unsigned DestSize = Dest->getScalarSizeInBits();
893
894 if (SrcSize == 16 && Subtarget->has16BitInsts())
895 return DestSize >= 32;
896
897 return SrcSize == 32 && DestSize == 64;
898 }
899
900 bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
901 // Any register load of a 64-bit value really requires 2 32-bit moves. For all
902 // practical purposes, the extra mov 0 needed to form the 64-bit value is
903 // free. As used, this will enable reducing 64-bit operations to 32-bit ones,
904 // which is always good.
905
906 if (Src == MVT::i16)
907 return Dest == MVT::i32 || Dest == MVT::i64;
908
909 return Src == MVT::i32 && Dest == MVT::i64;
910 }
911
912 bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
913 return isZExtFree(Val.getValueType(), VT2);
914 }
915
916 bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
917 // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
918 // limited number of native 64-bit operations. Shrinking an operation to fit
919 // in a single 32-bit register should always be helpful. As currently used,
920 // this is much less general than the name suggests, and is only used in
921 // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
922 // not profitable, and may actually be harmful.
923 return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
924 }
925
926 //===---------------------------------------------------------------------===//
927 // TargetLowering Callbacks
928 //===---------------------------------------------------------------------===//
929
930 CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
931 bool IsVarArg) {
932 switch (CC) {
933 case CallingConv::AMDGPU_VS:
934 case CallingConv::AMDGPU_GS:
935 case CallingConv::AMDGPU_PS:
936 case CallingConv::AMDGPU_CS:
937 case CallingConv::AMDGPU_HS:
938 case CallingConv::AMDGPU_ES:
939 case CallingConv::AMDGPU_LS:
940 return CC_AMDGPU;
941 case CallingConv::C:
942 case CallingConv::Fast:
943 case CallingConv::Cold:
944 return CC_AMDGPU_Func;
945 case CallingConv::AMDGPU_Gfx:
946 return CC_SI_Gfx;
947 case CallingConv::AMDGPU_KERNEL:
948 case CallingConv::SPIR_KERNEL:
949 default:
950 report_fatal_error("Unsupported calling convention for call");
951 }
952 }
953
954 CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
955 bool IsVarArg) {
956 switch (CC) {
957 case CallingConv::AMDGPU_KERNEL:
958 case CallingConv::SPIR_KERNEL:
959 llvm_unreachable("kernels should not be handled here");
960 case CallingConv::AMDGPU_VS:
961 case CallingConv::AMDGPU_GS:
962 case CallingConv::AMDGPU_PS:
963 case CallingConv::AMDGPU_CS:
964 case CallingConv::AMDGPU_HS:
965 case CallingConv::AMDGPU_ES:
966 case CallingConv::AMDGPU_LS:
967 return RetCC_SI_Shader;
968 case CallingConv::AMDGPU_Gfx:
969 return RetCC_SI_Gfx;
970 case CallingConv::C:
971 case CallingConv::Fast:
972 case CallingConv::Cold:
973 return RetCC_AMDGPU_Func;
974 default:
975 report_fatal_error("Unsupported calling convention.");
976 }
977 }
978
979 /// The SelectionDAGBuilder will automatically promote function arguments
980 /// with illegal types. However, this does not work for the AMDGPU targets
981 /// since the function arguments are stored in memory as these illegal types.
982 /// In order to handle this properly we need to get the original type sizes
983 /// from the LLVM IR Function and fix up the ISD::InputArg values before
984 /// passing them to AnalyzeFormalArguments().
985
986 /// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
987 /// input values across multiple registers. Each item in the Ins array
988 /// represents a single value that will be stored in registers. Ins[x].VT is
989 /// the value type of the value that will be stored in the register, so
990 /// whatever SDNode we lower the argument to needs to be this type.
991 ///
992 /// In order to correctly lower the arguments we need to know the size of each
993 /// argument. Since Ins[x].VT gives us the size of the register that will
994 /// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
995 /// for the original function argument so that we can deduce the correct memory
996 /// type to use for Ins[x]. In most cases the correct memory type will be
997 /// Ins[x].ArgVT. However, this will not always be the case. If, for example,
998 /// we have a kernel argument of type v8i8, this argument will be split into
999 /// 8 parts and each part will be represented by its own item in the Ins array.
1000 /// For each part the Ins[x].ArgVT will be the v8i8, which is the full type of
1001 /// the argument before it was split. From this, we deduce that the memory type
1002 /// for each individual part is i8. We pass the memory type as LocVT to the
1003 /// calling convention analysis function and the register type (Ins[x].VT) as
1004 /// the ValVT.
1005 void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
1006 CCState &State,
1007 const SmallVectorImpl<ISD::InputArg> &Ins) const {
1008 const MachineFunction &MF = State.getMachineFunction();
1009 const Function &Fn = MF.getFunction();
1010 LLVMContext &Ctx = Fn.getParent()->getContext();
1011 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
1012 const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);
1013 CallingConv::ID CC = Fn.getCallingConv();
1014
1015 Align MaxAlign = Align(1);
1016 uint64_t ExplicitArgOffset = 0;
1017 const DataLayout &DL = Fn.getParent()->getDataLayout();
1018
1019 unsigned InIndex = 0;
1020
1021 for (const Argument &Arg : Fn.args()) {
1022 const bool IsByRef = Arg.hasByRefAttr();
1023 Type *BaseArgTy = Arg.getType();
1024 Type *MemArgTy = IsByRef ? Arg.getParamByRefType() : BaseArgTy;
1025 MaybeAlign Alignment = IsByRef ? Arg.getParamAlign() : None;
1026 if (!Alignment)
1027 Alignment = DL.getABITypeAlign(MemArgTy);
1028 MaxAlign = max(Alignment, MaxAlign);
1029 uint64_t AllocSize = DL.getTypeAllocSize(MemArgTy);
1030
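    // ArgOffset is this argument's byte offset within the kernarg segment
    // (base of the explicit arguments plus the running offset among them);
    // ExplicitArgOffset then advances past this argument's storage.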
1031 uint64_t ArgOffset = alignTo(ExplicitArgOffset, Alignment) + ExplicitOffset;
1032 ExplicitArgOffset = alignTo(ExplicitArgOffset, Alignment) + AllocSize;
1033
1034 // We're basically throwing away everything passed into us and starting over
1035 // to get accurate in-memory offsets. The "PartOffset" is completely useless
1036 // to us as computed in Ins.
1037 //
1038 // We also need to figure out what type legalization is trying to do to get
1039 // the correct memory offsets.
1040
1041 SmallVector<EVT, 16> ValueVTs;
1042 SmallVector<uint64_t, 16> Offsets;
1043 ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);
1044
1045 for (unsigned Value = 0, NumValues = ValueVTs.size();
1046 Value != NumValues; ++Value) {
1047 uint64_t BasePartOffset = Offsets[Value];
1048
1049 EVT ArgVT = ValueVTs[Value];
1050 EVT MemVT = ArgVT;
1051 MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
1052 unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT);
1053
1054 if (NumRegs == 1) {
1055 // This argument is not split, so the IR type is the memory type.
1056 if (ArgVT.isExtended()) {
1057 // We have an extended type, like i24, so we should just use the
1058 // register type.
1059 MemVT = RegisterVT;
1060 } else {
1061 MemVT = ArgVT;
1062 }
1063 } else if (ArgVT.isVector() && RegisterVT.isVector() &&
1064 ArgVT.getScalarType() == RegisterVT.getScalarType()) {
1065 assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
1066 // We have a vector value which has been split into a vector with
1067 // the same scalar type, but fewer elements. This should handle
1068 // all the floating-point vector types.
1069 MemVT = RegisterVT;
1070 } else if (ArgVT.isVector() &&
1071 ArgVT.getVectorNumElements() == NumRegs) {
1072 // This arg has been split so that each element is stored in a separate
1073 // register.
1074 MemVT = ArgVT.getScalarType();
1075 } else if (ArgVT.isExtended()) {
1076 // We have an extended type, like i65.
1077 MemVT = RegisterVT;
1078 } else {
1079 unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
1080 assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
1081 if (RegisterVT.isInteger()) {
1082 MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
1083 } else if (RegisterVT.isVector()) {
1084 assert(!RegisterVT.getScalarType().isFloatingPoint());
1085 unsigned NumElements = RegisterVT.getVectorNumElements();
1086 assert(MemoryBits % NumElements == 0);
1087 // This vector type has been split into another vector type with
1088 // a different elements size.
1089 EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
1090 MemoryBits / NumElements);
1091 MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
1092 } else {
1093 llvm_unreachable("cannot deduce memory type.");
1094 }
1095 }
1096
1097 // Convert one element vectors to scalar.
1098 if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
1099 MemVT = MemVT.getScalarType();
1100
1101 // Round up vec3/vec5 argument.
1102 if (MemVT.isVector() && !MemVT.isPow2VectorType()) {
1103 assert(MemVT.getVectorNumElements() == 3 ||
1104 MemVT.getVectorNumElements() == 5);
1105 MemVT = MemVT.getPow2VectorType(State.getContext());
1106 } else if (!MemVT.isSimple() && !MemVT.isVector()) {
1107 MemVT = MemVT.getRoundIntegerType(State.getContext());
1108 }
1109
1110 unsigned PartOffset = 0;
1111 for (unsigned i = 0; i != NumRegs; ++i) {
1112 State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
1113 BasePartOffset + PartOffset,
1114 MemVT.getSimpleVT(),
1115 CCValAssign::Full));
1116 PartOffset += MemVT.getStoreSize();
1117 }
1118 }
1119 }
1120 }
1121
1122 SDValue AMDGPUTargetLowering::LowerReturn(
1123 SDValue Chain, CallingConv::ID CallConv,
1124 bool isVarArg,
1125 const SmallVectorImpl<ISD::OutputArg> &Outs,
1126 const SmallVectorImpl<SDValue> &OutVals,
1127 const SDLoc &DL, SelectionDAG &DAG) const {
1128 // FIXME: Fails for r600 tests
1129 //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
1130 // "wave terminate should not have return values");
1131 return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
1132 }
1133
1134 //===---------------------------------------------------------------------===//
1135 // Target specific lowering
1136 //===---------------------------------------------------------------------===//
1137
1138 /// Selects the correct CCAssignFn for a given CallingConvention value.
1139 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
1140 bool IsVarArg) {
1141 return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
1142 }
1143
1144 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
1145 bool IsVarArg) {
1146 return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
1147 }
1148
1149 SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
1150 SelectionDAG &DAG,
1151 MachineFrameInfo &MFI,
1152 int ClobberedFI) const {
1153 SmallVector<SDValue, 8> ArgChains;
1154 int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
1155 int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
1156
1157 // Include the original chain at the beginning of the list. When this is
1158 // used by target LowerCall hooks, this helps legalize find the
1159 // CALLSEQ_BEGIN node.
1160 ArgChains.push_back(Chain);
1161
1162 // Add a chain value for each stack argument load that may alias the clobbered slot.
1163 for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
1164 UE = DAG.getEntryNode().getNode()->use_end();
1165 U != UE; ++U) {
1166 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) {
1167 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
1168 if (FI->getIndex() < 0) {
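          // Fixed stack objects, such as incoming stack-passed arguments, have
          // negative frame indices.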
1169 int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
1170 int64_t InLastByte = InFirstByte;
1171 InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
1172
1173 if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1174 (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1175 ArgChains.push_back(SDValue(L, 1));
1176 }
1177 }
1178 }
1179 }
1180
1181 // Build a tokenfactor for all the chains.
1182 return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
1183 }
1184
1185 SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
1186 SmallVectorImpl<SDValue> &InVals,
1187 StringRef Reason) const {
1188 SDValue Callee = CLI.Callee;
1189 SelectionDAG &DAG = CLI.DAG;
1190
1191 const Function &Fn = DAG.getMachineFunction().getFunction();
1192
1193 StringRef FuncName("<unknown>");
1194
1195 if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
1196 FuncName = G->getSymbol();
1197 else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1198 FuncName = G->getGlobal()->getName();
1199
1200 DiagnosticInfoUnsupported NoCalls(
1201 Fn, Reason + FuncName, CLI.DL.getDebugLoc());
1202 DAG.getContext()->diagnose(NoCalls);
1203
1204 if (!CLI.IsTailCall) {
1205 for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
1206 InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
1207 }
1208
1209 return DAG.getEntryNode();
1210 }
1211
1212 SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
1213 SmallVectorImpl<SDValue> &InVals) const {
1214 return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
1215 }
1216
1217 SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
1218 SelectionDAG &DAG) const {
1219 const Function &Fn = DAG.getMachineFunction().getFunction();
1220
1221 DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
1222 SDLoc(Op).getDebugLoc());
1223 DAG.getContext()->diagnose(NoDynamicAlloca);
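  // Lowering still has to return something: hand back a zero value of the
  // requested type plus the incoming chain so compilation can continue after
  // the diagnostic has been emitted.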
1224 auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
1225 return DAG.getMergeValues(Ops, SDLoc());
1226 }
1227
1228 SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
1229 SelectionDAG &DAG) const {
1230 switch (Op.getOpcode()) {
1231 default:
1232 Op->print(errs(), &DAG);
1233 llvm_unreachable("Custom lowering code for this "
1234 "instruction is not implemented yet!");
1235 break;
1236 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
1237 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
1238 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
1239 case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
1240 case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
1241 case ISD::FREM: return LowerFREM(Op, DAG);
1242 case ISD::FCEIL: return LowerFCEIL(Op, DAG);
1243 case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
1244 case ISD::FRINT: return LowerFRINT(Op, DAG);
1245 case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
1246 case ISD::FROUND: return LowerFROUND(Op, DAG);
1247 case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
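    // ln(x) = log2(x) * ln(2) and log10(x) = log2(x) * (ln(2) / ln(10)); the
    // constant passed to LowerFLOG below is that scale factor.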
1248 case ISD::FLOG:
1249 return LowerFLOG(Op, DAG, numbers::ln2f);
1250 case ISD::FLOG10:
1251 return LowerFLOG(Op, DAG, numbers::ln2f / numbers::ln10f);
1252 case ISD::FEXP:
1253 return lowerFEXP(Op, DAG);
1254 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
1255 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
1256 case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
1257 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
1258 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
1259 case ISD::CTTZ:
1260 case ISD::CTTZ_ZERO_UNDEF:
1261 case ISD::CTLZ:
1262 case ISD::CTLZ_ZERO_UNDEF:
1263 return LowerCTLZ_CTTZ(Op, DAG);
1264 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
1265 }
1266 return Op;
1267 }
1268
1269 void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
1270 SmallVectorImpl<SDValue> &Results,
1271 SelectionDAG &DAG) const {
1272 switch (N->getOpcode()) {
1273 case ISD::SIGN_EXTEND_INREG:
1274 // Different parts of legalization seem to interpret which type of
1275 // sign_extend_inreg is the one to check for custom lowering. The extended
1276 // from type is what really matters, but some places check for custom
1277 // lowering of the result type. This results in trying to use
1278 // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
1279 // nothing here and let the illegal result integer be handled normally.
1280 return;
1281 default:
1282 return;
1283 }
1284 }
1285
1286 bool AMDGPUTargetLowering::hasDefinedInitializer(const GlobalValue *GV) {
1287 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
1288 if (!GVar || !GVar->hasInitializer())
1289 return false;
1290
1291 return !isa<UndefValue>(GVar->getInitializer());
1292 }
1293
1294 SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
1295 SDValue Op,
1296 SelectionDAG &DAG) const {
1297
1298 const DataLayout &DL = DAG.getDataLayout();
1299 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
1300 const GlobalValue *GV = G->getGlobal();
1301
1302 if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
1303 G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) {
1304 if (!MFI->isEntryFunction()) {
1305 SDLoc DL(Op);
1306 const Function &Fn = DAG.getMachineFunction().getFunction();
1307 DiagnosticInfoUnsupported BadLDSDecl(
1308 Fn, "local memory global used by non-kernel function",
1309 DL.getDebugLoc(), DS_Warning);
1310 DAG.getContext()->diagnose(BadLDSDecl);
1311
1312 // We currently don't have a way to correctly allocate LDS objects that
1313 // aren't directly associated with a kernel. We do force inlining of
1314 // functions that use local objects. However, if these dead functions are
1315 // not eliminated, we don't want a compile time error. Just emit a warning
1316 // and a trap, since there should be no callable path here.
1317 SDValue Trap = DAG.getNode(ISD::TRAP, DL, MVT::Other, DAG.getEntryNode());
1318 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
1319 Trap, DAG.getRoot());
1320 DAG.setRoot(OutputChain);
1321 return DAG.getUNDEF(Op.getValueType());
1322 }
1323
1324 // XXX: What does the value of G->getOffset() mean?
1325 assert(G->getOffset() == 0 &&
1326 "Do not know what to do with an non-zero offset");
1327
1328 // TODO: We could emit code to handle the initialization somewhere.
1329 if (!hasDefinedInitializer(GV)) {
1330 unsigned Offset = MFI->allocateLDSGlobal(DL, *cast<GlobalVariable>(GV));
1331 return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
1332 }
1333 }
1334
1335 const Function &Fn = DAG.getMachineFunction().getFunction();
1336 DiagnosticInfoUnsupported BadInit(
1337 Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
1338 DAG.getContext()->diagnose(BadInit);
1339 return SDValue();
1340 }
1341
1342 SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
1343 SelectionDAG &DAG) const {
1344 SmallVector<SDValue, 8> Args;
1345
1346 EVT VT = Op.getValueType();
1347 if (VT == MVT::v4i16 || VT == MVT::v4f16) {
1348 SDLoc SL(Op);
1349 SDValue Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(0));
1350 SDValue Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(1));
1351
1352 SDValue BV = DAG.getBuildVector(MVT::v2i32, SL, { Lo, Hi });
1353 return DAG.getNode(ISD::BITCAST, SL, VT, BV);
1354 }
1355
1356 for (const SDUse &U : Op->ops())
1357 DAG.ExtractVectorElements(U.get(), Args);
1358
1359 return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
1360 }
1361
1362 SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
1363 SelectionDAG &DAG) const {
1364
1365 SmallVector<SDValue, 8> Args;
1366 unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1367 EVT VT = Op.getValueType();
1368 DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
1369 VT.getVectorNumElements());
1370
1371 return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
1372 }
1373
1374 /// Generate Min/Max node
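/// Fold a select_cc whose true and false operands are the compare operands
/// themselves into the legacy (non-IEEE NaN semantics) FMIN/FMAX nodes,
/// permuting the operands to preserve the behavior on NaN inputs.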
1375 SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
1376 SDValue LHS, SDValue RHS,
1377 SDValue True, SDValue False,
1378 SDValue CC,
1379 DAGCombinerInfo &DCI) const {
1380 if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
1381 return SDValue();
1382
1383 SelectionDAG &DAG = DCI.DAG;
1384 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
1385 switch (CCOpcode) {
1386 case ISD::SETOEQ:
1387 case ISD::SETONE:
1388 case ISD::SETUNE:
1389 case ISD::SETNE:
1390 case ISD::SETUEQ:
1391 case ISD::SETEQ:
1392 case ISD::SETFALSE:
1393 case ISD::SETFALSE2:
1394 case ISD::SETTRUE:
1395 case ISD::SETTRUE2:
1396 case ISD::SETUO:
1397 case ISD::SETO:
1398 break;
1399 case ISD::SETULE:
1400 case ISD::SETULT: {
1401 if (LHS == True)
1402 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1403 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1404 }
1405 case ISD::SETOLE:
1406 case ISD::SETOLT:
1407 case ISD::SETLE:
1408 case ISD::SETLT: {
1409 // Ordered. Assume ordered for undefined.
1410
1411 // Only do this after legalization to avoid interfering with other combines
1412 // which might occur.
1413 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1414 !DCI.isCalledByLegalizer())
1415 return SDValue();
1416
1417 // We need to permute the operands to get the correct NaN behavior. The
1418 // selected operand is the second one based on the failing compare with NaN,
1419 // so permute it based on the compare type the hardware uses.
1420 if (LHS == True)
1421 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1422 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1423 }
1424 case ISD::SETUGE:
1425 case ISD::SETUGT: {
1426 if (LHS == True)
1427 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1428 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1429 }
1430 case ISD::SETGT:
1431 case ISD::SETGE:
1432 case ISD::SETOGE:
1433 case ISD::SETOGT: {
1434 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1435 !DCI.isCalledByLegalizer())
1436 return SDValue();
1437
1438 if (LHS == True)
1439 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1440 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1441 }
1442 case ISD::SETCC_INVALID:
1443 llvm_unreachable("Invalid setcc condcode!");
1444 }
1445 return SDValue();
1446 }
1447
1448 std::pair<SDValue, SDValue>
1449 AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
1450 SDLoc SL(Op);
1451
1452 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1453
1454 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1455 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1456
1457 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1458 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1459
1460 return std::make_pair(Lo, Hi);
1461 }
1462
1463 SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
1464 SDLoc SL(Op);
1465
1466 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1467 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1468 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1469 }
1470
1471 SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
1472 SDLoc SL(Op);
1473
1474 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1475 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1476 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1477 }
1478
1479 // Split a vector type into two parts. The first part is a power of two vector.
1480 // The second part is whatever is left over, and is a scalar if it would
1481 // otherwise be a 1-vector.
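// For example, a 3-element vector splits into (2-element vector, scalar) and
// a 5-element vector splits into (4-element vector, scalar).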
1482 std::pair<EVT, EVT>
1483 AMDGPUTargetLowering::getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const {
1484 EVT LoVT, HiVT;
1485 EVT EltVT = VT.getVectorElementType();
1486 unsigned NumElts = VT.getVectorNumElements();
1487 unsigned LoNumElts = PowerOf2Ceil((NumElts + 1) / 2);
1488 LoVT = EVT::getVectorVT(*DAG.getContext(), EltVT, LoNumElts);
1489 HiVT = NumElts - LoNumElts == 1
1490 ? EltVT
1491 : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
1492 return std::make_pair(LoVT, HiVT);
1493 }
1494
1495 // Split a vector value into two parts of types LoVT and HiVT. HiVT could be
1496 // scalar.
1497 std::pair<SDValue, SDValue>
1498 AMDGPUTargetLowering::splitVector(const SDValue &N, const SDLoc &DL,
1499 const EVT &LoVT, const EVT &HiVT,
1500 SelectionDAG &DAG) const {
1501 assert(LoVT.getVectorNumElements() +
1502 (HiVT.isVector() ? HiVT.getVectorNumElements() : 1) <=
1503 N.getValueType().getVectorNumElements() &&
1504 "More vector elements requested than available!");
1505 SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
1506 DAG.getVectorIdxConstant(0, DL));
1507 SDValue Hi = DAG.getNode(
1508 HiVT.isVector() ? ISD::EXTRACT_SUBVECTOR : ISD::EXTRACT_VECTOR_ELT, DL,
1509 HiVT, N, DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), DL));
1510 return std::make_pair(Lo, Hi);
1511 }
1512
1513 SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
1514 SelectionDAG &DAG) const {
1515 LoadSDNode *Load = cast<LoadSDNode>(Op);
1516 EVT VT = Op.getValueType();
1517 SDLoc SL(Op);
1518
1519
1520 // If this is a 2 element vector, we really want to scalarize and not create
1521 // weird 1 element vectors.
1522 if (VT.getVectorNumElements() == 2) {
1523 SDValue Ops[2];
1524 std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
1525 return DAG.getMergeValues(Ops, SL);
1526 }
1527
1528 SDValue BasePtr = Load->getBasePtr();
1529 EVT MemVT = Load->getMemoryVT();
1530
1531 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1532
1533 EVT LoVT, HiVT;
1534 EVT LoMemVT, HiMemVT;
1535 SDValue Lo, Hi;
1536
1537 std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1538 std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1539 std::tie(Lo, Hi) = splitVector(Op, SL, LoVT, HiVT, DAG);
1540
1541 unsigned Size = LoMemVT.getStoreSize();
1542 unsigned BaseAlign = Load->getAlignment();
1543 unsigned HiAlign = MinAlign(BaseAlign, Size);
1544
1545 SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
1546 Load->getChain(), BasePtr, SrcValue, LoMemVT,
1547 BaseAlign, Load->getMemOperand()->getFlags());
1548 SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Size));
1549 SDValue HiLoad =
1550 DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
1551 HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
1552 HiMemVT, HiAlign, Load->getMemOperand()->getFlags());
1553
1554 SDValue Join;
1555 if (LoVT == HiVT) {
1556 // This is the case where the vector length is a power of two, so it was split evenly.
1557 Join = DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad);
1558 } else {
1559 Join = DAG.getNode(ISD::INSERT_SUBVECTOR, SL, VT, DAG.getUNDEF(VT), LoLoad,
1560 DAG.getVectorIdxConstant(0, SL));
1561 Join = DAG.getNode(
1562 HiVT.isVector() ? ISD::INSERT_SUBVECTOR : ISD::INSERT_VECTOR_ELT, SL,
1563 VT, Join, HiLoad,
1564 DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), SL));
1565 }
1566
1567 SDValue Ops[] = {Join, DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
1568 LoLoad.getValue(1), HiLoad.getValue(1))};
1569
1570 return DAG.getMergeValues(Ops, SL);
1571 }
1572
1573 SDValue AMDGPUTargetLowering::WidenOrSplitVectorLoad(SDValue Op,
1574 SelectionDAG &DAG) const {
1575 LoadSDNode *Load = cast<LoadSDNode>(Op);
1576 EVT VT = Op.getValueType();
1577 SDValue BasePtr = Load->getBasePtr();
1578 EVT MemVT = Load->getMemoryVT();
1579 SDLoc SL(Op);
1580 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1581 unsigned BaseAlign = Load->getAlignment();
1582 unsigned NumElements = MemVT.getVectorNumElements();
1583
1584 // Widen from vec3 to vec4 when the load is at least 8-byte aligned
1585 // or 16-byte fully dereferenceable. Otherwise, split the vector load.
1586 if (NumElements != 3 ||
1587 (BaseAlign < 8 &&
1588 !SrcValue.isDereferenceable(16, *DAG.getContext(), DAG.getDataLayout())))
1589 return SplitVectorLoad(Op, DAG);
1590
1591 assert(NumElements == 3);
1592
1593 EVT WideVT =
1594 EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
1595 EVT WideMemVT =
1596 EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 4);
1597 SDValue WideLoad = DAG.getExtLoad(
1598 Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue,
1599 WideMemVT, BaseAlign, Load->getMemOperand()->getFlags());
1600 return DAG.getMergeValues(
1601 {DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, VT, WideLoad,
1602 DAG.getVectorIdxConstant(0, SL)),
1603 WideLoad.getValue(1)},
1604 SL);
1605 }
1606
1607 SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
1608 SelectionDAG &DAG) const {
1609 StoreSDNode *Store = cast<StoreSDNode>(Op);
1610 SDValue Val = Store->getValue();
1611 EVT VT = Val.getValueType();
1612
1613 // If this is a 2 element vector, we really want to scalarize and not create
1614 // weird 1 element vectors.
1615 if (VT.getVectorNumElements() == 2)
1616 return scalarizeVectorStore(Store, DAG);
1617
1618 EVT MemVT = Store->getMemoryVT();
1619 SDValue Chain = Store->getChain();
1620 SDValue BasePtr = Store->getBasePtr();
1621 SDLoc SL(Op);
1622
1623 EVT LoVT, HiVT;
1624 EVT LoMemVT, HiMemVT;
1625 SDValue Lo, Hi;
1626
1627 std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1628 std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1629 std::tie(Lo, Hi) = splitVector(Val, SL, LoVT, HiVT, DAG);
1630
1631 SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());
1632
1633 const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
1634 unsigned BaseAlign = Store->getAlignment();
1635 unsigned Size = LoMemVT.getStoreSize();
1636 unsigned HiAlign = MinAlign(BaseAlign, Size);
1637
1638 SDValue LoStore =
1639 DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
1640 Store->getMemOperand()->getFlags());
1641 SDValue HiStore =
1642 DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
1643 HiMemVT, HiAlign, Store->getMemOperand()->getFlags());
1644
1645 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
1646 }
1647
1648 // This is a shortcut for integer division because we have fast i32<->f32
1649 // conversions, and fast f32 reciprocal instructions. The fractional part of a
1650 // float is enough to accurately represent up to a 24-bit signed integer.
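// The early-outs below require at least 9 known sign bits on each operand,
// i.e. magnitudes that fit in 23 bits, so both values convert to f32 exactly
// and the single correction step applied to the rounded quotient is enough.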
1651 SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
1652 bool Sign) const {
1653 SDLoc DL(Op);
1654 EVT VT = Op.getValueType();
1655 SDValue LHS = Op.getOperand(0);
1656 SDValue RHS = Op.getOperand(1);
1657 MVT IntVT = MVT::i32;
1658 MVT FltVT = MVT::f32;
1659
1660 unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
1661 if (LHSSignBits < 9)
1662 return SDValue();
1663
1664 unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
1665 if (RHSSignBits < 9)
1666 return SDValue();
1667
1668 unsigned BitSize = VT.getSizeInBits();
1669 unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
1670 unsigned DivBits = BitSize - SignBits;
1671 if (Sign)
1672 ++DivBits;
1673
1674 ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
1675 ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
1676
1677 SDValue jq = DAG.getConstant(1, DL, IntVT);
1678
1679 if (Sign) {
1680 // char|short jq = ia ^ ib;
1681 jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
1682
1683 // jq = jq >> (bitsize - 2)
1684 jq = DAG.getNode(ISD::SRA, DL, VT, jq,
1685 DAG.getConstant(BitSize - 2, DL, VT));
1686
1687 // jq = jq | 0x1
1688 jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
1689 }
1690
1691 // int ia = (int)LHS;
1692 SDValue ia = LHS;
1693
1694 // int ib = (int)RHS;
1695 SDValue ib = RHS;
1696
1697 // float fa = (float)ia;
1698 SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
1699
1700 // float fb = (float)ib;
1701 SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
1702
1703 SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
1704 fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
1705
1706 // fq = trunc(fq);
1707 fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);
1708
1709 // float fqneg = -fq;
1710 SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
1711
1712 MachineFunction &MF = DAG.getMachineFunction();
1713 const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
1714
1715 // float fr = mad(fqneg, fb, fa);
1716 unsigned OpCode = !Subtarget->hasMadMacF32Insts() ?
1717 (unsigned)ISD::FMA :
1718 !MFI->getMode().allFP32Denormals() ?
1719 (unsigned)ISD::FMAD :
1720 (unsigned)AMDGPUISD::FMAD_FTZ;
1721 SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);
1722
1723 // int iq = (int)fq;
1724 SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
1725
1726 // fr = fabs(fr);
1727 fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
1728
1729 // fb = fabs(fb);
1730 fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
1731
1732 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1733
1734 // int cv = fr >= fb;
1735 SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
1736
1737 // jq = (cv ? jq : 0);
1738 jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
1739
1740 // dst = iq + jq;
1741 SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
1742
1743 // Rem needs compensation; it's easier to recompute it.
1744 SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
1745 Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
1746
1747 // Truncate to number of bits this divide really is.
1748 if (Sign) {
1749 SDValue InRegSize
1750 = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
1751 Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
1752 Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
1753 } else {
1754 SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
1755 Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
1756 Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
1757 }
1758
1759 return DAG.getMergeValues({ Div, Rem }, DL);
1760 }
1761
1762 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
1763 SelectionDAG &DAG,
1764 SmallVectorImpl<SDValue> &Results) const {
1765 SDLoc DL(Op);
1766 EVT VT = Op.getValueType();
1767
1768 assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64");
1769
1770 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1771
1772 SDValue One = DAG.getConstant(1, DL, HalfVT);
1773 SDValue Zero = DAG.getConstant(0, DL, HalfVT);
1774
1775 // Hi/Lo split.
1776 SDValue LHS = Op.getOperand(0);
1777 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1778 SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, One);
1779
1780 SDValue RHS = Op.getOperand(1);
1781 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1782 SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, One);
1783
1784 if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
1785 DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
1786
1787 SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1788 LHS_Lo, RHS_Lo);
1789
1790 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero});
1791 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero});
1792
1793 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
1794 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
1795 return;
1796 }
1797
1798 if (isTypeLegal(MVT::i64)) {
1799 MachineFunction &MF = DAG.getMachineFunction();
1800 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1801
1802 // Compute denominator reciprocal.
1803 unsigned FMAD = !Subtarget->hasMadMacF32Insts() ?
1804 (unsigned)ISD::FMA :
1805 !MFI->getMode().allFP32Denormals() ?
1806 (unsigned)ISD::FMAD :
1807 (unsigned)AMDGPUISD::FMAD_FTZ;
1808
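// Build an f32 estimate of 1/RHS from the two 32-bit halves. The bit
// patterns below appear to be 2^32 (0x4f800000), just under 2^64
// (0x5f7ffffc), 2^-32 (0x2f800000) and -2^32 (0xcf800000): they combine the
// halves, scale the reciprocal up to a 64-bit fixed-point value, and split
// it back into Rcp_Lo/Rcp_Hi.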
1809 SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo);
1810 SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi);
1811 SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi,
1812 DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32),
1813 Cvt_Lo);
1814 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1);
1815 SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp,
1816 DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32));
1817 SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1,
1818 DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32));
1819 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2);
1820 SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc,
1821 DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32),
1822 Mul1);
1823 SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2);
1824 SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc);
1825 SDValue Rcp64 = DAG.getBitcast(VT,
1826 DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi}));
1827
1828 SDValue Zero64 = DAG.getConstant(0, DL, VT);
1829 SDValue One64 = DAG.getConstant(1, DL, VT);
1830 SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1);
1831 SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1);
1832
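// Refine the reciprocal estimate and then form the quotient estimate.
// All 64-bit additions here are done on 32-bit halves with explicit
// ADDCARRY/SUBCARRY chains.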
1833 SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS);
1834 SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64);
1835 SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1);
1836 SDValue Mulhi1_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
1837 Zero);
1838 SDValue Mulhi1_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
1839 One);
1840
1841 SDValue Add1_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Lo,
1842 Mulhi1_Lo, Zero1);
1843 SDValue Add1_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Hi,
1844 Mulhi1_Hi, Add1_Lo.getValue(1));
1845 SDValue Add1_HiNc = DAG.getNode(ISD::ADD, DL, HalfVT, Rcp_Hi, Mulhi1_Hi);
1846 SDValue Add1 = DAG.getBitcast(VT,
1847 DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi}));
1848
1849 SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1);
1850 SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2);
1851 SDValue Mulhi2_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
1852 Zero);
1853 SDValue Mulhi2_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
1854 One);
1855
1856 SDValue Add2_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Lo,
1857 Mulhi2_Lo, Zero1);
1858 SDValue Add2_HiC = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_HiNc,
1859 Mulhi2_Hi, Add1_Lo.getValue(1));
1860 SDValue Add2_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add2_HiC,
1861 Zero, Add2_Lo.getValue(1));
1862 SDValue Add2 = DAG.getBitcast(VT,
1863 DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi}));
1864 SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2);
1865
1866 SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3);
1867
1868 SDValue Mul3_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, Zero);
1869 SDValue Mul3_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, One);
1870 SDValue Sub1_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Lo,
1871 Mul3_Lo, Zero1);
1872 SDValue Sub1_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Hi,
1873 Mul3_Hi, Sub1_Lo.getValue(1));
1874 SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi);
1875 SDValue Sub1 = DAG.getBitcast(VT,
1876 DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));
1877
1878 SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
1879 SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
1880 ISD::SETUGE);
1881 SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
1882 ISD::SETUGE);
1883 SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);
1884
1885 // TODO: Here and below portions of the code can be enclosed into if/endif.
1886 // Currently control flow is unconditional and we have 4 selects after
1887 // potential endif to substitute PHIs.
1888
1889 // if C3 != 0 ...
1890 SDValue Sub2_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Lo,
1891 RHS_Lo, Zero1);
1892 SDValue Sub2_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Mi,
1893 RHS_Hi, Sub1_Lo.getValue(1));
1894 SDValue Sub2_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1895 Zero, Sub2_Lo.getValue(1));
1896 SDValue Sub2 = DAG.getBitcast(VT,
1897 DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));
1898
1899 SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);
1900
1901 SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
1902 ISD::SETUGE);
1903 SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
1904 ISD::SETUGE);
1905 SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);
1906
1907 // if (C6 != 0)
1908 SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);
1909
1910 SDValue Sub3_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Lo,
1911 RHS_Lo, Zero1);
1912 SDValue Sub3_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1913 RHS_Hi, Sub2_Lo.getValue(1));
1914 SDValue Sub3_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub3_Mi,
1915 Zero, Sub3_Lo.getValue(1));
1916 SDValue Sub3 = DAG.getBitcast(VT,
1917 DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));
1918
1919 // endif C6
1920 // endif C3
1921
1922 SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
1923 SDValue Div = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);
1924
1925 SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
1926 SDValue Rem = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);
1927
1928 Results.push_back(Div);
1929 Results.push_back(Rem);
1930
1931 return;
1932 }
1933
1934 // r600 expansion.
1935 // Get Speculative values
1936 SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
1937 SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
1938
1939 SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
1940 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
1941 REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
1942
1943 SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
1944 SDValue DIV_Lo = Zero;
1945
1946 const unsigned halfBitWidth = HalfVT.getSizeInBits();
1947
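// Restoring long division: each iteration shifts one bit of LHS_Lo into REM
// and subtracts RHS when REM >= RHS, setting the corresponding bit of DIV_Lo.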
1948 for (unsigned i = 0; i < halfBitWidth; ++i) {
1949 const unsigned bitPos = halfBitWidth - i - 1;
1950 SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
1951 // Get value of high bit
1952 SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
1953 HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
1954 HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
1955
1956 // Shift
1957 REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
1958 // Add LHS high bit
1959 REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
1960
1961 SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
1962 SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);
1963
1964 DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
1965
1966 // Update REM
1967 SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
1968 REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
1969 }
1970
1971 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
1972 DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
1973 Results.push_back(DIV);
1974 Results.push_back(REM);
1975 }
1976
1977 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
1978 SelectionDAG &DAG) const {
1979 SDLoc DL(Op);
1980 EVT VT = Op.getValueType();
1981
1982 if (VT == MVT::i64) {
1983 SmallVector<SDValue, 2> Results;
1984 LowerUDIVREM64(Op, DAG, Results);
1985 return DAG.getMergeValues(Results, DL);
1986 }
1987
1988 if (VT == MVT::i32) {
1989 if (SDValue Res = LowerDIVREM24(Op, DAG, false))
1990 return Res;
1991 }
1992
1993 SDValue X = Op.getOperand(0);
1994 SDValue Y = Op.getOperand(1);
1995
1996 // See AMDGPUCodeGenPrepare::expandDivRem32 for a description of the
1997 // algorithm used here.
1998
1999 // Initial estimate of inv(y).
2000 SDValue Z = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Y);
2001
2002 // One round of UNR (unsigned Newton-Raphson) to refine the reciprocal estimate.
2003 SDValue NegY = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Y);
2004 SDValue NegYZ = DAG.getNode(ISD::MUL, DL, VT, NegY, Z);
2005 Z = DAG.getNode(ISD::ADD, DL, VT, Z,
2006 DAG.getNode(ISD::MULHU, DL, VT, Z, NegYZ));
2007
2008 // Quotient/remainder estimate.
2009 SDValue Q = DAG.getNode(ISD::MULHU, DL, VT, X, Z);
2010 SDValue R =
2011 DAG.getNode(ISD::SUB, DL, VT, X, DAG.getNode(ISD::MUL, DL, VT, Q, Y));
2012
2013 // First quotient/remainder refinement.
2014 EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2015 SDValue One = DAG.getConstant(1, DL, VT);
2016 SDValue Cond = DAG.getSetCC(DL, CCVT, R, Y, ISD::SETUGE);
2017 Q = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2018 DAG.getNode(ISD::ADD, DL, VT, Q, One), Q);
2019 R = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2020 DAG.getNode(ISD::SUB, DL, VT, R, Y), R);
2021
2022 // Second quotient/remainder refinement.
2023 Cond = DAG.getSetCC(DL, CCVT, R, Y, ISD::SETUGE);
2024 Q = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2025 DAG.getNode(ISD::ADD, DL, VT, Q, One), Q);
2026 R = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2027 DAG.getNode(ISD::SUB, DL, VT, R, Y), R);
2028
2029 return DAG.getMergeValues({Q, R}, DL);
2030 }
2031
2032 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
2033 SelectionDAG &DAG) const {
2034 SDLoc DL(Op);
2035 EVT VT = Op.getValueType();
2036
2037 SDValue LHS = Op.getOperand(0);
2038 SDValue RHS = Op.getOperand(1);
2039
2040 SDValue Zero = DAG.getConstant(0, DL, VT);
2041 SDValue NegOne = DAG.getConstant(-1, DL, VT);
2042
2043 if (VT == MVT::i32) {
2044 if (SDValue Res = LowerDIVREM24(Op, DAG, true))
2045 return Res;
2046 }
2047
2048 if (VT == MVT::i64 &&
2049 DAG.ComputeNumSignBits(LHS) > 32 &&
2050 DAG.ComputeNumSignBits(RHS) > 32) {
2051 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
2052
2053 // Hi/Lo split.
2054 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
2055 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
2056 SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
2057 LHS_Lo, RHS_Lo);
2058 SDValue Res[2] = {
2059 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
2060 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
2061 };
2062 return DAG.getMergeValues(Res, DL);
2063 }
2064
2065 SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
2066 SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
2067 SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
2068 SDValue RSign = LHSign; // Remainder sign is the same as LHS
2069
2070 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
2071 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
2072
2073 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
2074 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
2075
2076 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
2077 SDValue Rem = Div.getValue(1);
2078
2079 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
2080 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
2081
2082 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
2083 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
2084
2085 SDValue Res[2] = {
2086 Div,
2087 Rem
2088 };
2089 return DAG.getMergeValues(Res, DL);
2090 }
2091
2092 // (frem x, y) -> (fma (fneg (ftrunc (fdiv x, y))), y, x)
2093 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
2094 SDLoc SL(Op);
2095 EVT VT = Op.getValueType();
2096 auto Flags = Op->getFlags();
2097 SDValue X = Op.getOperand(0);
2098 SDValue Y = Op.getOperand(1);
2099
2100 SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y, Flags);
2101 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, VT, Div, Flags);
2102 SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Trunc, Flags);
2103 // TODO: For f32 use FMAD instead if !hasFastFMA32?
2104 return DAG.getNode(ISD::FMA, SL, VT, Neg, Y, X, Flags);
2105 }
2106
2107 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
2108 SDLoc SL(Op);
2109 SDValue Src = Op.getOperand(0);
2110
2111 // result = trunc(src)
2112 // if (src > 0.0 && src != result)
2113 // result += 1.0
2114
2115 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2116
2117 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2118 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
2119
2120 EVT SetCCVT =
2121 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2122
2123 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
2124 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2125 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2126
2127 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
2128 // TODO: Should this propagate fast-math-flags?
2129 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2130 }
2131
2132 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
2133 SelectionDAG &DAG) {
2134 const unsigned FractBits = 52;
2135 const unsigned ExpBits = 11;
2136
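// The biased exponent occupies bits [52, 62] of the f64, i.e. bits [20, 30]
// of the high half, so extract 11 bits starting at bit 20 and remove the
// exponent bias of 1023.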
2137 SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
2138 Hi,
2139 DAG.getConstant(FractBits - 32, SL, MVT::i32),
2140 DAG.getConstant(ExpBits, SL, MVT::i32));
2141 SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
2142 DAG.getConstant(1023, SL, MVT::i32));
2143
2144 return Exp;
2145 }
2146
2147 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
2148 SDLoc SL(Op);
2149 SDValue Src = Op.getOperand(0);
2150
2151 assert(Op.getValueType() == MVT::f64);
2152
2153 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2154 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2155
2156 SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2157
2158 // Extract the upper half, since this is where we will find the sign and
2159 // exponent.
2160 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
2161
2162 SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2163
2164 const unsigned FractBits = 52;
2165
2166 // Extract the sign bit.
2167 const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
2168 SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
2169
2170 // Extend back to 64-bits.
2171 SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
2172 SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
2173
2174 SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
2175 const SDValue FractMask
2176 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
2177
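// Clear the fraction bits that lie below the binary point for this exponent.
// Inputs with Exp < 0 truncate to +/-0 (just the sign bit); inputs with
// Exp > 51 are already integral and pass through unchanged.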
2178 SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
2179 SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
2180 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
2181
2182 EVT SetCCVT =
2183 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2184
2185 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
2186
2187 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2188 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2189
2190 SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
2191 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
2192
2193 return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
2194 }
2195
2196 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
2197 SDLoc SL(Op);
2198 SDValue Src = Op.getOperand(0);
2199
2200 assert(Op.getValueType() == MVT::f64);
2201
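// Adding and then subtracting 2^52 (carrying the sign of Src) forces the
// value to be rounded to an integer in the current rounding mode; inputs
// whose magnitude exceeds 0x1.fffffffffffffp+51 are already integral and are
// returned unchanged by the final select.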
2202 APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
2203 SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
2204 SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
2205
2206 // TODO: Should this propagate fast-math-flags?
2207
2208 SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
2209 SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
2210
2211 SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
2212
2213 APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
2214 SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
2215
2216 EVT SetCCVT =
2217 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2218 SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
2219
2220 return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
2221 }
2222
2223 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
2224 // FNEARBYINT and FRINT are the same, except in their handling of FP
2225 // exceptions. Those aren't really meaningful for us, and OpenCL only has
2226 // rint, so just treat them as equivalent.
2227 return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
2228 }
2229
2230 // XXX - May require not supporting f32 denormals?
2231
2232 // Don't handle v2f16. The extra instructions to scalarize and repack around the
2233 // compare and vselect end up producing worse code than scalarizing the whole
2234 // operation.
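// round(x) is computed as trunc(x) plus copysign(1.0, x) when the discarded
// fraction |x - trunc(x)| is at least 0.5, i.e. halfway cases round away
// from zero.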
2235 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2236 SDLoc SL(Op);
2237 SDValue X = Op.getOperand(0);
2238 EVT VT = Op.getValueType();
2239
2240 SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X);
2241
2242 // TODO: Should this propagate fast-math-flags?
2243
2244 SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T);
2245
2246 SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff);
2247
2248 const SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2249 const SDValue One = DAG.getConstantFP(1.0, SL, VT);
2250 const SDValue Half = DAG.getConstantFP(0.5, SL, VT);
2251
2252 SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X);
2253
2254 EVT SetCCVT =
2255 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2256
2257 SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
2258
2259 SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero);
2260
2261 return DAG.getNode(ISD::FADD, SL, VT, T, Sel);
2262 }
2263
2264 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
2265 SDLoc SL(Op);
2266 SDValue Src = Op.getOperand(0);
2267
2268 // result = trunc(src);
2269 // if (src < 0.0 && src != result)
2270 // result += -1.0.
2271
2272 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2273
2274 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2275 const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
2276
2277 EVT SetCCVT =
2278 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2279
2280 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
2281 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2282 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2283
2284 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
2285 // TODO: Should this propagate fast-math-flags?
2286 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2287 }
2288
2289 SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG,
2290 double Log2BaseInverted) const {
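// log_b(x) = log2(x) * (1 / log2(b)); the caller passes in 1 / log2(b) as
// Log2BaseInverted.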
2291 EVT VT = Op.getValueType();
2292
2293 SDLoc SL(Op);
2294 SDValue Operand = Op.getOperand(0);
2295 SDValue Log2Operand = DAG.getNode(ISD::FLOG2, SL, VT, Operand);
2296 SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT);
2297
2298 return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand);
2299 }
2300
2301 // exp2(M_LOG2E_F * f);
2302 SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const {
2303 EVT VT = Op.getValueType();
2304 SDLoc SL(Op);
2305 SDValue Src = Op.getOperand(0);
2306
2307 const SDValue K = DAG.getConstantFP(numbers::log2e, SL, VT);
2308 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Src, K, Op->getFlags());
2309 return DAG.getNode(ISD::FEXP2, SL, VT, Mul, Op->getFlags());
2310 }
2311
2312 static bool isCtlzOpc(unsigned Opc) {
2313 return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
2314 }
2315
2316 static bool isCttzOpc(unsigned Opc) {
2317 return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF;
2318 }
2319
2320 SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const {
2321 SDLoc SL(Op);
2322 SDValue Src = Op.getOperand(0);
2323 bool ZeroUndef = Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
2324 Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF;
2325
2326 unsigned ISDOpc, NewOpc;
2327 if (isCtlzOpc(Op.getOpcode())) {
2328 ISDOpc = ISD::CTLZ_ZERO_UNDEF;
2329 NewOpc = AMDGPUISD::FFBH_U32;
2330 } else if (isCttzOpc(Op.getOpcode())) {
2331 ISDOpc = ISD::CTTZ_ZERO_UNDEF;
2332 NewOpc = AMDGPUISD::FFBL_B32;
2333 } else
2334 llvm_unreachable("Unexpected OPCode!!!");
2335
2336
2337 if (ZeroUndef && Src.getValueType() == MVT::i32)
2338 return DAG.getNode(NewOpc, SL, MVT::i32, Src);
2339
2340 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2341
2342 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2343 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2344
2345 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
2346 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
2347
2348 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
2349 *DAG.getContext(), MVT::i32);
2350
2351 SDValue HiOrLo = isCtlzOpc(Op.getOpcode()) ? Hi : Lo;
2352 SDValue Hi0orLo0 = DAG.getSetCC(SL, SetCCVT, HiOrLo, Zero, ISD::SETEQ);
2353
2354 SDValue OprLo = DAG.getNode(ISDOpc, SL, MVT::i32, Lo);
2355 SDValue OprHi = DAG.getNode(ISDOpc, SL, MVT::i32, Hi);
2356
2357 const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
2358 SDValue Add, NewOpr;
2359 if (isCtlzOpc(Op.getOpcode())) {
2360 Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprLo, Bits32);
2361 // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
2362 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprHi);
2363 } else {
2364 Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprHi, Bits32);
2365 // cttz(x) = lo_32(x) == 0 ? cttz(hi_32(x)) + 32 : cttz(lo_32(x))
2366 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprLo);
2367 }
2368
2369 if (!ZeroUndef) {
2370 // Test if the full 64-bit input is zero.
2371
2372 // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32,
2373 // which we probably don't want.
2374 SDValue LoOrHi = isCtlzOpc(Op.getOpcode()) ? Lo : Hi;
2375 SDValue Lo0OrHi0 = DAG.getSetCC(SL, SetCCVT, LoOrHi, Zero, ISD::SETEQ);
2376 SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0OrHi0, Hi0orLo0);
2377
2378 // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction
2379 // with the same cycles, otherwise it is slower.
2380 // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src,
2381 // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ);
2382
2383 const SDValue Bits64 = DAG.getConstant(64, SL, MVT::i32);
2384
2385 // The instruction returns -1 for 0 input, but the defined intrinsic
2386 // behavior is to return the number of bits.
2387 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32,
2388 SrcIsZero, Bits64, NewOpr);
2389 }
2390
2391 return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr);
2392 }
2393
2394 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
2395 bool Signed) const {
2396 // Unsigned
2397 // cul2f(ulong u)
2398 //{
2399 // uint lz = clz(u);
2400 // uint e = (u != 0) ? 127U + 63U - lz : 0;
2401 // u = (u << lz) & 0x7fffffffffffffffUL;
2402 // ulong t = u & 0xffffffffffUL;
2403 // uint v = (e << 23) | (uint)(u >> 40);
2404 // uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
2405 // return as_float(v + r);
2406 //}
2407 // Signed
2408 // cl2f(long l)
2409 //{
2410 // long s = l >> 63;
2411 // float r = cul2f((l + s) ^ s);
2412 // return s ? -r : r;
2413 //}
2414
2415 SDLoc SL(Op);
2416 SDValue Src = Op.getOperand(0);
2417 SDValue L = Src;
2418
2419 SDValue S;
2420 if (Signed) {
2421 const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64);
2422 S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit);
2423
2424 SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S);
2425 L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S);
2426 }
2427
2428 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
2429 *DAG.getContext(), MVT::f32);
2430
2431
2432 SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32);
2433 SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64);
2434 SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L);
2435 LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ);
2436
2437 SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32);
2438 SDValue E = DAG.getSelect(SL, MVT::i32,
2439 DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE),
2440 DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ),
2441 ZeroI32);
2442
2443 SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64,
2444 DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ),
2445 DAG.getConstant((-1ULL) >> 1, SL, MVT::i64));
2446
2447 SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U,
2448 DAG.getConstant(0xffffffffffULL, SL, MVT::i64));
2449
2450 SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64,
2451 U, DAG.getConstant(40, SL, MVT::i64));
2452
2453 SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32,
2454 DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)),
2455 DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, UShl));
2456
2457 SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64);
2458 SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT);
2459 SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ);
2460
2461 SDValue One = DAG.getConstant(1, SL, MVT::i32);
2462
2463 SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One);
2464
2465 SDValue R = DAG.getSelect(SL, MVT::i32,
2466 RCmp,
2467 One,
2468 DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32));
2469 R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R);
2470 R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R);
2471
2472 if (!Signed)
2473 return R;
2474
2475 SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R);
2476 return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R);
2477 }
2478
2479 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
2480 bool Signed) const {
2481 SDLoc SL(Op);
2482 SDValue Src = Op.getOperand(0);
2483
2484 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2485
2486 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
2487 DAG.getConstant(0, SL, MVT::i32));
2488 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
2489 DAG.getConstant(1, SL, MVT::i32));
2490
2491 SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
2492 SL, MVT::f64, Hi);
2493
2494 SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
2495
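// Recombine as (double)Hi * 2^32 + (double)Lo; the ldexp scaling is exact
// since it only adjusts the exponent.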
2496 SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
2497 DAG.getConstant(32, SL, MVT::i32));
2498 // TODO: Should this propagate fast-math-flags?
2499 return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
2500 }
2501
2502 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
2503 SelectionDAG &DAG) const {
2504 // TODO: Factor out code common with LowerSINT_TO_FP.
2505 EVT DestVT = Op.getValueType();
2506 SDValue Src = Op.getOperand(0);
2507 EVT SrcVT = Src.getValueType();
2508
2509 if (SrcVT == MVT::i16) {
2510 if (DestVT == MVT::f16)
2511 return Op;
2512 SDLoc DL(Op);
2513
2514 // Promote src to i32
2515 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Src);
2516 return DAG.getNode(ISD::UINT_TO_FP, DL, DestVT, Ext);
2517 }
2518
2519 assert(SrcVT == MVT::i64 && "operation should be legal");
2520
2521 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2522 SDLoc DL(Op);
2523
2524 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2525 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2526 SDValue FPRound =
2527 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2528
2529 return FPRound;
2530 }
2531
2532 if (DestVT == MVT::f32)
2533 return LowerINT_TO_FP32(Op, DAG, false);
2534
2535 assert(DestVT == MVT::f64);
2536 return LowerINT_TO_FP64(Op, DAG, false);
2537 }
2538
2539 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
2540 SelectionDAG &DAG) const {
2541 EVT DestVT = Op.getValueType();
2542
2543 SDValue Src = Op.getOperand(0);
2544 EVT SrcVT = Src.getValueType();
2545
2546 if (SrcVT == MVT::i16) {
2547 if (DestVT == MVT::f16)
2548 return Op;
2549
2550 SDLoc DL(Op);
2551 // Promote src to i32
2552 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32, Src);
2553 return DAG.getNode(ISD::SINT_TO_FP, DL, DestVT, Ext);
2554 }
2555
2556 assert(SrcVT == MVT::i64 && "operation should be legal");
2557
2558 // TODO: Factor out code common with LowerUINT_TO_FP.
2559
2560 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2561 SDLoc DL(Op);
2562 SDValue Src = Op.getOperand(0);
2563
2564 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2565 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2566 SDValue FPRound =
2567 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2568
2569 return FPRound;
2570 }
2571
2572 if (DestVT == MVT::f32)
2573 return LowerINT_TO_FP32(Op, DAG, true);
2574
2575 assert(DestVT == MVT::f64);
2576 return LowerINT_TO_FP64(Op, DAG, true);
2577 }
2578
2579 SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
2580 bool Signed) const {
2581 SDLoc SL(Op);
2582
2583 SDValue Src = Op.getOperand(0);
2584
2585 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2586
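// K0 and K1 appear to be 2^-32 and -2^32 respectively: Hi is the converted
// floor(Trunc * 2^-32), and fma(floor(Trunc * 2^-32), -2^32, Trunc) recovers
// the low 32 bits.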
2587 SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
2588 MVT::f64);
2589 SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
2590 MVT::f64);
2591 // TODO: Should this propagate fast-math-flags?
2592 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);
2593
2594 SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul);
2595
2596
2597 SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc);
2598
2599 SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL,
2600 MVT::i32, FloorMul);
2601 SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
2602
2603 SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi});
2604
2605 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
2606 }
2607
2608 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const {
2609 SDLoc DL(Op);
2610 SDValue N0 = Op.getOperand(0);
2611
2612 // Convert to target node to get known bits
2613 if (N0.getValueType() == MVT::f32)
2614 return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
2615
2616 if (getTargetMachine().Options.UnsafeFPMath) {
2617 // There is a generic expand for FP_TO_FP16 with unsafe fast math.
2618 return SDValue();
2619 }
2620
2621 assert(N0.getSimpleValueType() == MVT::f64);
2622
2623 // f64 -> f16 conversion using round-to-nearest-even rounding mode.
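// The sequence below builds the rounded mantissa and rebased exponent by
// hand, then handles the denormal (E < 1), overflow (E > 30) and Inf/NaN
// (E == 1039) cases before re-attaching the sign bit.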
2624 const unsigned ExpMask = 0x7ff;
2625 const unsigned ExpBiasf64 = 1023;
2626 const unsigned ExpBiasf16 = 15;
2627 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
2628 SDValue One = DAG.getConstant(1, DL, MVT::i32);
2629 SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0);
2630 SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U,
2631 DAG.getConstant(32, DL, MVT::i64));
2632 UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32);
2633 U = DAG.getZExtOrTrunc(U, DL, MVT::i32);
2634 SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2635 DAG.getConstant(20, DL, MVT::i64));
2636 E = DAG.getNode(ISD::AND, DL, MVT::i32, E,
2637 DAG.getConstant(ExpMask, DL, MVT::i32));
2638 // Subtract the fp64 exponent bias (1023) to get the real exponent and
2639 // add the f16 bias (15) to get the biased exponent for the f16 format.
2640 E = DAG.getNode(ISD::ADD, DL, MVT::i32, E,
2641 DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32));
2642
2643 SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2644 DAG.getConstant(8, DL, MVT::i32));
2645 M = DAG.getNode(ISD::AND, DL, MVT::i32, M,
2646 DAG.getConstant(0xffe, DL, MVT::i32));
2647
2648 SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH,
2649 DAG.getConstant(0x1ff, DL, MVT::i32));
2650 MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U);
2651
2652 SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ);
2653 M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set);
2654
2655 // (M != 0 ? 0x0200 : 0) | 0x7c00;
2656 SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32,
2657 DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32),
2658 Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32));
2659
2660 // N = M | (E << 12);
2661 SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2662 DAG.getNode(ISD::SHL, DL, MVT::i32, E,
2663 DAG.getConstant(12, DL, MVT::i32)));
2664
2665 // B = clamp(1-E, 0, 13);
2666 SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32,
2667 One, E);
2668 SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero);
2669 B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B,
2670 DAG.getConstant(13, DL, MVT::i32));
2671
2672 SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2673 DAG.getConstant(0x1000, DL, MVT::i32));
2674
2675 SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B);
2676 SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B);
2677 SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE);
2678 D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1);
2679
2680 SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT);
2681 SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V,
2682 DAG.getConstant(0x7, DL, MVT::i32));
2683 V = DAG.getNode(ISD::SRL, DL, MVT::i32, V,
2684 DAG.getConstant(2, DL, MVT::i32));
2685 SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32),
2686 One, Zero, ISD::SETEQ);
2687 SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32),
2688 One, Zero, ISD::SETGT);
2689 V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1);
2690 V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1);
2691
2692 V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32),
2693 DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT);
2694 V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32),
2695 I, V, ISD::SETEQ);
2696
2697 // Extract the sign bit.
2698 SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2699 DAG.getConstant(16, DL, MVT::i32));
2700 Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign,
2701 DAG.getConstant(0x8000, DL, MVT::i32));
2702
2703 V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V);
2704 return DAG.getZExtOrTrunc(V, DL, Op.getValueType());
2705 }
2706
2707 SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op,
2708 SelectionDAG &DAG) const {
2709 SDValue Src = Op.getOperand(0);
2710
2711 // TODO: Factor out code common with LowerFP_TO_UINT.
2712
2713 EVT SrcVT = Src.getValueType();
2714 if (SrcVT == MVT::f16 ||
2715 (SrcVT == MVT::f32 && Src.getOpcode() == ISD::FP16_TO_FP)) {
2716 SDLoc DL(Op);
2717
2718 SDValue FpToInt32 = DAG.getNode(Op.getOpcode(), DL, MVT::i32, Src);
2719 return DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, FpToInt32);
2720 }
2721
2722 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2723 return LowerFP64_TO_INT(Op, DAG, true);
2724
2725 return SDValue();
2726 }
2727
2728 SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op,
2729 SelectionDAG &DAG) const {
2730 SDValue Src = Op.getOperand(0);
2731
2732 // TODO: Factor out code common with LowerFP_TO_SINT.
2733
2734 EVT SrcVT = Src.getValueType();
2735 if (SrcVT == MVT::f16 ||
2736 (SrcVT == MVT::f32 && Src.getOpcode() == ISD::FP16_TO_FP)) {
2737 SDLoc DL(Op);
2738
2739 SDValue FpToUInt32 = DAG.getNode(Op.getOpcode(), DL, MVT::i32, Src);
2740 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, FpToUInt32);
2741 }
2742
2743 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2744 return LowerFP64_TO_INT(Op, DAG, false);
2745
2746 return SDValue();
2747 }
2748
2749 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
2750 SelectionDAG &DAG) const {
2751 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2752 MVT VT = Op.getSimpleValueType();
2753 MVT ScalarVT = VT.getScalarType();
2754
2755 assert(VT.isVector());
2756
2757 SDValue Src = Op.getOperand(0);
2758 SDLoc DL(Op);
2759
2760 // TODO: Don't scalarize on Evergreen?
2761 unsigned NElts = VT.getVectorNumElements();
2762 SmallVector<SDValue, 8> Args;
2763 DAG.ExtractVectorElements(Src, Args, 0, NElts);
2764
2765 SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
2766 for (unsigned I = 0; I < NElts; ++I)
2767 Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
2768
2769 return DAG.getBuildVector(VT, DL, Args);
2770 }
2771
2772 //===----------------------------------------------------------------------===//
2773 // Custom DAG optimizations
2774 //===----------------------------------------------------------------------===//
2775
2776 static bool isU24(SDValue Op, SelectionDAG &DAG) {
2777 return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
2778 }
2779
2780 static bool isI24(SDValue Op, SelectionDAG &DAG) {
2781 EVT VT = Op.getValueType();
2782 return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
2783 // as unsigned 24-bit values.
2784 AMDGPUTargetLowering::numBitsSigned(Op, DAG) < 24;
2785 }
2786
2787 static SDValue simplifyI24(SDNode *Node24,
2788 TargetLowering::DAGCombinerInfo &DCI) {
2789 SelectionDAG &DAG = DCI.DAG;
2790 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2791 bool IsIntrin = Node24->getOpcode() == ISD::INTRINSIC_WO_CHAIN;
2792
2793 SDValue LHS = IsIntrin ? Node24->getOperand(1) : Node24->getOperand(0);
2794 SDValue RHS = IsIntrin ? Node24->getOperand(2) : Node24->getOperand(1);
2795 unsigned NewOpcode = Node24->getOpcode();
2796 if (IsIntrin) {
2797 unsigned IID = cast<ConstantSDNode>(Node24->getOperand(0))->getZExtValue();
2798 NewOpcode = IID == Intrinsic::amdgcn_mul_i24 ?
2799 AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
2800 }
2801
2802 APInt Demanded = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 24);
2803
2804 // First try to simplify using SimplifyMultipleUseDemandedBits which allows
2805 // the operands to have other uses, but will only perform simplifications that
2806 // involve bypassing some nodes for this user.
2807 SDValue DemandedLHS = TLI.SimplifyMultipleUseDemandedBits(LHS, Demanded, DAG);
2808 SDValue DemandedRHS = TLI.SimplifyMultipleUseDemandedBits(RHS, Demanded, DAG);
2809 if (DemandedLHS || DemandedRHS)
2810 return DAG.getNode(NewOpcode, SDLoc(Node24), Node24->getVTList(),
2811 DemandedLHS ? DemandedLHS : LHS,
2812 DemandedRHS ? DemandedRHS : RHS);
2813
2814 // Now try SimplifyDemandedBits which can simplify the nodes used by our
2815 // operands if this node is the only user.
2816 if (TLI.SimplifyDemandedBits(LHS, Demanded, DCI))
2817 return SDValue(Node24, 0);
2818 if (TLI.SimplifyDemandedBits(RHS, Demanded, DCI))
2819 return SDValue(Node24, 0);
2820
2821 return SDValue();
2822 }
2823
2824 template <typename IntTy>
2825 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
2826 uint32_t Width, const SDLoc &DL) {
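// Shift the field to the top of the 32-bit word, then shift back down so the
// top bit of the field lands in the sign position; an arithmetic shift (signed
// IntTy) sign-extends it, a logical shift (unsigned IntTy) zero-extends it.
// For example, BFE_I32 of 0x0000A500 with Offset = 8, Width = 8: shift left by
// 16 to get 0xA5000000, then arithmetic shift right by 24 gives 0xFFFFFFA5
// (-91). When Offset + Width reaches bit 31, a single shift right by Offset is
// the whole extraction.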
2827 if (Width + Offset < 32) {
2828 uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
2829 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
2830 return DAG.getConstant(Result, DL, MVT::i32);
2831 }
2832
2833 return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
2834 }
2835
2836 static bool hasVolatileUser(SDNode *Val) {
2837 for (SDNode *U : Val->uses()) {
2838 if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
2839 if (M->isVolatile())
2840 return true;
2841 }
2842 }
2843
2844 return false;
2845 }
2846
2847 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
2848 // i32 vectors are the canonical memory type.
2849 if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT))
2850 return false;
2851
2852 if (!VT.isByteSized())
2853 return false;
2854
2855 unsigned Size = VT.getStoreSize();
2856
2857 if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector())
2858 return false;
2859
2860 if (Size == 3 || (Size > 4 && (Size % 4 != 0)))
2861 return false;
2862
2863 return true;
2864 }
2865
2866 // Replace a load of an illegal type with a load of a bitcast to a friendlier
2867 // type.
2868 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
2869 DAGCombinerInfo &DCI) const {
2870 if (!DCI.isBeforeLegalize())
2871 return SDValue();
2872
2873 LoadSDNode *LN = cast<LoadSDNode>(N);
2874 if (!LN->isSimple() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
2875 return SDValue();
2876
2877 SDLoc SL(N);
2878 SelectionDAG &DAG = DCI.DAG;
2879 EVT VT = LN->getMemoryVT();
2880
2881 unsigned Size = VT.getStoreSize();
2882 Align Alignment = LN->getAlign();
2883 if (Alignment < Size && isTypeLegal(VT)) {
2884 bool IsFast;
2885 unsigned AS = LN->getAddressSpace();
2886
2887 // Expand unaligned loads earlier than legalization. Due to visitation order
2888 // problems during legalization, the emitted instructions to pack and unpack
2889 // the bytes again are not eliminated in the case of an unaligned copy.
2890 if (!allowsMisalignedMemoryAccesses(VT, AS, Alignment.value(),
2891 LN->getMemOperand()->getFlags(),
2892 &IsFast)) {
2893 SDValue Ops[2];
2894
2895 if (VT.isVector())
2896 std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(LN, DAG);
2897 else
2898 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
2899
2900 return DAG.getMergeValues(Ops, SDLoc(N));
2901 }
2902
2903 if (!IsFast)
2904 return SDValue();
2905 }
2906
2907 if (!shouldCombineMemoryType(VT))
2908 return SDValue();
2909
2910 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2911
2912 SDValue NewLoad
2913 = DAG.getLoad(NewVT, SL, LN->getChain(),
2914 LN->getBasePtr(), LN->getMemOperand());
2915
2916 SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
2917 DCI.CombineTo(N, BC, NewLoad.getValue(1));
2918 return SDValue(N, 0);
2919 }
2920
2921 // Replace store of an illegal type with a store of a bitcast to a friendlier
2922 // type.
2923 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
2924 DAGCombinerInfo &DCI) const {
2925 if (!DCI.isBeforeLegalize())
2926 return SDValue();
2927
2928 StoreSDNode *SN = cast<StoreSDNode>(N);
2929 if (!SN->isSimple() || !ISD::isNormalStore(SN))
2930 return SDValue();
2931
2932 EVT VT = SN->getMemoryVT();
2933 unsigned Size = VT.getStoreSize();
2934
2935 SDLoc SL(N);
2936 SelectionDAG &DAG = DCI.DAG;
2937 Align Alignment = SN->getAlign();
2938 if (Alignment < Size && isTypeLegal(VT)) {
2939 bool IsFast;
2940 unsigned AS = SN->getAddressSpace();
2941
2942 // Expand unaligned stores earlier than legalization. Due to visitation
2943 // order problems during legalization, the emitted instructions to pack and
2944 // unpack the bytes again are not eliminated in the case of an unaligned
2945 // copy.
2946 if (!allowsMisalignedMemoryAccesses(VT, AS, Alignment.value(),
2947 SN->getMemOperand()->getFlags(),
2948 &IsFast)) {
2949 if (VT.isVector())
2950 return scalarizeVectorStore(SN, DAG);
2951
2952 return expandUnalignedStore(SN, DAG);
2953 }
2954
2955 if (!IsFast)
2956 return SDValue();
2957 }
2958
2959 if (!shouldCombineMemoryType(VT))
2960 return SDValue();
2961
2962 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2963 SDValue Val = SN->getValue();
2964
2965 //DCI.AddToWorklist(Val.getNode());
2966
2967 bool OtherUses = !Val.hasOneUse();
2968 SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
2969 if (OtherUses) {
2970 SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
2971 DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
2972 }
2973
2974 return DAG.getStore(SN->getChain(), SL, CastVal,
2975 SN->getBasePtr(), SN->getMemOperand());
2976 }
2977
2978 // FIXME: This should go in generic DAG combiner with an isTruncateFree check,
2979 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU
2980 // issues.
2981 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N,
2982 DAGCombinerInfo &DCI) const {
2983 SelectionDAG &DAG = DCI.DAG;
2984 SDValue N0 = N->getOperand(0);
2985
2986 // (vt2 (assertzext (truncate vt0:x), vt1)) ->
2987 // (vt2 (truncate (assertzext vt0:x, vt1)))
2988 if (N0.getOpcode() == ISD::TRUNCATE) {
2989 SDValue N1 = N->getOperand(1);
2990 EVT ExtVT = cast<VTSDNode>(N1)->getVT();
2991 SDLoc SL(N);
2992
2993 SDValue Src = N0.getOperand(0);
2994 EVT SrcVT = Src.getValueType();
2995 if (SrcVT.bitsGE(ExtVT)) {
2996 SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1);
2997 return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg);
2998 }
2999 }
3000
3001 return SDValue();
3002 }
3003
3004 SDValue AMDGPUTargetLowering::performIntrinsicWOChainCombine(
3005 SDNode *N, DAGCombinerInfo &DCI) const {
3006 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3007 switch (IID) {
3008 case Intrinsic::amdgcn_mul_i24:
3009 case Intrinsic::amdgcn_mul_u24:
3010 return simplifyI24(N, DCI);
3011 case Intrinsic::amdgcn_fract:
3012 case Intrinsic::amdgcn_rsq:
3013 case Intrinsic::amdgcn_rcp_legacy:
3014 case Intrinsic::amdgcn_rsq_legacy:
3015 case Intrinsic::amdgcn_rsq_clamp:
3016 case Intrinsic::amdgcn_ldexp: {
3017 // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted
3018 SDValue Src = N->getOperand(1);
3019 return Src.isUndef() ? Src : SDValue();
3020 }
3021 default:
3022 return SDValue();
3023 }
3024 }
3025
3026 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the
3027 /// binary operation \p Opc to it with the corresponding constant operands.
3028 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
3029 DAGCombinerInfo &DCI, const SDLoc &SL,
3030 unsigned Opc, SDValue LHS,
3031 uint32_t ValLo, uint32_t ValHi) const {
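// Bitwise operations act independently on the two 32-bit halves, so a 64-bit
// op with a constant becomes two 32-bit ops whose results are reassembled
// through a v2i32 build_vector bitcast back to i64.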
3032 SelectionDAG &DAG = DCI.DAG;
3033 SDValue Lo, Hi;
3034 std::tie(Lo, Hi) = split64BitValue(LHS, DAG);
3035
3036 SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32);
3037 SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32);
3038
3039 SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS);
3040 SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS);
3041
3042 // Re-visit the ands. It's possible we eliminated one of them and it could
3043 // simplify the vector.
3044 DCI.AddToWorklist(Lo.getNode());
3045 DCI.AddToWorklist(Hi.getNode());
3046
3047 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
3048 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3049 }
3050
3051 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
3052 DAGCombinerInfo &DCI) const {
3053 EVT VT = N->getValueType(0);
3054
3055 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3056 if (!RHS)
3057 return SDValue();
3058
3059 SDValue LHS = N->getOperand(0);
3060 unsigned RHSVal = RHS->getZExtValue();
3061 if (!RHSVal)
3062 return LHS;
3063
3064 SDLoc SL(N);
3065 SelectionDAG &DAG = DCI.DAG;
3066
3067 switch (LHS->getOpcode()) {
3068 default:
3069 break;
3070 case ISD::ZERO_EXTEND:
3071 case ISD::SIGN_EXTEND:
3072 case ISD::ANY_EXTEND: {
3073 SDValue X = LHS->getOperand(0);
3074
3075 if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
3076 isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
3077 // Prefer build_vector as the canonical form if packed types are legal.
3078 // (shl ([asz]ext i16:x), 16) -> (build_vector 0, x)
3079 SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
3080 { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
3081 return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3082 }
3083
3084 // shl (ext x) => zext (shl x), if shift does not overflow int
3085 if (VT != MVT::i64)
3086 break;
3087 KnownBits Known = DAG.computeKnownBits(X);
3088 unsigned LZ = Known.countMinLeadingZeros();
3089 if (LZ < RHSVal)
3090 break;
3091 EVT XVT = X.getValueType();
3092 SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
3093 return DAG.getZExtOrTrunc(Shl, SL, VT);
3094 }
3095 }
3096
3097 if (VT != MVT::i64)
3098 return SDValue();
3099
3100 // i64 (shl x, C) -> (build_pair 0, (shl x, C - 32))
3101
3102 // On some subtargets, 64-bit shift is a quarter rate instruction. In the
3103 // common case, splitting this into a move and a 32-bit shift is faster and
3104 // the same code size.
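// For example, (shl i64:x, 40) becomes bitcast (build_vector 0, (shl (trunc x), 8)):
// the low 32 bits of the result are zero, and the high half only needs a
// 32-bit shift of the low half of x.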
3105 if (RHSVal < 32)
3106 return SDValue();
3107
3108 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
3109
3110 SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
3111 SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
3112
3113 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3114
3115 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
3116 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3117 }
3118
3119 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
3120 DAGCombinerInfo &DCI) const {
3121 if (N->getValueType(0) != MVT::i64)
3122 return SDValue();
3123
3124 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3125 if (!RHS)
3126 return SDValue();
3127
3128 SelectionDAG &DAG = DCI.DAG;
3129 SDLoc SL(N);
3130 unsigned RHSVal = RHS->getZExtValue();
3131
3132 // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
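// The low half of the result is the old high half; the new high half is just
// the sign, i.e. hi_32(x) arithmetically shifted right by 31.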
3133 if (RHSVal == 32) {
3134 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3135 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3136 DAG.getConstant(31, SL, MVT::i32));
3137
3138 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
3139 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3140 }
3141
3142 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
3143 if (RHSVal == 63) {
3144 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3145 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3146 DAG.getConstant(31, SL, MVT::i32));
3147 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
3148 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3149 }
3150
3151 return SDValue();
3152 }
3153
3154 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
3155 DAGCombinerInfo &DCI) const {
3156 auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3157 if (!RHS)
3158 return SDValue();
3159
3160 EVT VT = N->getValueType(0);
3161 SDValue LHS = N->getOperand(0);
3162 unsigned ShiftAmt = RHS->getZExtValue();
3163 SelectionDAG &DAG = DCI.DAG;
3164 SDLoc SL(N);
3165
3166 // fold (srl (and x, (c1 << c2)), c2) -> (and (srl x, c2), c1)
3167 // this improves the ability to match BFE patterns in isel.
3168 if (LHS.getOpcode() == ISD::AND) {
3169 if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
3170 if (Mask->getAPIntValue().isShiftedMask() &&
3171 Mask->getAPIntValue().countTrailingZeros() == ShiftAmt) {
3172 return DAG.getNode(
3173 ISD::AND, SL, VT,
3174 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)),
3175 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1)));
3176 }
3177 }
3178 }
3179
3180 if (VT != MVT::i64)
3181 return SDValue();
3182
3183 if (ShiftAmt < 32)
3184 return SDValue();
3185
3186 // srl i64:x, C for C >= 32
3187 // =>
3188 // build_pair (srl hi_32(x), C - 32), 0
3189 SDValue One = DAG.getConstant(1, SL, MVT::i32);
3190 SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3191
3192 SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, LHS);
3193 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecOp, One);
3194
3195 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
3196 SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
3197
3198 SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
3199
3200 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
3201 }
3202
3203 SDValue AMDGPUTargetLowering::performTruncateCombine(
3204 SDNode *N, DAGCombinerInfo &DCI) const {
3205 SDLoc SL(N);
3206 SelectionDAG &DAG = DCI.DAG;
3207 EVT VT = N->getValueType(0);
3208 SDValue Src = N->getOperand(0);
3209
3210 // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x)
3211 if (Src.getOpcode() == ISD::BITCAST && !VT.isVector()) {
3212 SDValue Vec = Src.getOperand(0);
3213 if (Vec.getOpcode() == ISD::BUILD_VECTOR) {
3214 SDValue Elt0 = Vec.getOperand(0);
3215 EVT EltVT = Elt0.getValueType();
3216 if (VT.getFixedSizeInBits() <= EltVT.getFixedSizeInBits()) {
3217 if (EltVT.isFloatingPoint()) {
3218 Elt0 = DAG.getNode(ISD::BITCAST, SL,
3219 EltVT.changeTypeToInteger(), Elt0);
3220 }
3221
3222 return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0);
3223 }
3224 }
3225 }
3226
3227 // Equivalent of above for accessing the high element of a vector as an
3228 // integer operation.
3229 // trunc (srl (bitcast (build_vector x, y))), 16 -> trunc (bitcast y)
3230 if (Src.getOpcode() == ISD::SRL && !VT.isVector()) {
3231 if (auto K = isConstOrConstSplat(Src.getOperand(1))) {
3232 if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) {
3233 SDValue BV = stripBitcast(Src.getOperand(0));
3234 if (BV.getOpcode() == ISD::BUILD_VECTOR &&
3235 BV.getValueType().getVectorNumElements() == 2) {
3236 SDValue SrcElt = BV.getOperand(1);
3237 EVT SrcEltVT = SrcElt.getValueType();
3238 if (SrcEltVT.isFloatingPoint()) {
3239 SrcElt = DAG.getNode(ISD::BITCAST, SL,
3240 SrcEltVT.changeTypeToInteger(), SrcElt);
3241 }
3242
3243 return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt);
3244 }
3245 }
3246 }
3247 }
3248
3249 // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit.
3250 //
3251 // i16 (trunc (srl i64:x, K)), K <= 16 ->
3252 // i16 (trunc (srl (i32 (trunc x), K)))
3253 if (VT.getScalarSizeInBits() < 32) {
3254 EVT SrcVT = Src.getValueType();
3255 if (SrcVT.getScalarSizeInBits() > 32 &&
3256 (Src.getOpcode() == ISD::SRL ||
3257 Src.getOpcode() == ISD::SRA ||
3258 Src.getOpcode() == ISD::SHL)) {
3259 SDValue Amt = Src.getOperand(1);
3260 KnownBits Known = DAG.computeKnownBits(Amt);
3261 unsigned Size = VT.getScalarSizeInBits();
3262 if ((Known.isConstant() && Known.getConstant().ule(Size)) ||
3263 (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size))) {
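// The shift amount is known to be small enough that performing the shift on
// a 32-bit truncation of the source does not change the bits the final,
// narrower truncate keeps.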
3264 EVT MidVT = VT.isVector() ?
3265 EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3266 VT.getVectorNumElements()) : MVT::i32;
3267
3268 EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout());
3269 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT,
3270 Src.getOperand(0));
3271 DCI.AddToWorklist(Trunc.getNode());
3272
3273 if (Amt.getValueType() != NewShiftVT) {
3274 Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT);
3275 DCI.AddToWorklist(Amt.getNode());
3276 }
3277
3278 SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT,
3279 Trunc, Amt);
3280 return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift);
3281 }
3282 }
3283 }
3284
3285 return SDValue();
3286 }
3287
3288 // We need to specifically handle i64 mul here to avoid unnecessary conversion
3289 // instructions. If we only match on the legalized i64 mul expansion,
3290 // SimplifyDemandedBits will be unable to remove them because there will be
3291 // multiple uses due to the separate mul + mulh[su].
3292 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
3293 SDValue N0, SDValue N1, unsigned Size, bool Signed) {
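// With both inputs known to fit in 24 bits, the full product fits in 48 bits,
// so a 64-bit result can be assembled from the 32-bit low half (mul24) and the
// 32-bit high half (mulhi24) with a build_pair.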
3294 if (Size <= 32) {
3295 unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3296 return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
3297 }
3298
3299 unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3300 unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
3301
3302 SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
3303 SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
3304
3305 return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64, MulLo, MulHi);
3306 }
3307
3308 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
3309 DAGCombinerInfo &DCI) const {
3310 EVT VT = N->getValueType(0);
3311
3312 unsigned Size = VT.getSizeInBits();
3313 if (VT.isVector() || Size > 64)
3314 return SDValue();
3315
3316 // There are i16 integer mul/mad.
3317 if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
3318 return SDValue();
3319
3320 SelectionDAG &DAG = DCI.DAG;
3321 SDLoc DL(N);
3322
3323 SDValue N0 = N->getOperand(0);
3324 SDValue N1 = N->getOperand(1);
3325
3326 // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
3327 // in the source into any_extends if the result of the mul is truncated. Since
3328 // we can assume the high bits are whatever we want, use the underlying value
3329 // to avoid the unknown high bits from interfering.
3330 if (N0.getOpcode() == ISD::ANY_EXTEND)
3331 N0 = N0.getOperand(0);
3332
3333 if (N1.getOpcode() == ISD::ANY_EXTEND)
3334 N1 = N1.getOperand(0);
3335
3336 SDValue Mul;
3337
3338 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
3339 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3340 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3341 Mul = getMul24(DAG, DL, N0, N1, Size, false);
3342 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
3343 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3344 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3345 Mul = getMul24(DAG, DL, N0, N1, Size, true);
3346 } else {
3347 return SDValue();
3348 }
3349
3350 // We need to use sext even for MUL_U24, because MUL_U24 is used
3351 // for signed multiply of 8 and 16-bit types.
3352 return DAG.getSExtOrTrunc(Mul, DL, VT);
3353 }
3354
3355 SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
3356 DAGCombinerInfo &DCI) const {
3357 EVT VT = N->getValueType(0);
3358
3359 if (!Subtarget->hasMulI24() || VT.isVector())
3360 return SDValue();
3361
3362 SelectionDAG &DAG = DCI.DAG;
3363 SDLoc DL(N);
3364
3365 SDValue N0 = N->getOperand(0);
3366 SDValue N1 = N->getOperand(1);
3367
3368 if (!isI24(N0, DAG) || !isI24(N1, DAG))
3369 return SDValue();
3370
3371 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3372 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3373
3374 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
3375 DCI.AddToWorklist(Mulhi.getNode());
3376 return DAG.getSExtOrTrunc(Mulhi, DL, VT);
3377 }
3378
3379 SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
3380 DAGCombinerInfo &DCI) const {
3381 EVT VT = N->getValueType(0);
3382
3383 if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
3384 return SDValue();
3385
3386 SelectionDAG &DAG = DCI.DAG;
3387 SDLoc DL(N);
3388
3389 SDValue N0 = N->getOperand(0);
3390 SDValue N1 = N->getOperand(1);
3391
3392 if (!isU24(N0, DAG) || !isU24(N1, DAG))
3393 return SDValue();
3394
3395 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3396 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3397
3398 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
3399 DCI.AddToWorklist(Mulhi.getNode());
3400 return DAG.getZExtOrTrunc(Mulhi, DL, VT);
3401 }
3402
3403 static bool isNegativeOne(SDValue Val) {
3404 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
3405 return C->isAllOnesValue();
3406 return false;
3407 }
3408
3409 SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG,
3410 SDValue Op,
3411 const SDLoc &DL,
3412 unsigned Opc) const {
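// The ffbh/ffbl nodes operate on i32; narrower legal types are zero-extended
// first and the result is truncated back to the original type.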
3413 EVT VT = Op.getValueType();
3414 EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
3415 if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
3416 LegalVT != MVT::i16))
3417 return SDValue();
3418
3419 if (VT != MVT::i32)
3420 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);
3421
3422 SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op);
3423 if (VT != MVT::i32)
3424 FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX);
3425
3426 return FFBX;
3427 }
3428
3429 // The native instructions return -1 on 0 input. Optimize out a select that
3430 // produces -1 on 0.
3431 //
3432 // TODO: If zero is not undef, we could also do this if the output is compared
3433 // against the bitwidth.
3434 //
3435 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
3436 SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond,
3437 SDValue LHS, SDValue RHS,
3438 DAGCombinerInfo &DCI) const {
3439 ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
3440 if (!CmpRhs || !CmpRhs->isNullValue())
3441 return SDValue();
3442
3443 SelectionDAG &DAG = DCI.DAG;
3444 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
3445 SDValue CmpLHS = Cond.getOperand(0);
3446
3447 // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
3448 // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x
3449 if (CCOpcode == ISD::SETEQ &&
3450 (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
3451 RHS.getOperand(0) == CmpLHS && isNegativeOne(LHS)) {
3452 unsigned Opc =
3453 isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
3454 return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3455 }
3456
3457 // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
3458 // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x
3459 if (CCOpcode == ISD::SETNE &&
3460 (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) &&
3461 LHS.getOperand(0) == CmpLHS && isNegativeOne(RHS)) {
3462 unsigned Opc =
3463 isCttzOpc(LHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
3464
3465 return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3466 }
3467
3468 return SDValue();
3469 }
3470
3471 static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
3472 unsigned Op,
3473 const SDLoc &SL,
3474 SDValue Cond,
3475 SDValue N1,
3476 SDValue N2) {
3477 SelectionDAG &DAG = DCI.DAG;
3478 EVT VT = N1.getValueType();
3479
3480 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
3481 N1.getOperand(0), N2.getOperand(0));
3482 DCI.AddToWorklist(NewSelect.getNode());
3483 return DAG.getNode(Op, SL, VT, NewSelect);
3484 }
3485
3486 // Pull a free FP operation out of a select so it may fold into uses.
3487 //
3488 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
3489 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
3490 //
3491 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
3492 // select c, (fabs x), +k -> fabs (select c, x, k)
3493 static SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
3494 SDValue N) {
3495 SelectionDAG &DAG = DCI.DAG;
3496 SDValue Cond = N.getOperand(0);
3497 SDValue LHS = N.getOperand(1);
3498 SDValue RHS = N.getOperand(2);
3499
3500 EVT VT = N.getValueType();
3501 if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
3502 (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
3503 return distributeOpThroughSelect(DCI, LHS.getOpcode(),
3504 SDLoc(N), Cond, LHS, RHS);
3505 }
3506
3507 bool Inv = false;
3508 if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
3509 std::swap(LHS, RHS);
3510 Inv = true;
3511 }
3512
3513 // TODO: Support vector constants.
3514 ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
3515 if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS) {
3516 SDLoc SL(N);
3517 // If one side is an fneg/fabs and the other is a constant, we can push the
3518 // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
3519 SDValue NewLHS = LHS.getOperand(0);
3520 SDValue NewRHS = RHS;
3521
3522 // Careful: if the neg can be folded up, don't try to pull it back down.
3523 bool ShouldFoldNeg = true;
3524
3525 if (NewLHS.hasOneUse()) {
3526 unsigned Opc = NewLHS.getOpcode();
3527 if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(Opc))
3528 ShouldFoldNeg = false;
3529 if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
3530 ShouldFoldNeg = false;
3531 }
3532
3533 if (ShouldFoldNeg) {
3534 if (LHS.getOpcode() == ISD::FNEG)
3535 NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3536 else if (CRHS->isNegative())
3537 return SDValue();
3538
3539 if (Inv)
3540 std::swap(NewLHS, NewRHS);
3541
3542 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
3543 Cond, NewLHS, NewRHS);
3544 DCI.AddToWorklist(NewSelect.getNode());
3545 return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
3546 }
3547 }
3548
3549 return SDValue();
3550 }
3551
3552
3553 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
3554 DAGCombinerInfo &DCI) const {
3555 if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
3556 return Folded;
3557
3558 SDValue Cond = N->getOperand(0);
3559 if (Cond.getOpcode() != ISD::SETCC)
3560 return SDValue();
3561
3562 EVT VT = N->getValueType(0);
3563 SDValue LHS = Cond.getOperand(0);
3564 SDValue RHS = Cond.getOperand(1);
3565 SDValue CC = Cond.getOperand(2);
3566
3567 SDValue True = N->getOperand(1);
3568 SDValue False = N->getOperand(2);
3569
3570 if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
3571 SelectionDAG &DAG = DCI.DAG;
3572 if (DAG.isConstantValueOfAnyType(True) &&
3573 !DAG.isConstantValueOfAnyType(False)) {
3574 // Swap cmp + select pair to move constant to false input.
3575 // This will allow using VOPC cndmasks more often.
3576 // select (setcc x, y), k, x -> select (setccinv x, y), x, k
3577
3578 SDLoc SL(N);
3579 ISD::CondCode NewCC =
3580 getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), LHS.getValueType());
3581
3582 SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
3583 return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
3584 }
3585
3586 if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
3587 SDValue MinMax
3588 = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
3589 // Revisit this node so we can catch min3/max3/med3 patterns.
3590 //DCI.AddToWorklist(MinMax.getNode());
3591 return MinMax;
3592 }
3593 }
3594
3595 // There's no reason to not do this if the condition has other uses.
3596 return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
3597 }
3598
3599 static bool isInv2Pi(const APFloat &APF) {
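// Bit patterns of 1.0 / (2.0 * pi) in half, single and double precision.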
3600 static const APFloat KF16(APFloat::IEEEhalf(), APInt(16, 0x3118));
3601 static const APFloat KF32(APFloat::IEEEsingle(), APInt(32, 0x3e22f983));
3602 static const APFloat KF64(APFloat::IEEEdouble(), APInt(64, 0x3fc45f306dc9c882));
3603
3604 return APF.bitwiseIsEqual(KF16) ||
3605 APF.bitwiseIsEqual(KF32) ||
3606 APF.bitwiseIsEqual(KF64);
3607 }
3608
3609 // The negations of +0.0 and 1.0 / (2.0 * pi) do not have inline immediates,
3610 // so there is an additional cost to negate them.
3611 bool AMDGPUTargetLowering::isConstantCostlierToNegate(SDValue N) const {
3612 if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N)) {
3613 if (C->isZero() && !C->isNegative())
3614 return true;
3615
3616 if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF()))
3617 return true;
3618 }
3619
3620 return false;
3621 }
3622
3623 static unsigned inverseMinMax(unsigned Opc) {
3624 switch (Opc) {
3625 case ISD::FMAXNUM:
3626 return ISD::FMINNUM;
3627 case ISD::FMINNUM:
3628 return ISD::FMAXNUM;
3629 case ISD::FMAXNUM_IEEE:
3630 return ISD::FMINNUM_IEEE;
3631 case ISD::FMINNUM_IEEE:
3632 return ISD::FMAXNUM_IEEE;
3633 case AMDGPUISD::FMAX_LEGACY:
3634 return AMDGPUISD::FMIN_LEGACY;
3635 case AMDGPUISD::FMIN_LEGACY:
3636 return AMDGPUISD::FMAX_LEGACY;
3637 default:
3638 llvm_unreachable("invalid min/max opcode");
3639 }
3640 }
3641
3642 SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
3643 DAGCombinerInfo &DCI) const {
3644 SelectionDAG &DAG = DCI.DAG;
3645 SDValue N0 = N->getOperand(0);
3646 EVT VT = N->getValueType(0);
3647
3648 unsigned Opc = N0.getOpcode();
3649
3650 // If the input has multiple uses and we can either fold the negate down, or
3651 // the other uses cannot, give up. This both prevents unprofitable
3652 // transformations and infinite loops: we won't repeatedly try to fold around
3653 // a negate that has no 'good' form.
3654 if (N0.hasOneUse()) {
3655 // This may be able to fold into the source, but at a code size cost. Don't
3656 // fold if the fold into the user is free.
3657 if (allUsesHaveSourceMods(N, 0))
3658 return SDValue();
3659 } else {
3660 if (fnegFoldsIntoOp(Opc) &&
3661 (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode())))
3662 return SDValue();
3663 }
3664
3665 SDLoc SL(N);
3666 switch (Opc) {
3667 case ISD::FADD: {
3668 if (!mayIgnoreSignedZero(N0))
3669 return SDValue();
3670
3671 // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
3672 SDValue LHS = N0.getOperand(0);
3673 SDValue RHS = N0.getOperand(1);
3674
3675 if (LHS.getOpcode() != ISD::FNEG)
3676 LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3677 else
3678 LHS = LHS.getOperand(0);
3679
3680 if (RHS.getOpcode() != ISD::FNEG)
3681 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3682 else
3683 RHS = RHS.getOperand(0);
3684
3685 SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
3686 if (Res.getOpcode() != ISD::FADD)
3687 return SDValue(); // Op got folded away.
3688 if (!N0.hasOneUse())
3689 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3690 return Res;
3691 }
3692 case ISD::FMUL:
3693 case AMDGPUISD::FMUL_LEGACY: {
3694 // (fneg (fmul x, y)) -> (fmul x, (fneg y))
3695 // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
3696 SDValue LHS = N0.getOperand(0);
3697 SDValue RHS = N0.getOperand(1);
3698
3699 if (LHS.getOpcode() == ISD::FNEG)
3700 LHS = LHS.getOperand(0);
3701 else if (RHS.getOpcode() == ISD::FNEG)
3702 RHS = RHS.getOperand(0);
3703 else
3704 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3705
3706 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
3707 if (Res.getOpcode() != Opc)
3708 return SDValue(); // Op got folded away.
3709 if (!N0.hasOneUse())
3710 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3711 return Res;
3712 }
3713 case ISD::FMA:
3714 case ISD::FMAD: {
3715 // TODO: handle llvm.amdgcn.fma.legacy
3716 if (!mayIgnoreSignedZero(N0))
3717 return SDValue();
3718
3719 // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
3720 SDValue LHS = N0.getOperand(0);
3721 SDValue MHS = N0.getOperand(1);
3722 SDValue RHS = N0.getOperand(2);
3723
3724 if (LHS.getOpcode() == ISD::FNEG)
3725 LHS = LHS.getOperand(0);
3726 else if (MHS.getOpcode() == ISD::FNEG)
3727 MHS = MHS.getOperand(0);
3728 else
3729 MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS);
3730
3731 if (RHS.getOpcode() != ISD::FNEG)
3732 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3733 else
3734 RHS = RHS.getOperand(0);
3735
3736 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS);
3737 if (Res.getOpcode() != Opc)
3738 return SDValue(); // Op got folded away.
3739 if (!N0.hasOneUse())
3740 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3741 return Res;
3742 }
3743 case ISD::FMAXNUM:
3744 case ISD::FMINNUM:
3745 case ISD::FMAXNUM_IEEE:
3746 case ISD::FMINNUM_IEEE:
3747 case AMDGPUISD::FMAX_LEGACY:
3748 case AMDGPUISD::FMIN_LEGACY: {
3749 // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
3750 // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
3751 // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
3752 // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)
3753
3754 SDValue LHS = N0.getOperand(0);
3755 SDValue RHS = N0.getOperand(1);
3756
3757 // 0 doesn't have a negated inline immediate.
3758 // TODO: This constant check should be generalized to other operations.
3759 if (isConstantCostlierToNegate(RHS))
3760 return SDValue();
3761
3762 SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3763 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3764 unsigned Opposite = inverseMinMax(Opc);
3765
3766 SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
3767 if (Res.getOpcode() != Opposite)
3768 return SDValue(); // Op got folded away.
3769 if (!N0.hasOneUse())
3770 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3771 return Res;
3772 }
3773 case AMDGPUISD::FMED3: {
3774 SDValue Ops[3];
3775 for (unsigned I = 0; I < 3; ++I)
3776 Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I), N0->getFlags());
3777
3778 SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags());
3779 if (Res.getOpcode() != AMDGPUISD::FMED3)
3780 return SDValue(); // Op got folded away.
3781
3782 if (!N0.hasOneUse()) {
3783 SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Res);
3784 DAG.ReplaceAllUsesWith(N0, Neg);
3785
3786 for (SDNode *U : Neg->uses())
3787 DCI.AddToWorklist(U);
3788 }
3789
3790 return Res;
3791 }
3792 case ISD::FP_EXTEND:
3793 case ISD::FTRUNC:
3794 case ISD::FRINT:
3795 case ISD::FNEARBYINT: // XXX - Should fround be handled?
3796 case ISD::FSIN:
3797 case ISD::FCANONICALIZE:
3798 case AMDGPUISD::RCP:
3799 case AMDGPUISD::RCP_LEGACY:
3800 case AMDGPUISD::RCP_IFLAG:
3801 case AMDGPUISD::SIN_HW: {
3802 SDValue CvtSrc = N0.getOperand(0);
3803 if (CvtSrc.getOpcode() == ISD::FNEG) {
3804 // (fneg (fp_extend (fneg x))) -> (fp_extend x)
3805 // (fneg (rcp (fneg x))) -> (rcp x)
3806 return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0));
3807 }
3808
3809 if (!N0.hasOneUse())
3810 return SDValue();
3811
3812 // (fneg (fp_extend x)) -> (fp_extend (fneg x))
3813 // (fneg (rcp x)) -> (rcp (fneg x))
3814 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
3815 return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
3816 }
3817 case ISD::FP_ROUND: {
3818 SDValue CvtSrc = N0.getOperand(0);
3819
3820 if (CvtSrc.getOpcode() == ISD::FNEG) {
3821 // (fneg (fp_round (fneg x))) -> (fp_round x)
3822 return DAG.getNode(ISD::FP_ROUND, SL, VT,
3823 CvtSrc.getOperand(0), N0.getOperand(1));
3824 }
3825
3826 if (!N0.hasOneUse())
3827 return SDValue();
3828
3829 // (fneg (fp_round x)) -> (fp_round (fneg x))
3830 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
3831 return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1));
3832 }
3833 case ISD::FP16_TO_FP: {
3834 // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
3835 // f16, but legalization of f16 fneg ends up pulling it out of the source.
3836 // Put the fneg back as a legal source operation that can be matched later.
3837 SDLoc SL(N);
3838
3839 SDValue Src = N0.getOperand(0);
3840 EVT SrcVT = Src.getValueType();
3841
3842 // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
3843 SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src,
3844 DAG.getConstant(0x8000, SL, SrcVT));
3845 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
3846 }
3847 default:
3848 return SDValue();
3849 }
3850 }
3851
3852 SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N,
3853 DAGCombinerInfo &DCI) const {
3854 SelectionDAG &DAG = DCI.DAG;
3855 SDValue N0 = N->getOperand(0);
3856
3857 if (!N0.hasOneUse())
3858 return SDValue();
3859
3860 switch (N0.getOpcode()) {
3861 case ISD::FP16_TO_FP: {
3862 assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
3863 SDLoc SL(N);
3864 SDValue Src = N0.getOperand(0);
3865 EVT SrcVT = Src.getValueType();
3866
3867 // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
3868 SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src,
3869 DAG.getConstant(0x7fff, SL, SrcVT));
3870 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
3871 }
3872 default:
3873 return SDValue();
3874 }
3875 }
3876
3877 SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
3878 DAGCombinerInfo &DCI) const {
3879 const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
3880 if (!CFP)
3881 return SDValue();
3882
3883 // XXX - Should this flush denormals?
3884 const APFloat &Val = CFP->getValueAPF();
3885 APFloat One(Val.getSemantics(), "1.0");
3886 return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
3887 }
3888
3889 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
3890 DAGCombinerInfo &DCI) const {
3891 SelectionDAG &DAG = DCI.DAG;
3892 SDLoc DL(N);
3893
3894 switch(N->getOpcode()) {
3895 default:
3896 break;
3897 case ISD::BITCAST: {
3898 EVT DestVT = N->getValueType(0);
3899
3900 // Push casts through vector builds. This helps avoid emitting a large
3901 // number of copies when materializing floating point vector constants.
3902 //
3903 // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
3904 // vnt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
3905 if (DestVT.isVector()) {
3906 SDValue Src = N->getOperand(0);
3907 if (Src.getOpcode() == ISD::BUILD_VECTOR) {
3908 EVT SrcVT = Src.getValueType();
3909 unsigned NElts = DestVT.getVectorNumElements();
3910
3911 if (SrcVT.getVectorNumElements() == NElts) {
3912 EVT DestEltVT = DestVT.getVectorElementType();
3913
3914 SmallVector<SDValue, 8> CastedElts;
3915 SDLoc SL(N);
3916 for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
3917 SDValue Elt = Src.getOperand(I);
3918 CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt));
3919 }
3920
3921 return DAG.getBuildVector(DestVT, SL, CastedElts);
3922 }
3923 }
3924 }
3925
3926 if (DestVT.getSizeInBits() != 64 || !DestVT.isVector())
3927 break;
3928
3929 // Fold bitcasts of constants.
3930 //
3931 // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
3932 // TODO: Generalize and move to DAGCombiner
3933 SDValue Src = N->getOperand(0);
3934 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
3935 SDLoc SL(N);
3936 uint64_t CVal = C->getZExtValue();
3937 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
3938 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3939 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3940 return DAG.getNode(ISD::BITCAST, SL, DestVT, BV);
3941 }
3942
3943 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
3944 const APInt &Val = C->getValueAPF().bitcastToAPInt();
3945 SDLoc SL(N);
3946 uint64_t CVal = Val.getZExtValue();
3947 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
3948 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3949 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3950
3951 return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
3952 }
3953
3954 break;
3955 }
3956 case ISD::SHL: {
3957 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3958 break;
3959
3960 return performShlCombine(N, DCI);
3961 }
3962 case ISD::SRL: {
3963 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3964 break;
3965
3966 return performSrlCombine(N, DCI);
3967 }
3968 case ISD::SRA: {
3969 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3970 break;
3971
3972 return performSraCombine(N, DCI);
3973 }
3974 case ISD::TRUNCATE:
3975 return performTruncateCombine(N, DCI);
3976 case ISD::MUL:
3977 return performMulCombine(N, DCI);
3978 case ISD::MULHS:
3979 return performMulhsCombine(N, DCI);
3980 case ISD::MULHU:
3981 return performMulhuCombine(N, DCI);
3982 case AMDGPUISD::MUL_I24:
3983 case AMDGPUISD::MUL_U24:
3984 case AMDGPUISD::MULHI_I24:
3985 case AMDGPUISD::MULHI_U24: {
3986 if (SDValue V = simplifyI24(N, DCI))
3987 return V;
3988 return SDValue();
3989 }
3990 case ISD::SELECT:
3991 return performSelectCombine(N, DCI);
3992 case ISD::FNEG:
3993 return performFNegCombine(N, DCI);
3994 case ISD::FABS:
3995 return performFAbsCombine(N, DCI);
3996 case AMDGPUISD::BFE_I32:
3997 case AMDGPUISD::BFE_U32: {
3998 assert(!N->getValueType(0).isVector() &&
3999 "Vector handling of BFE not implemented");
4000 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
4001 if (!Width)
4002 break;
4003
4004 uint32_t WidthVal = Width->getZExtValue() & 0x1f;
4005 if (WidthVal == 0)
4006 return DAG.getConstant(0, DL, MVT::i32);
4007
4008 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
4009 if (!Offset)
4010 break;
4011
4012 SDValue BitsFrom = N->getOperand(0);
4013 uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
4014
4015 bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
4016
4017 if (OffsetVal == 0) {
4018 // This is already sign / zero extended, so try to fold away extra BFEs.
4019 unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
4020
4021 unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
4022 if (OpSignBits >= SignBits)
4023 return BitsFrom;
4024
4025 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
4026 if (Signed) {
4027 // This is a sign_extend_inreg. Replace it to take advantage of existing
4028 // DAG Combines. If not eliminated, we will match back to BFE during
4029 // selection.
4030
4031 // TODO: The sext_inreg of extended types is not handled here, although we
4032 // could handle them in a single BFE.
4033 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
4034 DAG.getValueType(SmallVT));
4035 }
4036
4037 return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
4038 }
4039
4040 if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
4041 if (Signed) {
4042 return constantFoldBFE<int32_t>(DAG,
4043 CVal->getSExtValue(),
4044 OffsetVal,
4045 WidthVal,
4046 DL);
4047 }
4048
4049 return constantFoldBFE<uint32_t>(DAG,
4050 CVal->getZExtValue(),
4051 OffsetVal,
4052 WidthVal,
4053 DL);
4054 }
4055
4056 if ((OffsetVal + WidthVal) >= 32 &&
4057 !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
4058 SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
4059 return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
4060 BitsFrom, ShiftVal);
4061 }
4062
4063 if (BitsFrom.hasOneUse()) {
4064 APInt Demanded = APInt::getBitsSet(32,
4065 OffsetVal,
4066 OffsetVal + WidthVal);
4067
4068 KnownBits Known;
4069 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
4070 !DCI.isBeforeLegalizeOps());
4071 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4072 if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
4073 TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
4074 DCI.CommitTargetLoweringOpt(TLO);
4075 }
4076 }
4077
4078 break;
4079 }
4080 case ISD::LOAD:
4081 return performLoadCombine(N, DCI);
4082 case ISD::STORE:
4083 return performStoreCombine(N, DCI);
4084 case AMDGPUISD::RCP:
4085 case AMDGPUISD::RCP_IFLAG:
4086 return performRcpCombine(N, DCI);
4087 case ISD::AssertZext:
4088 case ISD::AssertSext:
4089 return performAssertSZExtCombine(N, DCI);
4090 case ISD::INTRINSIC_WO_CHAIN:
4091 return performIntrinsicWOChainCombine(N, DCI);
4092 }
4093 return SDValue();
4094 }
4095
4096 //===----------------------------------------------------------------------===//
4097 // Helper functions
4098 //===----------------------------------------------------------------------===//
4099
4100 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
4101 const TargetRegisterClass *RC,
4102 Register Reg, EVT VT,
4103 const SDLoc &SL,
4104 bool RawReg) const {
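// Reuse the existing virtual register if this physical register was already
// added as a live-in; otherwise create one. With RawReg, the register is
// returned directly rather than as a copy from the entry block.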
4105 MachineFunction &MF = DAG.getMachineFunction();
4106 MachineRegisterInfo &MRI = MF.getRegInfo();
4107 Register VReg;
4108
4109 if (!MRI.isLiveIn(Reg)) {
4110 VReg = MRI.createVirtualRegister(RC);
4111 MRI.addLiveIn(Reg, VReg);
4112 } else {
4113 VReg = MRI.getLiveInVirtReg(Reg);
4114 }
4115
4116 if (RawReg)
4117 return DAG.getRegister(VReg, VT);
4118
4119 return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT);
4120 }
4121
4122 // This may be called multiple times, and nothing prevents creating multiple
4123 // objects at the same offset. See if we already defined this object.
4124 static int getOrCreateFixedStackObject(MachineFrameInfo &MFI, unsigned Size,
4125 int64_t Offset) {
4126 for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
4127 if (MFI.getObjectOffset(I) == Offset) {
4128 assert(MFI.getObjectSize(I) == Size);
4129 return I;
4130 }
4131 }
4132
4133 return MFI.CreateFixedObject(Size, Offset, true);
4134 }
4135
4136 SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG,
4137 EVT VT,
4138 const SDLoc &SL,
4139 int64_t Offset) const {
4140 MachineFunction &MF = DAG.getMachineFunction();
4141 MachineFrameInfo &MFI = MF.getFrameInfo();
4142 int FI = getOrCreateFixedStackObject(MFI, VT.getStoreSize(), Offset);
4143
4144 auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset);
4145 SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32);
4146
4147 return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, Align(4),
4148 MachineMemOperand::MODereferenceable |
4149 MachineMemOperand::MOInvariant);
4150 }
4151
4152 SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG,
4153 const SDLoc &SL,
4154 SDValue Chain,
4155 SDValue ArgVal,
4156 int64_t Offset) const {
4157 MachineFunction &MF = DAG.getMachineFunction();
4158 MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset);
4159
4160 SDValue Ptr = DAG.getConstant(Offset, SL, MVT::i32);
4161 SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, Align(4),
4162 MachineMemOperand::MODereferenceable);
4163 return Store;
4164 }
4165
4166 SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG,
4167 const TargetRegisterClass *RC,
4168 EVT VT, const SDLoc &SL,
4169 const ArgDescriptor &Arg) const {
4170 assert(Arg && "Attempting to load missing argument");
4171
4172 SDValue V = Arg.isRegister() ?
4173 CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL) :
4174 loadStackInputValue(DAG, VT, SL, Arg.getStackOffset());
4175
4176 if (!Arg.isMasked())
4177 return V;
4178
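// The argument is packed into a register together with others: shift its
// field down to bit 0 and mask off the unrelated bits.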
4179 unsigned Mask = Arg.getMask();
4180 unsigned Shift = countTrailingZeros<unsigned>(Mask);
4181 V = DAG.getNode(ISD::SRL, SL, VT, V,
4182 DAG.getShiftAmountConstant(Shift, VT, SL));
4183 return DAG.getNode(ISD::AND, SL, VT, V,
4184 DAG.getConstant(Mask >> Shift, SL, VT));
4185 }
4186
4187 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
4188 const MachineFunction &MF, const ImplicitParameter Param) const {
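// Implicit parameters are placed after the explicit kernel arguments, with
// the explicit argument block rounded up to the implicit argument pointer
// alignment.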
4189 const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
4190 const AMDGPUSubtarget &ST =
4191 AMDGPUSubtarget::get(getTargetMachine(), MF.getFunction());
4192 unsigned ExplicitArgOffset = ST.getExplicitKernelArgOffset(MF.getFunction());
4193 const Align Alignment = ST.getAlignmentForImplicitArgPtr();
4194 uint64_t ArgOffset = alignTo(MFI->getExplicitKernArgSize(), Alignment) +
4195 ExplicitArgOffset;
4196 switch (Param) {
4197 case GRID_DIM:
4198 return ArgOffset;
4199 case GRID_OFFSET:
4200 return ArgOffset + 4;
4201 }
4202 llvm_unreachable("unexpected implicit parameter type");
4203 }
4204
4205 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
4206
4207 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
4208 switch ((AMDGPUISD::NodeType)Opcode) {
4209 case AMDGPUISD::FIRST_NUMBER: break;
4210 // AMDIL DAG nodes
4211 NODE_NAME_CASE(UMUL);
4212 NODE_NAME_CASE(BRANCH_COND);
4213
4214 // AMDGPU DAG nodes
4215 NODE_NAME_CASE(IF)
4216 NODE_NAME_CASE(ELSE)
4217 NODE_NAME_CASE(LOOP)
4218 NODE_NAME_CASE(CALL)
4219 NODE_NAME_CASE(TC_RETURN)
4220 NODE_NAME_CASE(TRAP)
4221 NODE_NAME_CASE(RET_FLAG)
4222 NODE_NAME_CASE(RETURN_TO_EPILOG)
4223 NODE_NAME_CASE(ENDPGM)
4224 NODE_NAME_CASE(DWORDADDR)
4225 NODE_NAME_CASE(FRACT)
4226 NODE_NAME_CASE(SETCC)
4227 NODE_NAME_CASE(SETREG)
4228 NODE_NAME_CASE(DENORM_MODE)
4229 NODE_NAME_CASE(FMA_W_CHAIN)
4230 NODE_NAME_CASE(FMUL_W_CHAIN)
4231 NODE_NAME_CASE(CLAMP)
4232 NODE_NAME_CASE(COS_HW)
4233 NODE_NAME_CASE(SIN_HW)
4234 NODE_NAME_CASE(FMAX_LEGACY)
4235 NODE_NAME_CASE(FMIN_LEGACY)
4236 NODE_NAME_CASE(FMAX3)
4237 NODE_NAME_CASE(SMAX3)
4238 NODE_NAME_CASE(UMAX3)
4239 NODE_NAME_CASE(FMIN3)
4240 NODE_NAME_CASE(SMIN3)
4241 NODE_NAME_CASE(UMIN3)
4242 NODE_NAME_CASE(FMED3)
4243 NODE_NAME_CASE(SMED3)
4244 NODE_NAME_CASE(UMED3)
4245 NODE_NAME_CASE(FDOT2)
4246 NODE_NAME_CASE(URECIP)
4247 NODE_NAME_CASE(DIV_SCALE)
4248 NODE_NAME_CASE(DIV_FMAS)
4249 NODE_NAME_CASE(DIV_FIXUP)
4250 NODE_NAME_CASE(FMAD_FTZ)
4251 NODE_NAME_CASE(RCP)
4252 NODE_NAME_CASE(RSQ)
4253 NODE_NAME_CASE(RCP_LEGACY)
4254 NODE_NAME_CASE(RCP_IFLAG)
4255 NODE_NAME_CASE(FMUL_LEGACY)
4256 NODE_NAME_CASE(RSQ_CLAMP)
4257 NODE_NAME_CASE(LDEXP)
4258 NODE_NAME_CASE(FP_CLASS)
4259 NODE_NAME_CASE(DOT4)
4260 NODE_NAME_CASE(CARRY)
4261 NODE_NAME_CASE(BORROW)
4262 NODE_NAME_CASE(BFE_U32)
4263 NODE_NAME_CASE(BFE_I32)
4264 NODE_NAME_CASE(BFI)
4265 NODE_NAME_CASE(BFM)
4266 NODE_NAME_CASE(FFBH_U32)
4267 NODE_NAME_CASE(FFBH_I32)
4268 NODE_NAME_CASE(FFBL_B32)
4269 NODE_NAME_CASE(MUL_U24)
4270 NODE_NAME_CASE(MUL_I24)
4271 NODE_NAME_CASE(MULHI_U24)
4272 NODE_NAME_CASE(MULHI_I24)
4273 NODE_NAME_CASE(MAD_U24)
4274 NODE_NAME_CASE(MAD_I24)
4275 NODE_NAME_CASE(MAD_I64_I32)
4276 NODE_NAME_CASE(MAD_U64_U32)
4277 NODE_NAME_CASE(PERM)
4278 NODE_NAME_CASE(TEXTURE_FETCH)
4279 NODE_NAME_CASE(R600_EXPORT)
4280 NODE_NAME_CASE(CONST_ADDRESS)
4281 NODE_NAME_CASE(REGISTER_LOAD)
4282 NODE_NAME_CASE(REGISTER_STORE)
4283 NODE_NAME_CASE(SAMPLE)
4284 NODE_NAME_CASE(SAMPLEB)
4285 NODE_NAME_CASE(SAMPLED)
4286 NODE_NAME_CASE(SAMPLEL)
4287 NODE_NAME_CASE(CVT_F32_UBYTE0)
4288 NODE_NAME_CASE(CVT_F32_UBYTE1)
4289 NODE_NAME_CASE(CVT_F32_UBYTE2)
4290 NODE_NAME_CASE(CVT_F32_UBYTE3)
4291 NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
4292 NODE_NAME_CASE(CVT_PKNORM_I16_F32)
4293 NODE_NAME_CASE(CVT_PKNORM_U16_F32)
4294 NODE_NAME_CASE(CVT_PK_I16_I32)
4295 NODE_NAME_CASE(CVT_PK_U16_U32)
4296 NODE_NAME_CASE(FP_TO_FP16)
4297 NODE_NAME_CASE(FP16_ZEXT)
4298 NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
4299 NODE_NAME_CASE(CONST_DATA_PTR)
4300 NODE_NAME_CASE(PC_ADD_REL_OFFSET)
4301 NODE_NAME_CASE(LDS)
4302 NODE_NAME_CASE(DUMMY_CHAIN)
4303 case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
4304 NODE_NAME_CASE(LOAD_D16_HI)
4305 NODE_NAME_CASE(LOAD_D16_LO)
4306 NODE_NAME_CASE(LOAD_D16_HI_I8)
4307 NODE_NAME_CASE(LOAD_D16_HI_U8)
4308 NODE_NAME_CASE(LOAD_D16_LO_I8)
4309 NODE_NAME_CASE(LOAD_D16_LO_U8)
4310 NODE_NAME_CASE(STORE_MSKOR)
4311 NODE_NAME_CASE(LOAD_CONSTANT)
4312 NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
4313 NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
4314 NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
4315 NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
4316 NODE_NAME_CASE(DS_ORDERED_COUNT)
4317 NODE_NAME_CASE(ATOMIC_CMP_SWAP)
4318 NODE_NAME_CASE(ATOMIC_INC)
4319 NODE_NAME_CASE(ATOMIC_DEC)
4320 NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
4321 NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
4322 NODE_NAME_CASE(BUFFER_LOAD)
4323 NODE_NAME_CASE(BUFFER_LOAD_UBYTE)
4324 NODE_NAME_CASE(BUFFER_LOAD_USHORT)
4325 NODE_NAME_CASE(BUFFER_LOAD_BYTE)
4326 NODE_NAME_CASE(BUFFER_LOAD_SHORT)
4327 NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
4328 NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
4329 NODE_NAME_CASE(SBUFFER_LOAD)
4330 NODE_NAME_CASE(BUFFER_STORE)
4331 NODE_NAME_CASE(BUFFER_STORE_BYTE)
4332 NODE_NAME_CASE(BUFFER_STORE_SHORT)
4333 NODE_NAME_CASE(BUFFER_STORE_FORMAT)
4334 NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16)
4335 NODE_NAME_CASE(BUFFER_ATOMIC_SWAP)
4336 NODE_NAME_CASE(BUFFER_ATOMIC_ADD)
4337 NODE_NAME_CASE(BUFFER_ATOMIC_SUB)
4338 NODE_NAME_CASE(BUFFER_ATOMIC_SMIN)
4339 NODE_NAME_CASE(BUFFER_ATOMIC_UMIN)
4340 NODE_NAME_CASE(BUFFER_ATOMIC_SMAX)
4341 NODE_NAME_CASE(BUFFER_ATOMIC_UMAX)
4342 NODE_NAME_CASE(BUFFER_ATOMIC_AND)
4343 NODE_NAME_CASE(BUFFER_ATOMIC_OR)
4344 NODE_NAME_CASE(BUFFER_ATOMIC_XOR)
4345 NODE_NAME_CASE(BUFFER_ATOMIC_INC)
4346 NODE_NAME_CASE(BUFFER_ATOMIC_DEC)
4347 NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP)
4348 NODE_NAME_CASE(BUFFER_ATOMIC_CSUB)
4349 NODE_NAME_CASE(BUFFER_ATOMIC_FADD)
4350
4351 case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
4352 }
4353 return nullptr;
4354 }
4355
4356 SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
4357 SelectionDAG &DAG, int Enabled,
4358 int &RefinementSteps,
4359 bool &UseOneConstNR,
4360 bool Reciprocal) const {
4361 EVT VT = Operand.getValueType();
4362
4363 if (VT == MVT::f32) {
4364 RefinementSteps = 0;
4365 return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
4366 }
4367
4368 // TODO: There is also f64 rsq instruction, but the documentation is less
4369 // clear on its precision.
4370
4371 return SDValue();
4372 }
4373
4374 SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
4375 SelectionDAG &DAG, int Enabled,
4376 int &RefinementSteps) const {
4377 EVT VT = Operand.getValueType();
4378
4379 if (VT == MVT::f32) {
4380 // Reciprocal, < 1 ulp error.
4381 //
4382 // This reciprocal approximation converges to < 0.5 ulp error with one
4383 // newton rhapson performed with two fused multiple adds (FMAs).
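    //
    // For reference, with x0 = rcp(d) one such refinement step is
    //   e  = fma(-d, x0, 1.0)
    //   x1 = fma(x0, e, x0)
    // which is the usual Newton-Raphson update x1 = x0 * (2 - d * x0).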

    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also an f64 rcp instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}

void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known,
    const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {

  Known.resetAll(); // Don't know anything.

  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW: {
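    // CARRY and BORROW produce a 0-or-1 value, so every bit above bit 0 is
    // known to be zero.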
    Known.Zero = APInt::getHighBitsSet(32, 31);
    break;
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
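    // Operand 2 encodes the field width in its low 5 bits. For the unsigned
    // form every bit above the extracted field is zero, e.g. a width of 8
    // leaves the top 24 bits known zero.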
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;

    uint32_t Width = CWidth->getZExtValue() & 0x1f;

    if (Opc == AMDGPUISD::BFE_U32)
      Known.Zero = APInt::getHighBitsSet(32, 32 - Width);

    break;
  }
  case AMDGPUISD::FP_TO_FP16:
  case AMDGPUISD::FP16_ZEXT: {
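    // Both nodes yield a 16-bit value zero-extended to the result width.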
    unsigned BitWidth = Known.getBitWidth();

    // High bits are zero.
    Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
    break;
  }
  case AMDGPUISD::MUL_U24:
  case AMDGPUISD::MUL_I24: {
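    // These multiply the low 24 bits of each operand. The product has at
    // least as many trailing zero bits as the two operands combined, and its
    // number of significant bits is bounded by the sum of the operands'
    // significant bits.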
    KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
    KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
    unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
                      RHSKnown.countMinTrailingZeros();
    Known.Zero.setLowBits(std::min(TrailZ, 32u));
    // Skip extra check if all bits are known zeros.
    if (TrailZ >= 32)
      break;

    // Truncate to 24 bits.
    LHSKnown = LHSKnown.trunc(24);
    RHSKnown = RHSKnown.trunc(24);

    if (Opc == AMDGPUISD::MUL_I24) {
      unsigned LHSValBits = 24 - LHSKnown.countMinSignBits();
      unsigned RHSValBits = 24 - RHSKnown.countMinSignBits();
      unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
      if (MaxValBits >= 32)
        break;
      bool LHSNegative = LHSKnown.isNegative();
      bool LHSNonNegative = LHSKnown.isNonNegative();
      bool LHSPositive = LHSKnown.isStrictlyPositive();
      bool RHSNegative = RHSKnown.isNegative();
      bool RHSNonNegative = RHSKnown.isNonNegative();
      bool RHSPositive = RHSKnown.isStrictlyPositive();

      if ((LHSNonNegative && RHSNonNegative) || (LHSNegative && RHSNegative))
        Known.Zero.setHighBits(32 - MaxValBits);
      else if ((LHSNegative && RHSPositive) || (LHSPositive && RHSNegative))
        Known.One.setHighBits(32 - MaxValBits);
    } else {
      unsigned LHSValBits = 24 - LHSKnown.countMinLeadingZeros();
      unsigned RHSValBits = 24 - RHSKnown.countMinLeadingZeros();
      unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
      if (MaxValBits >= 32)
        break;
      Known.Zero.setHighBits(32 - MaxValBits);
    }
    break;
  }
  case AMDGPUISD::PERM: {
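    // PERM assembles the result byte by byte. Each byte of the selector
    // (operand 2) picks its source: values 0-3 take a byte from operand 1,
    // values 4-6 take a byte from operand 0, 0x0c produces 0x00, and values
    // above 0x0c produce 0xff; any other selector byte is left unknown here.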
    ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CMask)
      return;

    KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
    KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
    unsigned Sel = CMask->getZExtValue();

    for (unsigned I = 0; I < 32; I += 8) {
      unsigned SelBits = Sel & 0xff;
      if (SelBits < 4) {
        SelBits *= 8;
        Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
        Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
      } else if (SelBits < 7) {
        SelBits = (SelBits & 3) * 8;
        Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
        Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
      } else if (SelBits == 0x0c) {
        Known.Zero |= 0xFFull << I;
      } else if (SelBits > 0x0c) {
        Known.One |= 0xFFull << I;
      }
      Sel >>= 8;
    }
    break;
  }
  case AMDGPUISD::BUFFER_LOAD_UBYTE: {
    Known.Zero.setHighBits(24);
    break;
  }
  case AMDGPUISD::BUFFER_LOAD_USHORT: {
    Known.Zero.setHighBits(16);
    break;
  }
  case AMDGPUISD::LDS: {
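    // An LDS address is modeled here as fitting in the low 16 bits, with its
    // known-zero low bits following from the global's alignment.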
    auto GA = cast<GlobalAddressSDNode>(Op.getOperand(0).getNode());
    Align Alignment = GA->getGlobal()->getPointerAlignment(DAG.getDataLayout());

    Known.Zero.setHighBits(16);
    Known.Zero.setLowBits(Log2(Alignment));
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (IID) {
    case Intrinsic::amdgcn_mbcnt_lo:
    case Intrinsic::amdgcn_mbcnt_hi: {
      const GCNSubtarget &ST =
          DAG.getMachineFunction().getSubtarget<GCNSubtarget>();
      // These return at most the wavefront size - 1.
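      // (For wave64 that is 6 significant bits, so the upper 26 bits of an
      // i32 result are zero; for wave32 the upper 27 bits are zero.)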
      unsigned Size = Op.getValueType().getSizeInBits();
      Known.Zero.setHighBits(Size - ST.getWavefrontSizeLog2());
      break;
    }
    default:
      break;
    }
  }
  }
}

unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;

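    // A signed field of Width bits sign-extends into the upper 32 - Width
    // bits of the result, so there are at least 32 - Width + 1 copies of the
    // sign bit.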
    unsigned SignBits = 32 - Width->getZExtValue() + 1;
    if (!isNullConstant(Op.getOperand(1)))
      return SignBits;

    // TODO: Could probably figure something out with non-0 offsets.
    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }

  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
  }

  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW:
    return 31;
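  // A sign-extending byte load has 25 copies of the sign bit (bits 31..7), a
  // sign-extending short load has 17, and the zero-extending forms have 24
  // and 16 known-zero high bits respectively.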
  case AMDGPUISD::BUFFER_LOAD_BYTE:
    return 25;
  case AMDGPUISD::BUFFER_LOAD_SHORT:
    return 17;
  case AMDGPUISD::BUFFER_LOAD_UBYTE:
    return 24;
  case AMDGPUISD::BUFFER_LOAD_USHORT:
    return 16;
  case AMDGPUISD::FP_TO_FP16:
  case AMDGPUISD::FP16_ZEXT:
    return 16;
  default:
    return 1;
  }
}

unsigned AMDGPUTargetLowering::computeNumSignBitsForTargetInstr(
    GISelKnownBits &Analysis, Register R,
    const APInt &DemandedElts, const MachineRegisterInfo &MRI,
    unsigned Depth) const {
  const MachineInstr *MI = MRI.getVRegDef(R);
  if (!MI)
    return 1;

  // TODO: Check range metadata on MMO.
  switch (MI->getOpcode()) {
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
    return 25;
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
    return 17;
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
    return 24;
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
    return 16;
  default:
    return 1;
  }
}

bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
                                                        const SelectionDAG &DAG,
                                                        bool SNaN,
                                                        unsigned Depth) const {
  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY: {
    if (SNaN)
      return true;

    // TODO: Can check no nans on one of the operands for each one, but which
    // one?
    return false;
  }
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::CVT_PKRTZ_F16_F32: {
    if (SNaN)
      return true;
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
           DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
  }
  case AMDGPUISD::FMED3:
  case AMDGPUISD::FMIN3:
  case AMDGPUISD::FMAX3:
  case AMDGPUISD::FMAD_FTZ: {
    if (SNaN)
      return true;
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
           DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
           DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
  }
  case AMDGPUISD::CVT_F32_UBYTE0:
  case AMDGPUISD::CVT_F32_UBYTE1:
  case AMDGPUISD::CVT_F32_UBYTE2:
  case AMDGPUISD::CVT_F32_UBYTE3:
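    // Converting an unsigned byte to f32 always yields a finite, non-NaN
    // value.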
    return true;

  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RSQ_CLAMP: {
    if (SNaN)
      return true;

    // TODO: Need is known positive check.
    return false;
  }
  case AMDGPUISD::LDEXP:
  case AMDGPUISD::FRACT: {
    if (SNaN)
      return true;
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  case AMDGPUISD::DIV_SCALE:
  case AMDGPUISD::DIV_FMAS:
  case AMDGPUISD::DIV_FIXUP:
    // TODO: Refine on operands.
    return SNaN;
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::COS_HW: {
    // TODO: Need check for infinity
    return SNaN;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrinsicID
      = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    // TODO: Handle more intrinsics
    switch (IntrinsicID) {
    case Intrinsic::amdgcn_cubeid:
      return true;

    case Intrinsic::amdgcn_frexp_mant: {
      if (SNaN)
        return true;
      return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
    }
    case Intrinsic::amdgcn_cvt_pkrtz: {
      if (SNaN)
        return true;
      return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
             DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
    }
    case Intrinsic::amdgcn_rcp:
    case Intrinsic::amdgcn_rsq:
    case Intrinsic::amdgcn_rcp_legacy:
    case Intrinsic::amdgcn_rsq_legacy:
    case Intrinsic::amdgcn_rsq_clamp: {
      if (SNaN)
        return true;

      // TODO: Need is known positive check.
      return false;
    }
    case Intrinsic::amdgcn_trig_preop:
    case Intrinsic::amdgcn_fdot2:
      // TODO: Refine on operand
      return SNaN;
    case Intrinsic::amdgcn_fma_legacy:
      if (SNaN)
        return true;
      return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
             DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1) &&
             DAG.isKnownNeverNaN(Op.getOperand(3), SNaN, Depth + 1);
    default:
      return false;
    }
  }
  default:
    return false;
  }
}

TargetLowering::AtomicExpansionKind
AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
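  // Nand and the floating-point read-modify-write atomics are expanded by
  // AtomicExpandPass into a compare-and-swap loop; everything else is left
  // for the backend to handle directly.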
  switch (RMW->getOperation()) {
  case AtomicRMWInst::Nand:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
    return AtomicExpansionKind::CmpXChg;
  default:
    return AtomicExpansionKind::None;
  }
}
