//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that NVPTX uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "NVPTXISelLowering.h"
#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "NVPTX.h"
#include "NVPTXSubtarget.h"
#include "NVPTXTargetMachine.h"
#include "NVPTXTargetObjectFile.h"
#include "NVPTXUtilities.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "nvptx-lower"

using namespace llvm;

static unsigned int uniqueCallSite = 0;

static cl::opt<bool> sched4reg(
    "nvptx-sched4reg",
    cl::desc("NVPTX Specific: schedule for register pressure"),
    cl::init(false));

static cl::opt<unsigned>
    FMAContractLevelOpt("nvptx-fma-level", cl::ZeroOrMore, cl::Hidden,
                        cl::desc("NVPTX Specific: FMA contraction (0: don't do"
                                 " it, 1: do it, 2: do it aggressively)"),
                        cl::init(2));

static cl::opt<int> UsePrecDivF32(
    "nvptx-prec-divf32", cl::ZeroOrMore, cl::Hidden,
    cl::desc("NVPTX Specific: 0 use div.approx, 1 use div.full, 2 use"
             " IEEE-compliant F32 div.rnd if available."),
    cl::init(2));

static cl::opt<bool> UsePrecSqrtF32(
    "nvptx-prec-sqrtf32", cl::Hidden,
    cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
    cl::init(true));
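// For example (illustrative): "llc -march=nvptx64 -nvptx-prec-divf32=0"
// selects div.approx.f32 for f32 division, and "-nvptx-prec-sqrtf32=0"
// selects sqrt.approx.f32 instead of sqrt.rn.f32.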

int NVPTXTargetLowering::getDivF32Level() const {
  if (UsePrecDivF32.getNumOccurrences() > 0) {
    // If nvptx-prec-divf32=N is used on the command-line, always honor it
    return UsePrecDivF32;
  } else {
    // Otherwise, use div.approx if fast math is enabled
    if (getTargetMachine().Options.UnsafeFPMath)
      return 0;
    else
      return 2;
  }
}

bool NVPTXTargetLowering::usePrecSqrtF32() const {
  if (UsePrecSqrtF32.getNumOccurrences() > 0) {
    // If nvptx-prec-sqrtf32 is used on the command-line, always honor it
    return UsePrecSqrtF32;
  } else {
    // Otherwise, use sqrt.approx if fast math is enabled
    return !getTargetMachine().Options.UnsafeFPMath;
  }
}

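// FTZ ("flush to zero") is in effect when the function's f32 denormal
// outputs are flushed to sign-preserving zero, matching the semantics of
// PTX's .ftz instruction modifier.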
bool NVPTXTargetLowering::useF32FTZ(const MachineFunction &MF) const {
  return MF.getDenormalMode(APFloat::IEEEsingle()).Output ==
         DenormalMode::PreserveSign;
}

static bool IsPTXVectorType(MVT VT) {
  switch (VT.SimpleTy) {
  default:
    return false;
  case MVT::v2i1:
  case MVT::v4i1:
  case MVT::v2i8:
  case MVT::v4i8:
  case MVT::v2i16:
  case MVT::v4i16:
  case MVT::v2i32:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v2f16:
  case MVT::v4f16:
  case MVT::v8f16: // <4 x f16x2>
  case MVT::v2f32:
  case MVT::v4f32:
  case MVT::v2f64:
    return true;
  }
}

/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
/// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
/// into their primitive components.
/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return
/// the same number of types as the Ins/Outs arrays in LowerFormalArguments,
/// LowerCall, and LowerReturn.
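///
/// For example (illustrative): <4 x half> yields {v2f16, v2f16} with
/// Offsets {0, 4}, while <3 x float> is fully scalarized into three f32
/// pieces at offsets {0, 4, 8}.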
static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                               Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                               SmallVectorImpl<uint64_t> *Offsets = nullptr,
                               uint64_t StartingOffset = 0) {
  SmallVector<EVT, 16> TempVTs;
  SmallVector<uint64_t, 16> TempOffsets;

  // Special case for i128 - decompose to (i64, i64)
  if (Ty->isIntegerTy(128)) {
    ValueVTs.push_back(EVT(MVT::i64));
    ValueVTs.push_back(EVT(MVT::i64));

    if (Offsets) {
      Offsets->push_back(StartingOffset + 0);
      Offsets->push_back(StartingOffset + 8);
    }

    return;
  }

  // Given a struct type, recursively traverse its elements with this custom
  // ComputePTXValueVTs so that nested aggregates are flattened consistently.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    auto const *SL = DL.getStructLayout(STy);
    auto ElementNum = 0;
    for (auto *EI : STy->elements()) {
      ComputePTXValueVTs(TLI, DL, EI, ValueVTs, Offsets,
                         StartingOffset + SL->getElementOffset(ElementNum));
      ++ElementNum;
    }
    return;
  }

  ComputeValueVTs(TLI, DL, Ty, TempVTs, &TempOffsets, StartingOffset);
  for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
    EVT VT = TempVTs[i];
    uint64_t Off = TempOffsets[i];
    // Split vectors into individual elements, except for v2f16, which
    // we will pass as a single scalar.
    if (VT.isVector()) {
      unsigned NumElts = VT.getVectorNumElements();
      EVT EltVT = VT.getVectorElementType();
      // Vectors with an even number of f16 elements will be passed to
      // us as an array of v2f16 elements. We must match this so we
      // stay in sync with Ins/Outs.
      if (EltVT == MVT::f16 && NumElts % 2 == 0) {
        EltVT = MVT::v2f16;
        NumElts /= 2;
      }
      for (unsigned j = 0; j != NumElts; ++j) {
        ValueVTs.push_back(EltVT);
        if (Offsets)
          Offsets->push_back(Off + j * EltVT.getStoreSize());
      }
    } else {
      ValueVTs.push_back(VT);
      if (Offsets)
        Offsets->push_back(Off);
    }
  }
}

// Check whether we can merge loads/stores of some of the pieces of a
// flattened function parameter or return value into a single vector
// load/store.
//
// The flattened parameter is represented as a list of EVTs and
// offsets, and the whole structure is aligned to ParamAlignment. This
// function determines whether we can load/store pieces of the
// parameter starting at index Idx using a single vectorized op of
// size AccessSize. If so, it returns the number of param pieces
// covered by the vector op. Otherwise, it returns 1.
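//
// For example (illustrative): with ValueVTs = {f32, f32, f32, f32},
// Offsets = {0, 4, 8, 12} and ParamAlignment = 16, a query at Idx = 0 with
// AccessSize = 16 returns 4 (one 4-element access covers all four pieces);
// with AccessSize = 8 it would return 2.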
static unsigned CanMergeParamLoadStoresStartingAt(
    unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,
    const SmallVectorImpl<uint64_t> &Offsets, Align ParamAlignment) {

  // Can't vectorize if param alignment is not sufficient.
  if (ParamAlignment < AccessSize)
    return 1;
  // Can't vectorize if offset is not aligned.
  if (Offsets[Idx] & (AccessSize - 1))
    return 1;

  EVT EltVT = ValueVTs[Idx];
  unsigned EltSize = EltVT.getStoreSize();

  // Element is too large to vectorize.
  if (EltSize >= AccessSize)
    return 1;

  unsigned NumElts = AccessSize / EltSize;
  // Can't vectorize if AccessSize is not a multiple of EltSize.
  if (AccessSize != EltSize * NumElts)
    return 1;

  // We don't have enough elements to vectorize.
  if (Idx + NumElts > ValueVTs.size())
    return 1;

  // PTX ISA can only deal with 2- and 4-element vector ops.
  if (NumElts != 4 && NumElts != 2)
    return 1;

  for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
    // Types do not match.
    if (ValueVTs[j] != EltVT)
      return 1;

    // Elements are not contiguous.
    if (Offsets[j] - Offsets[j - 1] != EltSize)
      return 1;
  }
  // OK. We can vectorize ValueVTs[Idx..Idx+NumElts).
  return NumElts;
}

// Flags for tracking per-element vectorization state of loads/stores
// of a flattened function parameter or return value.
enum ParamVectorizationFlags {
  PVF_INNER = 0x0,  // Middle elements of a vector.
  PVF_FIRST = 0x1,  // First element of the vector.
  PVF_LAST = 0x2,   // Last element of the vector.
  // Scalar is effectively a 1-element vector.
  PVF_SCALAR = PVF_FIRST | PVF_LAST
};

// Computes whether and how we can vectorize the loads/stores of a
// flattened function parameter or return value.
//
// The flattened parameter is represented as the list of ValueVTs and
// Offsets, and is aligned to ParamAlignment bytes. We return a vector
// of the same size as ValueVTs indicating how each piece should be
// loaded/stored (i.e. as a scalar, or as part of a vector
// load/store).
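//
// For example (illustrative): four contiguous f32 pieces at 16-byte
// alignment come back as {PVF_FIRST, PVF_INNER, PVF_INNER, PVF_LAST},
// i.e. a single 4-element load/store, while a lone f64 piece stays
// PVF_SCALAR.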
static SmallVector<ParamVectorizationFlags, 16>
VectorizePTXValueVTs(const SmallVectorImpl<EVT> &ValueVTs,
                     const SmallVectorImpl<uint64_t> &Offsets,
                     Align ParamAlignment) {
  // Set vector size to match ValueVTs and mark all elements as
  // scalars by default.
  SmallVector<ParamVectorizationFlags, 16> VectorInfo;
  VectorInfo.assign(ValueVTs.size(), PVF_SCALAR);

  // Check what we can vectorize using 128/64/32-bit accesses.
  for (int I = 0, E = ValueVTs.size(); I != E; ++I) {
    // Skip elements we've already processed.
    assert(VectorInfo[I] == PVF_SCALAR && "Unexpected vector info state.");
    for (unsigned AccessSize : {16, 8, 4, 2}) {
      unsigned NumElts = CanMergeParamLoadStoresStartingAt(
          I, AccessSize, ValueVTs, Offsets, ParamAlignment);
      // Mark vectorized elements.
      switch (NumElts) {
      default:
        llvm_unreachable("Unexpected return value");
      case 1:
        // Can't vectorize using this size, try next smaller size.
        continue;
      case 2:
        assert(I + 1 < E && "Not enough elements.");
        VectorInfo[I] = PVF_FIRST;
        VectorInfo[I + 1] = PVF_LAST;
        I += 1;
        break;
      case 4:
        assert(I + 3 < E && "Not enough elements.");
        VectorInfo[I] = PVF_FIRST;
        VectorInfo[I + 1] = PVF_INNER;
        VectorInfo[I + 2] = PVF_INNER;
        VectorInfo[I + 3] = PVF_LAST;
        I += 3;
        break;
      }
      // Break out of the inner loop because we've already succeeded
      // at the largest possible AccessSize.
      break;
    }
  }
  return VectorInfo;
}

// NVPTXTargetLowering Constructor.
NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
                                         const NVPTXSubtarget &STI)
    : TargetLowering(TM), nvTM(&TM), STI(STI) {
  // Always lower memset, memcpy, and memmove intrinsics to load/store
  // instructions, rather than generating calls to memset, memcpy, or
  // memmove.
  MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
  MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
  MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Jump is Expensive. Don't create extra control flow for 'and', 'or'
  // condition branches.
  setJumpIsExpensive(true);

  // Wide divides are _very_ slow. Try to reduce the width of the divide if
  // possible.
  addBypassSlowDiv(64, 32);

  // By default, use the Source scheduling
  if (sched4reg)
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Source);

  auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
                                    LegalizeAction NoF16Action) {
    setOperationAction(Op, VT, STI.allowFP16Math() ? Action : NoF16Action);
  };
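  // For example (illustrative): setFP16OperationAction(ISD::SETCC, MVT::f16,
  // Legal, Promote) marks SETCC as Legal only when FP16 math is allowed and
  // as Promote otherwise, so one call site covers both configurations.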

  addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
  addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
  addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
  addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
  addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
  addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
  addRegisterClass(MVT::f16, &NVPTX::Float16RegsRegClass);
  addRegisterClass(MVT::v2f16, &NVPTX::Float16x2RegsRegClass);

  // Conversion to/from FP16/FP16x2 is always legal.
  setOperationAction(ISD::SINT_TO_FP, MVT::f16, Legal);
  setOperationAction(ISD::FP_TO_SINT, MVT::f16, Legal);
  setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f16, Expand);

  setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
  setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);

  // Operations not directly supported by NVPTX.
  for (MVT VT : {MVT::f16, MVT::v2f16, MVT::f32, MVT::f64, MVT::i1, MVT::i8,
                 MVT::i16, MVT::i32, MVT::i64}) {
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::BR_CC, VT, Expand);
  }

  // Some SIGN_EXTEND_INREG can be done using cvt instruction.
  // For others we will expand to a SHL/SRA pair.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);

  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // TODO: we may consider expanding ROTL/ROTR on older GPUs. Currently on GPUs
  // that don't have h/w rotation we lower them to multi-instruction assembly.
  // See ROT*_sw in NVPTXIntrInfo.td
  setOperationAction(ISD::ROTL, MVT::i64, Legal);
  setOperationAction(ISD::ROTR, MVT::i64, Legal);
  setOperationAction(ISD::ROTL, MVT::i32, Legal);
  setOperationAction(ISD::ROTR, MVT::i32, Legal);

  setOperationAction(ISD::ROTL, MVT::i16, Expand);
  setOperationAction(ISD::ROTR, MVT::i16, Expand);
  setOperationAction(ISD::ROTL, MVT::i8, Expand);
  setOperationAction(ISD::ROTR, MVT::i8, Expand);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  // Indirect branch is not supported.
  // This also disables Jump Table creation.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  // We want to legalize constant-related memmove and memcpy
  // intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  // Turn FP extload into load/fpextend
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
  // Turn FP truncstore into trunc + store.
  // FIXME: vector types should also be expanded
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PTX does not support load / store predicate registers
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setTruncStoreAction(VT, MVT::i1, Expand);
  }

  // This is legal in NVPTX
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

  // TRAP can be lowered to PTX trap
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Register custom handling for vector loads/stores
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    if (IsPTXVectorType(VT)) {
      setOperationAction(ISD::LOAD, VT, Custom);
      setOperationAction(ISD::STORE, VT, Custom);
      setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
    }
  }

  // Custom handling for i8 intrinsics
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);

  for (const auto &Ty : {MVT::i16, MVT::i32, MVT::i64}) {
    setOperationAction(ISD::ABS, Ty, Legal);
    setOperationAction(ISD::SMIN, Ty, Legal);
    setOperationAction(ISD::SMAX, Ty, Legal);
    setOperationAction(ISD::UMIN, Ty, Legal);
    setOperationAction(ISD::UMAX, Ty, Legal);

    setOperationAction(ISD::CTPOP, Ty, Legal);
    setOperationAction(ISD::CTLZ, Ty, Legal);
  }

  setOperationAction(ISD::CTTZ, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);

  // PTX does not directly support SELP of i1, so promote to i32 first
  setOperationAction(ISD::SELECT, MVT::i1, Custom);

  // PTX cannot multiply two i64s in a single instruction.
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);

  // We have some custom DAG combine patterns for these nodes
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SREM);
  setTargetDAGCombine(ISD::UREM);

  // setcc for f16x2 needs special handling to prevent legalizer's
  // attempt to scalarize it due to v2i1 not being legal.
  if (STI.allowFP16Math())
    setTargetDAGCombine(ISD::SETCC);

  // Promote fp16 arithmetic if fp16 hardware isn't available or the
  // user passed --nvptx-no-fp16-math. The flag is useful because,
  // although sm_53+ GPUs have some sort of FP16 support in
  // hardware, only sm_53 and sm_60 have a full implementation. Others
  // have only a token amount of hardware and are likely to run faster
  // by using fp32 units instead.
  for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {
    setFP16OperationAction(Op, MVT::f16, Legal, Promote);
    setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
  }

  // There's no neg.f16 instruction. Expand to (0-x).
  setOperationAction(ISD::FNEG, MVT::f16, Expand);
  setOperationAction(ISD::FNEG, MVT::v2f16, Expand);

  // (would be) Library functions.

  // These map to conversion instructions for scalar FP types.
  for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
                         ISD::FTRUNC}) {
    setOperationAction(Op, MVT::f16, Legal);
    setOperationAction(Op, MVT::f32, Legal);
    setOperationAction(Op, MVT::f64, Legal);
    setOperationAction(Op, MVT::v2f16, Expand);
  }

  setOperationAction(ISD::FROUND, MVT::f16, Promote);
  setOperationAction(ISD::FROUND, MVT::v2f16, Expand);
  setOperationAction(ISD::FROUND, MVT::f32, Custom);
  setOperationAction(ISD::FROUND, MVT::f64, Custom);

  // 'Expand' implements FCOPYSIGN without calling an external library.
  setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::v2f16, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);

  // These map to corresponding instructions for f32/f64. f16 must be
  // promoted to f32. v2f16 is expanded to f16, which is then promoted
  // to f32.
  for (const auto &Op : {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN,
                         ISD::FCOS, ISD::FABS, ISD::FMINNUM, ISD::FMAXNUM}) {
    setOperationAction(Op, MVT::f16, Promote);
    setOperationAction(Op, MVT::f32, Legal);
    setOperationAction(Op, MVT::f64, Legal);
    setOperationAction(Op, MVT::v2f16, Expand);
  }
  setOperationAction(ISD::FMINNUM, MVT::f16, Promote);
  setOperationAction(ISD::FMAXNUM, MVT::f16, Promote);
  setOperationAction(ISD::FMINIMUM, MVT::f16, Promote);
  setOperationAction(ISD::FMAXIMUM, MVT::f16, Promote);

  // No FEXP2, FLOG2. The PTX ex2 and log2 functions are always approximate.
  // No FPOW or FREM in PTX.

  // Now deduce the information based on the above-mentioned actions.
  computeRegisterProperties(STI.getRegisterInfo());
}

const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((NVPTXISD::NodeType)Opcode) {
  case NVPTXISD::FIRST_NUMBER:
    break;
  case NVPTXISD::CALL:
    return "NVPTXISD::CALL";
  case NVPTXISD::RET_FLAG:
    return "NVPTXISD::RET_FLAG";
  case NVPTXISD::LOAD_PARAM:
    return "NVPTXISD::LOAD_PARAM";
  case NVPTXISD::Wrapper:
    return "NVPTXISD::Wrapper";
  case NVPTXISD::DeclareParam:
    return "NVPTXISD::DeclareParam";
  case NVPTXISD::DeclareScalarParam:
    return "NVPTXISD::DeclareScalarParam";
  case NVPTXISD::DeclareRet:
    return "NVPTXISD::DeclareRet";
  case NVPTXISD::DeclareScalarRet:
    return "NVPTXISD::DeclareScalarRet";
  case NVPTXISD::DeclareRetParam:
    return "NVPTXISD::DeclareRetParam";
  case NVPTXISD::PrintCall:
    return "NVPTXISD::PrintCall";
  case NVPTXISD::PrintConvergentCall:
    return "NVPTXISD::PrintConvergentCall";
  case NVPTXISD::PrintCallUni:
    return "NVPTXISD::PrintCallUni";
  case NVPTXISD::PrintConvergentCallUni:
    return "NVPTXISD::PrintConvergentCallUni";
  case NVPTXISD::LoadParam:
    return "NVPTXISD::LoadParam";
  case NVPTXISD::LoadParamV2:
    return "NVPTXISD::LoadParamV2";
  case NVPTXISD::LoadParamV4:
    return "NVPTXISD::LoadParamV4";
  case NVPTXISD::StoreParam:
    return "NVPTXISD::StoreParam";
  case NVPTXISD::StoreParamV2:
    return "NVPTXISD::StoreParamV2";
  case NVPTXISD::StoreParamV4:
    return "NVPTXISD::StoreParamV4";
  case NVPTXISD::StoreParamS32:
    return "NVPTXISD::StoreParamS32";
  case NVPTXISD::StoreParamU32:
    return "NVPTXISD::StoreParamU32";
  case NVPTXISD::CallArgBegin:
    return "NVPTXISD::CallArgBegin";
  case NVPTXISD::CallArg:
    return "NVPTXISD::CallArg";
  case NVPTXISD::LastCallArg:
    return "NVPTXISD::LastCallArg";
  case NVPTXISD::CallArgEnd:
    return "NVPTXISD::CallArgEnd";
  case NVPTXISD::CallVoid:
    return "NVPTXISD::CallVoid";
  case NVPTXISD::CallVal:
    return "NVPTXISD::CallVal";
  case NVPTXISD::CallSymbol:
    return "NVPTXISD::CallSymbol";
  case NVPTXISD::Prototype:
    return "NVPTXISD::Prototype";
  case NVPTXISD::MoveParam:
    return "NVPTXISD::MoveParam";
  case NVPTXISD::StoreRetval:
    return "NVPTXISD::StoreRetval";
  case NVPTXISD::StoreRetvalV2:
    return "NVPTXISD::StoreRetvalV2";
  case NVPTXISD::StoreRetvalV4:
    return "NVPTXISD::StoreRetvalV4";
  case NVPTXISD::PseudoUseParam:
    return "NVPTXISD::PseudoUseParam";
  case NVPTXISD::RETURN:
    return "NVPTXISD::RETURN";
  case NVPTXISD::CallSeqBegin:
    return "NVPTXISD::CallSeqBegin";
  case NVPTXISD::CallSeqEnd:
    return "NVPTXISD::CallSeqEnd";
  case NVPTXISD::CallPrototype:
    return "NVPTXISD::CallPrototype";
  case NVPTXISD::ProxyReg:
    return "NVPTXISD::ProxyReg";
  case NVPTXISD::LoadV2:
    return "NVPTXISD::LoadV2";
  case NVPTXISD::LoadV4:
    return "NVPTXISD::LoadV4";
  case NVPTXISD::LDGV2:
    return "NVPTXISD::LDGV2";
  case NVPTXISD::LDGV4:
    return "NVPTXISD::LDGV4";
  case NVPTXISD::LDUV2:
    return "NVPTXISD::LDUV2";
  case NVPTXISD::LDUV4:
    return "NVPTXISD::LDUV4";
  case NVPTXISD::StoreV2:
    return "NVPTXISD::StoreV2";
  case NVPTXISD::StoreV4:
    return "NVPTXISD::StoreV4";
  case NVPTXISD::FUN_SHFL_CLAMP:
    return "NVPTXISD::FUN_SHFL_CLAMP";
  case NVPTXISD::FUN_SHFR_CLAMP:
    return "NVPTXISD::FUN_SHFR_CLAMP";
  case NVPTXISD::IMAD:
    return "NVPTXISD::IMAD";
  case NVPTXISD::SETP_F16X2:
    return "NVPTXISD::SETP_F16X2";
  case NVPTXISD::Dummy:
    return "NVPTXISD::Dummy";
  case NVPTXISD::MUL_WIDE_SIGNED:
    return "NVPTXISD::MUL_WIDE_SIGNED";
  case NVPTXISD::MUL_WIDE_UNSIGNED:
    return "NVPTXISD::MUL_WIDE_UNSIGNED";
  case NVPTXISD::Tex1DFloatS32: return "NVPTXISD::Tex1DFloatS32";
  case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";
  case NVPTXISD::Tex1DFloatFloatLevel:
    return "NVPTXISD::Tex1DFloatFloatLevel";
  case NVPTXISD::Tex1DFloatFloatGrad:
    return "NVPTXISD::Tex1DFloatFloatGrad";
  case NVPTXISD::Tex1DS32S32: return "NVPTXISD::Tex1DS32S32";
  case NVPTXISD::Tex1DS32Float: return "NVPTXISD::Tex1DS32Float";
  case NVPTXISD::Tex1DS32FloatLevel:
    return "NVPTXISD::Tex1DS32FloatLevel";
  case NVPTXISD::Tex1DS32FloatGrad:
    return "NVPTXISD::Tex1DS32FloatGrad";
  case NVPTXISD::Tex1DU32S32: return "NVPTXISD::Tex1DU32S32";
  case NVPTXISD::Tex1DU32Float: return "NVPTXISD::Tex1DU32Float";
  case NVPTXISD::Tex1DU32FloatLevel:
    return "NVPTXISD::Tex1DU32FloatLevel";
  case NVPTXISD::Tex1DU32FloatGrad:
    return "NVPTXISD::Tex1DU32FloatGrad";
  case NVPTXISD::Tex1DArrayFloatS32: return "NVPTXISD::Tex1DArrayFloatS32";
  case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex1DArrayFloatFloat";
  case NVPTXISD::Tex1DArrayFloatFloatLevel:
    return "NVPTXISD::Tex1DArrayFloatFloatLevel";
  case NVPTXISD::Tex1DArrayFloatFloatGrad:
    return "NVPTXISD::Tex1DArrayFloatFloatGrad";
  case NVPTXISD::Tex1DArrayS32S32: return "NVPTXISD::Tex1DArrayS32S32";
  case NVPTXISD::Tex1DArrayS32Float: return "NVPTXISD::Tex1DArrayS32Float";
  case NVPTXISD::Tex1DArrayS32FloatLevel:
    return "NVPTXISD::Tex1DArrayS32FloatLevel";
  case NVPTXISD::Tex1DArrayS32FloatGrad:
    return "NVPTXISD::Tex1DArrayS32FloatGrad";
  case NVPTXISD::Tex1DArrayU32S32: return "NVPTXISD::Tex1DArrayU32S32";
  case NVPTXISD::Tex1DArrayU32Float: return "NVPTXISD::Tex1DArrayU32Float";
  case NVPTXISD::Tex1DArrayU32FloatLevel:
    return "NVPTXISD::Tex1DArrayU32FloatLevel";
  case NVPTXISD::Tex1DArrayU32FloatGrad:
    return "NVPTXISD::Tex1DArrayU32FloatGrad";
  case NVPTXISD::Tex2DFloatS32: return "NVPTXISD::Tex2DFloatS32";
  case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";
  case NVPTXISD::Tex2DFloatFloatLevel:
    return "NVPTXISD::Tex2DFloatFloatLevel";
  case NVPTXISD::Tex2DFloatFloatGrad:
    return "NVPTXISD::Tex2DFloatFloatGrad";
  case NVPTXISD::Tex2DS32S32: return "NVPTXISD::Tex2DS32S32";
  case NVPTXISD::Tex2DS32Float: return "NVPTXISD::Tex2DS32Float";
  case NVPTXISD::Tex2DS32FloatLevel:
    return "NVPTXISD::Tex2DS32FloatLevel";
  case NVPTXISD::Tex2DS32FloatGrad:
    return "NVPTXISD::Tex2DS32FloatGrad";
  case NVPTXISD::Tex2DU32S32: return "NVPTXISD::Tex2DU32S32";
  case NVPTXISD::Tex2DU32Float: return "NVPTXISD::Tex2DU32Float";
  case NVPTXISD::Tex2DU32FloatLevel:
    return "NVPTXISD::Tex2DU32FloatLevel";
  case NVPTXISD::Tex2DU32FloatGrad:
    return "NVPTXISD::Tex2DU32FloatGrad";
  case NVPTXISD::Tex2DArrayFloatS32: return "NVPTXISD::Tex2DArrayFloatS32";
  case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
  case NVPTXISD::Tex2DArrayFloatFloatLevel:
    return "NVPTXISD::Tex2DArrayFloatFloatLevel";
  case NVPTXISD::Tex2DArrayFloatFloatGrad:
    return "NVPTXISD::Tex2DArrayFloatFloatGrad";
  case NVPTXISD::Tex2DArrayS32S32: return "NVPTXISD::Tex2DArrayS32S32";
  case NVPTXISD::Tex2DArrayS32Float: return "NVPTXISD::Tex2DArrayS32Float";
  case NVPTXISD::Tex2DArrayS32FloatLevel:
    return "NVPTXISD::Tex2DArrayS32FloatLevel";
  case NVPTXISD::Tex2DArrayS32FloatGrad:
    return "NVPTXISD::Tex2DArrayS32FloatGrad";
  case NVPTXISD::Tex2DArrayU32S32: return "NVPTXISD::Tex2DArrayU32S32";
  case NVPTXISD::Tex2DArrayU32Float: return "NVPTXISD::Tex2DArrayU32Float";
  case NVPTXISD::Tex2DArrayU32FloatLevel:
    return "NVPTXISD::Tex2DArrayU32FloatLevel";
  case NVPTXISD::Tex2DArrayU32FloatGrad:
    return "NVPTXISD::Tex2DArrayU32FloatGrad";
  case NVPTXISD::Tex3DFloatS32: return "NVPTXISD::Tex3DFloatS32";
  case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";
  case NVPTXISD::Tex3DFloatFloatLevel:
    return "NVPTXISD::Tex3DFloatFloatLevel";
  case NVPTXISD::Tex3DFloatFloatGrad:
    return "NVPTXISD::Tex3DFloatFloatGrad";
  case NVPTXISD::Tex3DS32S32: return "NVPTXISD::Tex3DS32S32";
  case NVPTXISD::Tex3DS32Float: return "NVPTXISD::Tex3DS32Float";
  case NVPTXISD::Tex3DS32FloatLevel:
    return "NVPTXISD::Tex3DS32FloatLevel";
  case NVPTXISD::Tex3DS32FloatGrad:
    return "NVPTXISD::Tex3DS32FloatGrad";
  case NVPTXISD::Tex3DU32S32: return "NVPTXISD::Tex3DU32S32";
  case NVPTXISD::Tex3DU32Float: return "NVPTXISD::Tex3DU32Float";
  case NVPTXISD::Tex3DU32FloatLevel:
    return "NVPTXISD::Tex3DU32FloatLevel";
  case NVPTXISD::Tex3DU32FloatGrad:
    return "NVPTXISD::Tex3DU32FloatGrad";
  case NVPTXISD::TexCubeFloatFloat: return "NVPTXISD::TexCubeFloatFloat";
  case NVPTXISD::TexCubeFloatFloatLevel:
    return "NVPTXISD::TexCubeFloatFloatLevel";
  case NVPTXISD::TexCubeS32Float: return "NVPTXISD::TexCubeS32Float";
  case NVPTXISD::TexCubeS32FloatLevel:
    return "NVPTXISD::TexCubeS32FloatLevel";
  case NVPTXISD::TexCubeU32Float: return "NVPTXISD::TexCubeU32Float";
  case NVPTXISD::TexCubeU32FloatLevel:
    return "NVPTXISD::TexCubeU32FloatLevel";
  case NVPTXISD::TexCubeArrayFloatFloat:
    return "NVPTXISD::TexCubeArrayFloatFloat";
  case NVPTXISD::TexCubeArrayFloatFloatLevel:
    return "NVPTXISD::TexCubeArrayFloatFloatLevel";
  case NVPTXISD::TexCubeArrayS32Float:
    return "NVPTXISD::TexCubeArrayS32Float";
  case NVPTXISD::TexCubeArrayS32FloatLevel:
    return "NVPTXISD::TexCubeArrayS32FloatLevel";
  case NVPTXISD::TexCubeArrayU32Float:
    return "NVPTXISD::TexCubeArrayU32Float";
  case NVPTXISD::TexCubeArrayU32FloatLevel:
    return "NVPTXISD::TexCubeArrayU32FloatLevel";
  case NVPTXISD::Tld4R2DFloatFloat:
    return "NVPTXISD::Tld4R2DFloatFloat";
  case NVPTXISD::Tld4G2DFloatFloat:
    return "NVPTXISD::Tld4G2DFloatFloat";
  case NVPTXISD::Tld4B2DFloatFloat:
    return "NVPTXISD::Tld4B2DFloatFloat";
  case NVPTXISD::Tld4A2DFloatFloat:
    return "NVPTXISD::Tld4A2DFloatFloat";
  case NVPTXISD::Tld4R2DS64Float:
    return "NVPTXISD::Tld4R2DS64Float";
  case NVPTXISD::Tld4G2DS64Float:
    return "NVPTXISD::Tld4G2DS64Float";
  case NVPTXISD::Tld4B2DS64Float:
    return "NVPTXISD::Tld4B2DS64Float";
  case NVPTXISD::Tld4A2DS64Float:
    return "NVPTXISD::Tld4A2DS64Float";
  case NVPTXISD::Tld4R2DU64Float:
    return "NVPTXISD::Tld4R2DU64Float";
  case NVPTXISD::Tld4G2DU64Float:
    return "NVPTXISD::Tld4G2DU64Float";
  case NVPTXISD::Tld4B2DU64Float:
    return "NVPTXISD::Tld4B2DU64Float";
  case NVPTXISD::Tld4A2DU64Float:
    return "NVPTXISD::Tld4A2DU64Float";

  case NVPTXISD::TexUnified1DFloatS32:
    return "NVPTXISD::TexUnified1DFloatS32";
  case NVPTXISD::TexUnified1DFloatFloat:
    return "NVPTXISD::TexUnified1DFloatFloat";
  case NVPTXISD::TexUnified1DFloatFloatLevel:
    return "NVPTXISD::TexUnified1DFloatFloatLevel";
  case NVPTXISD::TexUnified1DFloatFloatGrad:
    return "NVPTXISD::TexUnified1DFloatFloatGrad";
  case NVPTXISD::TexUnified1DS32S32:
    return "NVPTXISD::TexUnified1DS32S32";
  case NVPTXISD::TexUnified1DS32Float:
    return "NVPTXISD::TexUnified1DS32Float";
  case NVPTXISD::TexUnified1DS32FloatLevel:
    return "NVPTXISD::TexUnified1DS32FloatLevel";
  case NVPTXISD::TexUnified1DS32FloatGrad:
    return "NVPTXISD::TexUnified1DS32FloatGrad";
  case NVPTXISD::TexUnified1DU32S32:
    return "NVPTXISD::TexUnified1DU32S32";
  case NVPTXISD::TexUnified1DU32Float:
    return "NVPTXISD::TexUnified1DU32Float";
  case NVPTXISD::TexUnified1DU32FloatLevel:
    return "NVPTXISD::TexUnified1DU32FloatLevel";
  case NVPTXISD::TexUnified1DU32FloatGrad:
    return "NVPTXISD::TexUnified1DU32FloatGrad";
  case NVPTXISD::TexUnified1DArrayFloatS32:
    return "NVPTXISD::TexUnified1DArrayFloatS32";
  case NVPTXISD::TexUnified1DArrayFloatFloat:
    return "NVPTXISD::TexUnified1DArrayFloatFloat";
  case NVPTXISD::TexUnified1DArrayFloatFloatLevel:
    return "NVPTXISD::TexUnified1DArrayFloatFloatLevel";
  case NVPTXISD::TexUnified1DArrayFloatFloatGrad:
    return "NVPTXISD::TexUnified1DArrayFloatFloatGrad";
  case NVPTXISD::TexUnified1DArrayS32S32:
    return "NVPTXISD::TexUnified1DArrayS32S32";
  case NVPTXISD::TexUnified1DArrayS32Float:
    return "NVPTXISD::TexUnified1DArrayS32Float";
  case NVPTXISD::TexUnified1DArrayS32FloatLevel:
    return "NVPTXISD::TexUnified1DArrayS32FloatLevel";
  case NVPTXISD::TexUnified1DArrayS32FloatGrad:
    return "NVPTXISD::TexUnified1DArrayS32FloatGrad";
  case NVPTXISD::TexUnified1DArrayU32S32:
    return "NVPTXISD::TexUnified1DArrayU32S32";
  case NVPTXISD::TexUnified1DArrayU32Float:
    return "NVPTXISD::TexUnified1DArrayU32Float";
  case NVPTXISD::TexUnified1DArrayU32FloatLevel:
    return "NVPTXISD::TexUnified1DArrayU32FloatLevel";
  case NVPTXISD::TexUnified1DArrayU32FloatGrad:
    return "NVPTXISD::TexUnified1DArrayU32FloatGrad";
  case NVPTXISD::TexUnified2DFloatS32:
    return "NVPTXISD::TexUnified2DFloatS32";
  case NVPTXISD::TexUnified2DFloatFloat:
    return "NVPTXISD::TexUnified2DFloatFloat";
  case NVPTXISD::TexUnified2DFloatFloatLevel:
    return "NVPTXISD::TexUnified2DFloatFloatLevel";
  case NVPTXISD::TexUnified2DFloatFloatGrad:
    return "NVPTXISD::TexUnified2DFloatFloatGrad";
  case NVPTXISD::TexUnified2DS32S32:
    return "NVPTXISD::TexUnified2DS32S32";
  case NVPTXISD::TexUnified2DS32Float:
    return "NVPTXISD::TexUnified2DS32Float";
  case NVPTXISD::TexUnified2DS32FloatLevel:
    return "NVPTXISD::TexUnified2DS32FloatLevel";
  case NVPTXISD::TexUnified2DS32FloatGrad:
    return "NVPTXISD::TexUnified2DS32FloatGrad";
  case NVPTXISD::TexUnified2DU32S32:
    return "NVPTXISD::TexUnified2DU32S32";
  case NVPTXISD::TexUnified2DU32Float:
    return "NVPTXISD::TexUnified2DU32Float";
  case NVPTXISD::TexUnified2DU32FloatLevel:
    return "NVPTXISD::TexUnified2DU32FloatLevel";
  case NVPTXISD::TexUnified2DU32FloatGrad:
    return "NVPTXISD::TexUnified2DU32FloatGrad";
  case NVPTXISD::TexUnified2DArrayFloatS32:
    return "NVPTXISD::TexUnified2DArrayFloatS32";
  case NVPTXISD::TexUnified2DArrayFloatFloat:
    return "NVPTXISD::TexUnified2DArrayFloatFloat";
  case NVPTXISD::TexUnified2DArrayFloatFloatLevel:
    return "NVPTXISD::TexUnified2DArrayFloatFloatLevel";
  case NVPTXISD::TexUnified2DArrayFloatFloatGrad:
    return "NVPTXISD::TexUnified2DArrayFloatFloatGrad";
  case NVPTXISD::TexUnified2DArrayS32S32:
    return "NVPTXISD::TexUnified2DArrayS32S32";
  case NVPTXISD::TexUnified2DArrayS32Float:
    return "NVPTXISD::TexUnified2DArrayS32Float";
  case NVPTXISD::TexUnified2DArrayS32FloatLevel:
    return "NVPTXISD::TexUnified2DArrayS32FloatLevel";
  case NVPTXISD::TexUnified2DArrayS32FloatGrad:
    return "NVPTXISD::TexUnified2DArrayS32FloatGrad";
  case NVPTXISD::TexUnified2DArrayU32S32:
    return "NVPTXISD::TexUnified2DArrayU32S32";
  case NVPTXISD::TexUnified2DArrayU32Float:
    return "NVPTXISD::TexUnified2DArrayU32Float";
  case NVPTXISD::TexUnified2DArrayU32FloatLevel:
    return "NVPTXISD::TexUnified2DArrayU32FloatLevel";
  case NVPTXISD::TexUnified2DArrayU32FloatGrad:
    return "NVPTXISD::TexUnified2DArrayU32FloatGrad";
  case NVPTXISD::TexUnified3DFloatS32:
    return "NVPTXISD::TexUnified3DFloatS32";
  case NVPTXISD::TexUnified3DFloatFloat:
    return "NVPTXISD::TexUnified3DFloatFloat";
  case NVPTXISD::TexUnified3DFloatFloatLevel:
    return "NVPTXISD::TexUnified3DFloatFloatLevel";
  case NVPTXISD::TexUnified3DFloatFloatGrad:
    return "NVPTXISD::TexUnified3DFloatFloatGrad";
  case NVPTXISD::TexUnified3DS32S32:
    return "NVPTXISD::TexUnified3DS32S32";
  case NVPTXISD::TexUnified3DS32Float:
    return "NVPTXISD::TexUnified3DS32Float";
  case NVPTXISD::TexUnified3DS32FloatLevel:
    return "NVPTXISD::TexUnified3DS32FloatLevel";
  case NVPTXISD::TexUnified3DS32FloatGrad:
    return "NVPTXISD::TexUnified3DS32FloatGrad";
  case NVPTXISD::TexUnified3DU32S32:
    return "NVPTXISD::TexUnified3DU32S32";
  case NVPTXISD::TexUnified3DU32Float:
    return "NVPTXISD::TexUnified3DU32Float";
  case NVPTXISD::TexUnified3DU32FloatLevel:
    return "NVPTXISD::TexUnified3DU32FloatLevel";
  case NVPTXISD::TexUnified3DU32FloatGrad:
    return "NVPTXISD::TexUnified3DU32FloatGrad";
  case NVPTXISD::TexUnifiedCubeFloatFloat:
    return "NVPTXISD::TexUnifiedCubeFloatFloat";
  case NVPTXISD::TexUnifiedCubeFloatFloatLevel:
    return "NVPTXISD::TexUnifiedCubeFloatFloatLevel";
  case NVPTXISD::TexUnifiedCubeS32Float:
    return "NVPTXISD::TexUnifiedCubeS32Float";
  case NVPTXISD::TexUnifiedCubeS32FloatLevel:
    return "NVPTXISD::TexUnifiedCubeS32FloatLevel";
  case NVPTXISD::TexUnifiedCubeU32Float:
    return "NVPTXISD::TexUnifiedCubeU32Float";
  case NVPTXISD::TexUnifiedCubeU32FloatLevel:
    return "NVPTXISD::TexUnifiedCubeU32FloatLevel";
  case NVPTXISD::TexUnifiedCubeArrayFloatFloat:
    return "NVPTXISD::TexUnifiedCubeArrayFloatFloat";
  case NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel:
    return "NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel";
  case NVPTXISD::TexUnifiedCubeArrayS32Float:
    return "NVPTXISD::TexUnifiedCubeArrayS32Float";
  case NVPTXISD::TexUnifiedCubeArrayS32FloatLevel:
    return "NVPTXISD::TexUnifiedCubeArrayS32FloatLevel";
  case NVPTXISD::TexUnifiedCubeArrayU32Float:
    return "NVPTXISD::TexUnifiedCubeArrayU32Float";
  case NVPTXISD::TexUnifiedCubeArrayU32FloatLevel:
    return "NVPTXISD::TexUnifiedCubeArrayU32FloatLevel";
  case NVPTXISD::Tld4UnifiedR2DFloatFloat:
    return "NVPTXISD::Tld4UnifiedR2DFloatFloat";
  case NVPTXISD::Tld4UnifiedG2DFloatFloat:
    return "NVPTXISD::Tld4UnifiedG2DFloatFloat";
  case NVPTXISD::Tld4UnifiedB2DFloatFloat:
    return "NVPTXISD::Tld4UnifiedB2DFloatFloat";
  case NVPTXISD::Tld4UnifiedA2DFloatFloat:
    return "NVPTXISD::Tld4UnifiedA2DFloatFloat";
  case NVPTXISD::Tld4UnifiedR2DS64Float:
    return "NVPTXISD::Tld4UnifiedR2DS64Float";
  case NVPTXISD::Tld4UnifiedG2DS64Float:
    return "NVPTXISD::Tld4UnifiedG2DS64Float";
  case NVPTXISD::Tld4UnifiedB2DS64Float:
    return "NVPTXISD::Tld4UnifiedB2DS64Float";
  case NVPTXISD::Tld4UnifiedA2DS64Float:
    return "NVPTXISD::Tld4UnifiedA2DS64Float";
  case NVPTXISD::Tld4UnifiedR2DU64Float:
    return "NVPTXISD::Tld4UnifiedR2DU64Float";
  case NVPTXISD::Tld4UnifiedG2DU64Float:
    return "NVPTXISD::Tld4UnifiedG2DU64Float";
  case NVPTXISD::Tld4UnifiedB2DU64Float:
    return "NVPTXISD::Tld4UnifiedB2DU64Float";
  case NVPTXISD::Tld4UnifiedA2DU64Float:
    return "NVPTXISD::Tld4UnifiedA2DU64Float";

  case NVPTXISD::Suld1DI8Clamp: return "NVPTXISD::Suld1DI8Clamp";
  case NVPTXISD::Suld1DI16Clamp: return "NVPTXISD::Suld1DI16Clamp";
  case NVPTXISD::Suld1DI32Clamp: return "NVPTXISD::Suld1DI32Clamp";
  case NVPTXISD::Suld1DI64Clamp: return "NVPTXISD::Suld1DI64Clamp";
  case NVPTXISD::Suld1DV2I8Clamp: return "NVPTXISD::Suld1DV2I8Clamp";
  case NVPTXISD::Suld1DV2I16Clamp: return "NVPTXISD::Suld1DV2I16Clamp";
  case NVPTXISD::Suld1DV2I32Clamp: return "NVPTXISD::Suld1DV2I32Clamp";
  case NVPTXISD::Suld1DV2I64Clamp: return "NVPTXISD::Suld1DV2I64Clamp";
  case NVPTXISD::Suld1DV4I8Clamp: return "NVPTXISD::Suld1DV4I8Clamp";
  case NVPTXISD::Suld1DV4I16Clamp: return "NVPTXISD::Suld1DV4I16Clamp";
  case NVPTXISD::Suld1DV4I32Clamp: return "NVPTXISD::Suld1DV4I32Clamp";

  case NVPTXISD::Suld1DArrayI8Clamp: return "NVPTXISD::Suld1DArrayI8Clamp";
  case NVPTXISD::Suld1DArrayI16Clamp: return "NVPTXISD::Suld1DArrayI16Clamp";
  case NVPTXISD::Suld1DArrayI32Clamp: return "NVPTXISD::Suld1DArrayI32Clamp";
  case NVPTXISD::Suld1DArrayI64Clamp: return "NVPTXISD::Suld1DArrayI64Clamp";
  case NVPTXISD::Suld1DArrayV2I8Clamp: return "NVPTXISD::Suld1DArrayV2I8Clamp";
  case NVPTXISD::Suld1DArrayV2I16Clamp:return "NVPTXISD::Suld1DArrayV2I16Clamp";
  case NVPTXISD::Suld1DArrayV2I32Clamp:return "NVPTXISD::Suld1DArrayV2I32Clamp";
  case NVPTXISD::Suld1DArrayV2I64Clamp:return "NVPTXISD::Suld1DArrayV2I64Clamp";
  case NVPTXISD::Suld1DArrayV4I8Clamp: return "NVPTXISD::Suld1DArrayV4I8Clamp";
  case NVPTXISD::Suld1DArrayV4I16Clamp:return "NVPTXISD::Suld1DArrayV4I16Clamp";
  case NVPTXISD::Suld1DArrayV4I32Clamp:return "NVPTXISD::Suld1DArrayV4I32Clamp";

  case NVPTXISD::Suld2DI8Clamp: return "NVPTXISD::Suld2DI8Clamp";
  case NVPTXISD::Suld2DI16Clamp: return "NVPTXISD::Suld2DI16Clamp";
  case NVPTXISD::Suld2DI32Clamp: return "NVPTXISD::Suld2DI32Clamp";
  case NVPTXISD::Suld2DI64Clamp: return "NVPTXISD::Suld2DI64Clamp";
  case NVPTXISD::Suld2DV2I8Clamp: return "NVPTXISD::Suld2DV2I8Clamp";
  case NVPTXISD::Suld2DV2I16Clamp: return "NVPTXISD::Suld2DV2I16Clamp";
  case NVPTXISD::Suld2DV2I32Clamp: return "NVPTXISD::Suld2DV2I32Clamp";
  case NVPTXISD::Suld2DV2I64Clamp: return "NVPTXISD::Suld2DV2I64Clamp";
  case NVPTXISD::Suld2DV4I8Clamp: return "NVPTXISD::Suld2DV4I8Clamp";
  case NVPTXISD::Suld2DV4I16Clamp: return "NVPTXISD::Suld2DV4I16Clamp";
  case NVPTXISD::Suld2DV4I32Clamp: return "NVPTXISD::Suld2DV4I32Clamp";

  case NVPTXISD::Suld2DArrayI8Clamp: return "NVPTXISD::Suld2DArrayI8Clamp";
  case NVPTXISD::Suld2DArrayI16Clamp: return "NVPTXISD::Suld2DArrayI16Clamp";
  case NVPTXISD::Suld2DArrayI32Clamp: return "NVPTXISD::Suld2DArrayI32Clamp";
  case NVPTXISD::Suld2DArrayI64Clamp: return "NVPTXISD::Suld2DArrayI64Clamp";
  case NVPTXISD::Suld2DArrayV2I8Clamp: return "NVPTXISD::Suld2DArrayV2I8Clamp";
  case NVPTXISD::Suld2DArrayV2I16Clamp:return "NVPTXISD::Suld2DArrayV2I16Clamp";
  case NVPTXISD::Suld2DArrayV2I32Clamp:return "NVPTXISD::Suld2DArrayV2I32Clamp";
  case NVPTXISD::Suld2DArrayV2I64Clamp:return "NVPTXISD::Suld2DArrayV2I64Clamp";
  case NVPTXISD::Suld2DArrayV4I8Clamp: return "NVPTXISD::Suld2DArrayV4I8Clamp";
  case NVPTXISD::Suld2DArrayV4I16Clamp:return "NVPTXISD::Suld2DArrayV4I16Clamp";
  case NVPTXISD::Suld2DArrayV4I32Clamp:return "NVPTXISD::Suld2DArrayV4I32Clamp";

  case NVPTXISD::Suld3DI8Clamp: return "NVPTXISD::Suld3DI8Clamp";
  case NVPTXISD::Suld3DI16Clamp: return "NVPTXISD::Suld3DI16Clamp";
  case NVPTXISD::Suld3DI32Clamp: return "NVPTXISD::Suld3DI32Clamp";
  case NVPTXISD::Suld3DI64Clamp: return "NVPTXISD::Suld3DI64Clamp";
  case NVPTXISD::Suld3DV2I8Clamp: return "NVPTXISD::Suld3DV2I8Clamp";
  case NVPTXISD::Suld3DV2I16Clamp: return "NVPTXISD::Suld3DV2I16Clamp";
  case NVPTXISD::Suld3DV2I32Clamp: return "NVPTXISD::Suld3DV2I32Clamp";
  case NVPTXISD::Suld3DV2I64Clamp: return "NVPTXISD::Suld3DV2I64Clamp";
  case NVPTXISD::Suld3DV4I8Clamp: return "NVPTXISD::Suld3DV4I8Clamp";
  case NVPTXISD::Suld3DV4I16Clamp: return "NVPTXISD::Suld3DV4I16Clamp";
  case NVPTXISD::Suld3DV4I32Clamp: return "NVPTXISD::Suld3DV4I32Clamp";

  case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";
  case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";
  case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";
  case NVPTXISD::Suld1DI64Trap: return "NVPTXISD::Suld1DI64Trap";
  case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";
  case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";
  case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";
  case NVPTXISD::Suld1DV2I64Trap: return "NVPTXISD::Suld1DV2I64Trap";
  case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";
  case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";
  case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";

  case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";
  case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";
  case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";
  case NVPTXISD::Suld1DArrayI64Trap: return "NVPTXISD::Suld1DArrayI64Trap";
  case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";
  case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";
  case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";
  case NVPTXISD::Suld1DArrayV2I64Trap: return "NVPTXISD::Suld1DArrayV2I64Trap";
  case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";
  case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";
  case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";

  case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";
  case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";
  case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";
  case NVPTXISD::Suld2DI64Trap: return "NVPTXISD::Suld2DI64Trap";
  case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";
  case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";
  case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";
  case NVPTXISD::Suld2DV2I64Trap: return "NVPTXISD::Suld2DV2I64Trap";
  case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";
  case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";
  case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";

  case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";
  case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";
  case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";
  case NVPTXISD::Suld2DArrayI64Trap: return "NVPTXISD::Suld2DArrayI64Trap";
  case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";
  case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";
  case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";
  case NVPTXISD::Suld2DArrayV2I64Trap: return "NVPTXISD::Suld2DArrayV2I64Trap";
  case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";
  case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";
  case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";

  case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";
  case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";
  case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";
  case NVPTXISD::Suld3DI64Trap: return "NVPTXISD::Suld3DI64Trap";
  case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";
  case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";
  case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";
  case NVPTXISD::Suld3DV2I64Trap: return "NVPTXISD::Suld3DV2I64Trap";
  case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";
  case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";
  case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";

  case NVPTXISD::Suld1DI8Zero: return "NVPTXISD::Suld1DI8Zero";
  case NVPTXISD::Suld1DI16Zero: return "NVPTXISD::Suld1DI16Zero";
  case NVPTXISD::Suld1DI32Zero: return "NVPTXISD::Suld1DI32Zero";
  case NVPTXISD::Suld1DI64Zero: return "NVPTXISD::Suld1DI64Zero";
  case NVPTXISD::Suld1DV2I8Zero: return "NVPTXISD::Suld1DV2I8Zero";
  case NVPTXISD::Suld1DV2I16Zero: return "NVPTXISD::Suld1DV2I16Zero";
  case NVPTXISD::Suld1DV2I32Zero: return "NVPTXISD::Suld1DV2I32Zero";
  case NVPTXISD::Suld1DV2I64Zero: return "NVPTXISD::Suld1DV2I64Zero";
  case NVPTXISD::Suld1DV4I8Zero: return "NVPTXISD::Suld1DV4I8Zero";
  case NVPTXISD::Suld1DV4I16Zero: return "NVPTXISD::Suld1DV4I16Zero";
  case NVPTXISD::Suld1DV4I32Zero: return "NVPTXISD::Suld1DV4I32Zero";

  case NVPTXISD::Suld1DArrayI8Zero: return "NVPTXISD::Suld1DArrayI8Zero";
  case NVPTXISD::Suld1DArrayI16Zero: return "NVPTXISD::Suld1DArrayI16Zero";
  case NVPTXISD::Suld1DArrayI32Zero: return "NVPTXISD::Suld1DArrayI32Zero";
  case NVPTXISD::Suld1DArrayI64Zero: return "NVPTXISD::Suld1DArrayI64Zero";
  case NVPTXISD::Suld1DArrayV2I8Zero: return "NVPTXISD::Suld1DArrayV2I8Zero";
  case NVPTXISD::Suld1DArrayV2I16Zero: return "NVPTXISD::Suld1DArrayV2I16Zero";
  case NVPTXISD::Suld1DArrayV2I32Zero: return "NVPTXISD::Suld1DArrayV2I32Zero";
  case NVPTXISD::Suld1DArrayV2I64Zero: return "NVPTXISD::Suld1DArrayV2I64Zero";
  case NVPTXISD::Suld1DArrayV4I8Zero: return "NVPTXISD::Suld1DArrayV4I8Zero";
  case NVPTXISD::Suld1DArrayV4I16Zero: return "NVPTXISD::Suld1DArrayV4I16Zero";
  case NVPTXISD::Suld1DArrayV4I32Zero: return "NVPTXISD::Suld1DArrayV4I32Zero";

  case NVPTXISD::Suld2DI8Zero: return "NVPTXISD::Suld2DI8Zero";
  case NVPTXISD::Suld2DI16Zero: return "NVPTXISD::Suld2DI16Zero";
  case NVPTXISD::Suld2DI32Zero: return "NVPTXISD::Suld2DI32Zero";
  case NVPTXISD::Suld2DI64Zero: return "NVPTXISD::Suld2DI64Zero";
  case NVPTXISD::Suld2DV2I8Zero: return "NVPTXISD::Suld2DV2I8Zero";
  case NVPTXISD::Suld2DV2I16Zero: return "NVPTXISD::Suld2DV2I16Zero";
  case NVPTXISD::Suld2DV2I32Zero: return "NVPTXISD::Suld2DV2I32Zero";
  case NVPTXISD::Suld2DV2I64Zero: return "NVPTXISD::Suld2DV2I64Zero";
  case NVPTXISD::Suld2DV4I8Zero: return "NVPTXISD::Suld2DV4I8Zero";
  case NVPTXISD::Suld2DV4I16Zero: return "NVPTXISD::Suld2DV4I16Zero";
  case NVPTXISD::Suld2DV4I32Zero: return "NVPTXISD::Suld2DV4I32Zero";

  case NVPTXISD::Suld2DArrayI8Zero: return "NVPTXISD::Suld2DArrayI8Zero";
  case NVPTXISD::Suld2DArrayI16Zero: return "NVPTXISD::Suld2DArrayI16Zero";
  case NVPTXISD::Suld2DArrayI32Zero: return "NVPTXISD::Suld2DArrayI32Zero";
  case NVPTXISD::Suld2DArrayI64Zero: return "NVPTXISD::Suld2DArrayI64Zero";
  case NVPTXISD::Suld2DArrayV2I8Zero: return "NVPTXISD::Suld2DArrayV2I8Zero";
  case NVPTXISD::Suld2DArrayV2I16Zero: return "NVPTXISD::Suld2DArrayV2I16Zero";
  case NVPTXISD::Suld2DArrayV2I32Zero: return "NVPTXISD::Suld2DArrayV2I32Zero";
  case NVPTXISD::Suld2DArrayV2I64Zero: return "NVPTXISD::Suld2DArrayV2I64Zero";
  case NVPTXISD::Suld2DArrayV4I8Zero: return "NVPTXISD::Suld2DArrayV4I8Zero";
  case NVPTXISD::Suld2DArrayV4I16Zero: return "NVPTXISD::Suld2DArrayV4I16Zero";
  case NVPTXISD::Suld2DArrayV4I32Zero: return "NVPTXISD::Suld2DArrayV4I32Zero";

  case NVPTXISD::Suld3DI8Zero: return "NVPTXISD::Suld3DI8Zero";
  case NVPTXISD::Suld3DI16Zero: return "NVPTXISD::Suld3DI16Zero";
  case NVPTXISD::Suld3DI32Zero: return "NVPTXISD::Suld3DI32Zero";
  case NVPTXISD::Suld3DI64Zero: return "NVPTXISD::Suld3DI64Zero";
  case NVPTXISD::Suld3DV2I8Zero: return "NVPTXISD::Suld3DV2I8Zero";
  case NVPTXISD::Suld3DV2I16Zero: return "NVPTXISD::Suld3DV2I16Zero";
  case NVPTXISD::Suld3DV2I32Zero: return "NVPTXISD::Suld3DV2I32Zero";
  case NVPTXISD::Suld3DV2I64Zero: return "NVPTXISD::Suld3DV2I64Zero";
  case NVPTXISD::Suld3DV4I8Zero: return "NVPTXISD::Suld3DV4I8Zero";
  case NVPTXISD::Suld3DV4I16Zero: return "NVPTXISD::Suld3DV4I16Zero";
  case NVPTXISD::Suld3DV4I32Zero: return "NVPTXISD::Suld3DV4I32Zero";
  }
  return nullptr;
}

TargetLoweringBase::LegalizeTypeAction
NVPTXTargetLowering::getPreferredVectorAction(MVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType() == MVT::i1)
    return TypeSplitVector;
  if (VT == MVT::v2f16)
    return TypeLegal;
  return TargetLoweringBase::getPreferredVectorAction(VT);
}

SDValue NVPTXTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                             int Enabled, int &ExtraSteps,
                                             bool &UseOneConst,
                                             bool Reciprocal) const {
  if (!(Enabled == ReciprocalEstimate::Enabled ||
        (Enabled == ReciprocalEstimate::Unspecified && !usePrecSqrtF32())))
    return SDValue();

  if (ExtraSteps == ReciprocalEstimate::Unspecified)
    ExtraSteps = 0;

  SDLoc DL(Operand);
  EVT VT = Operand.getValueType();
  bool Ftz = useF32FTZ(DAG.getMachineFunction());

  auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                       DAG.getConstant(IID, DL, MVT::i32), Operand);
  };

  // The sqrt and rsqrt refinement processes assume we always start out with an
  // approximation of the rsqrt. Therefore, if we're going to do any refinement
  // (i.e. ExtraSteps > 0), we must return an rsqrt. But if we're *not* doing
  // any refinement, we must return a regular sqrt.
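  //
  // For example (illustrative): one Newton-Raphson refinement step computes
  // r' = r * (1.5 - 0.5 * x * r * r) from an initial r = rsqrt(x), which is
  // why any refinement has to start from an rsqrt approximation.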
  if (Reciprocal || ExtraSteps > 0) {
    if (VT == MVT::f32)
      return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
                                   : Intrinsic::nvvm_rsqrt_approx_f);
    else if (VT == MVT::f64)
      return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
    else
      return SDValue();
  } else {
    if (VT == MVT::f32)
      return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
                                   : Intrinsic::nvvm_sqrt_approx_f);
    else {
      // There's no sqrt.approx.f64 instruction, so we emit
      // reciprocal(rsqrt(x)). This is faster than
      // select(x == 0, 0, x * rsqrt(x)). (In fact, it's faster than plain
      // x * rsqrt(x).)
      return DAG.getNode(
          ISD::INTRINSIC_WO_CHAIN, DL, VT,
          DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
          MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
    }
  }
}

SDValue
NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  const GlobalAddressSDNode *GAN = cast<GlobalAddressSDNode>(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout(), GAN->getAddressSpace());
  Op = DAG.getTargetGlobalAddress(GAN->getGlobal(), dl, PtrVT);
  return DAG.getNode(NVPTXISD::Wrapper, dl, PtrVT, Op);
}

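// Builds the PTX ".callprototype" declaration used for indirect call sites.
// For example (illustrative), a callee returning i32 and taking a single i32
// argument would produce:
//   prototype_0 : .callprototype (.param .b32 _) _ (.param .b32 _);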
std::string NVPTXTargetLowering::getPrototype(
    const DataLayout &DL, Type *retTy, const ArgListTy &Args,
    const SmallVectorImpl<ISD::OutputArg> &Outs, MaybeAlign retAlignment,
    const CallBase &CB) const {
  auto PtrVT = getPointerTy(DL);

  bool isABI = (STI.getSmVersion() >= 20);
  assert(isABI && "Non-ABI compilation is not supported");
  if (!isABI)
    return "";

  std::stringstream O;
  O << "prototype_" << uniqueCallSite << " : .callprototype ";

  if (retTy->getTypeID() == Type::VoidTyID) {
    O << "()";
  } else {
    O << "(";
    if (retTy->isFloatingPointTy() ||
        (retTy->isIntegerTy() && !retTy->isIntegerTy(128))) {
      unsigned size = 0;
      if (auto *ITy = dyn_cast<IntegerType>(retTy)) {
        size = ITy->getBitWidth();
      } else {
        assert(retTy->isFloatingPointTy() &&
               "Floating point type expected here");
        size = retTy->getPrimitiveSizeInBits();
      }
      // PTX ABI requires all scalar return values to be at least 32
      // bits in size. fp16 normally uses .b16 as its storage type in
      // PTX, so its size must be adjusted here, too.
      if (size < 32)
        size = 32;

      O << ".param .b" << size << " _";
    } else if (isa<PointerType>(retTy)) {
      O << ".param .b" << PtrVT.getSizeInBits() << " _";
    } else if (retTy->isAggregateType() || retTy->isVectorTy() ||
               retTy->isIntegerTy(128)) {
      O << ".param .align " << (retAlignment ? retAlignment->value() : 0)
        << " .b8 _[" << DL.getTypeAllocSize(retTy) << "]";
    } else {
      llvm_unreachable("Unknown return type");
    }
    O << ") ";
  }
  O << "_ (";

  bool first = true;

  unsigned OIdx = 0;
  for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
    Type *Ty = Args[i].Ty;
    if (!first) {
      O << ", ";
    }
    first = false;

    if (!Outs[OIdx].Flags.isByVal()) {
      if (Ty->isAggregateType() || Ty->isVectorTy() || Ty->isIntegerTy(128)) {
        unsigned align = 0;
        const CallInst *CallI = cast<CallInst>(&CB);
        // +1 because index 0 is reserved for return type alignment
        if (!getAlign(*CallI, i + 1, align))
          align = DL.getABITypeAlignment(Ty);
        unsigned sz = DL.getTypeAllocSize(Ty);
        O << ".param .align " << align << " .b8 ";
        O << "_";
        O << "[" << sz << "]";
        // update the index for Outs
        SmallVector<EVT, 16> vtparts;
        ComputeValueVTs(*this, DL, Ty, vtparts);
        if (unsigned len = vtparts.size())
          OIdx += len - 1;
        continue;
      }
      // i8 types in IR will be i16 types in SDAG
      assert((getValueType(DL, Ty) == Outs[OIdx].VT ||
              (getValueType(DL, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
             "type mismatch between callee prototype and arguments");
      // scalar type
      unsigned sz = 0;
      if (isa<IntegerType>(Ty)) {
        sz = cast<IntegerType>(Ty)->getBitWidth();
        if (sz < 32)
          sz = 32;
      } else if (isa<PointerType>(Ty)) {
        sz = PtrVT.getSizeInBits();
      } else if (Ty->isHalfTy())
        // PTX ABI requires all scalar parameters to be at least 32
        // bits in size. fp16 normally uses .b16 as its storage type
        // in PTX, so its size must be adjusted here, too.
        sz = 32;
      else
        sz = Ty->getPrimitiveSizeInBits();
      O << ".param .b" << sz << " ";
      O << "_";
      continue;
    }
    auto *PTy = dyn_cast<PointerType>(Ty);
    assert(PTy && "Param with byval attribute should be a pointer type");
    Type *ETy = PTy->getElementType();

    Align align = Outs[OIdx].Flags.getNonZeroByValAlign();
    unsigned sz = DL.getTypeAllocSize(ETy);
    O << ".param .align " << align.value() << " .b8 ";
1348 O << "_";
1349 O << "[" << sz << "]";
1350 }
1351 O << ");";
1352 return O.str();
1353 }
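// For example, for an indirect call with signature "float (int, char)" the
// string built above would look roughly like (illustrative; the trailing
// number comes from uniqueCallSite):
//   prototype_0 : .callprototype (.param .b32 _) _ (.param .b32 _, .param .b32 _);
// Note that both the return value and the sub-32-bit char parameter are
// widened to .b32 per the PTX ABI rules handled above.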
1354
1355 Align NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
1356 const CallBase *CB, Type *Ty,
1357 unsigned Idx,
1358 const DataLayout &DL) const {
1359 if (!CB) {
1360 // There is no call base; fall back to the ABI type alignment.
1361 return DL.getABITypeAlign(Ty);
1362 }
1363
1364 unsigned Alignment = 0;
1365 const Function *DirectCallee = CB->getCalledFunction();
1366
1367 if (!DirectCallee) {
1368 // We don't have a direct function symbol, but that may be because of
1369 // constant cast instructions in the call.
1370
1371 // With bitcast'd call targets, the instruction will be the call
1372 if (const auto *CI = dyn_cast<CallInst>(CB)) {
1373 // Check if we have call alignment metadata
1374 if (getAlign(*CI, Idx, Alignment))
1375 return Align(Alignment);
1376
1377 const Value *CalleeV = CI->getCalledOperand();
1378 // Ignore any bitcast instructions
1379 while (isa<ConstantExpr>(CalleeV)) {
1380 const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
1381 if (!CE->isCast())
1382 break;
1383 // Look through the bitcast
1384 CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);
1385 }
1386
1387 // We have now looked past all of the bitcasts. Do we finally have a
1388 // Function?
1389 if (const auto *CalleeF = dyn_cast<Function>(CalleeV))
1390 DirectCallee = CalleeF;
1391 }
1392 }
1393
1394 // Check for function alignment information if we found that the
1395 // ultimate target is a Function
1396 if (DirectCallee)
1397 if (getAlign(*DirectCallee, Idx, Alignment))
1398 return Align(Alignment);
1399
1400 // Call is indirect or alignment information is not available, fall back to
1401 // the ABI type alignment
1402 return DL.getABITypeAlign(Ty);
1403 }
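// For example, with a call through a constant bitcast such as (illustrative
// IR; @f is a hypothetical callee):
//   call float bitcast (float (i64)* @f to float (i32)*)(i32 %x)
// the loop above strips the bitcast so that @f's alignment annotations can
// still be queried.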
1404
LowerCall(TargetLowering::CallLoweringInfo & CLI,SmallVectorImpl<SDValue> & InVals) const1405 SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1406 SmallVectorImpl<SDValue> &InVals) const {
1407 SelectionDAG &DAG = CLI.DAG;
1408 SDLoc dl = CLI.DL;
1409 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1410 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1411 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1412 SDValue Chain = CLI.Chain;
1413 SDValue Callee = CLI.Callee;
1414 bool &isTailCall = CLI.IsTailCall;
1415 ArgListTy &Args = CLI.getArgs();
1416 Type *RetTy = CLI.RetTy;
1417 const CallBase *CB = CLI.CB;
1418 const DataLayout &DL = DAG.getDataLayout();
1419
1420 bool isABI = (STI.getSmVersion() >= 20);
1421 assert(isABI && "Non-ABI compilation is not supported");
1422 if (!isABI)
1423 return Chain;
1424
1425 SDValue tempChain = Chain;
1426 Chain = DAG.getCALLSEQ_START(Chain, uniqueCallSite, 0, dl);
1427 SDValue InFlag = Chain.getValue(1);
1428
1429 unsigned paramCount = 0;
1430 // Args.size() and Outs.size() need not match.
1431 // Outs.size() will be larger
1432 // * if there is an aggregate argument with multiple fields (each field
1433 // showing up separately in Outs)
1434 // * if there is a vector argument with more than typical vector-length
1435 // elements (generally if more than 4) where each vector element is
1436 // individually present in Outs.
1437 // So a different index should be used for indexing into Outs/OutVals.
1438 // See similar issue in LowerFormalArguments.
1439 unsigned OIdx = 0;
1440 // Declare the .params or .reg needed to pass values
1441 // to the function
1442 for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
1443 EVT VT = Outs[OIdx].VT;
1444 Type *Ty = Args[i].Ty;
1445
1446 if (!Outs[OIdx].Flags.isByVal()) {
1447 SmallVector<EVT, 16> VTs;
1448 SmallVector<uint64_t, 16> Offsets;
1449 ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets);
1450 Align ArgAlign = getArgumentAlignment(Callee, CB, Ty, paramCount + 1, DL);
1451 unsigned AllocSize = DL.getTypeAllocSize(Ty);
1452 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1453 bool NeedAlign; // Does argument declaration specify alignment?
1454 if (Ty->isAggregateType() || Ty->isVectorTy() || Ty->isIntegerTy(128)) {
1455 // declare .param .align <align> .b8 .param<n>[<size>];
1456 SDValue DeclareParamOps[] = {
1457 Chain, DAG.getConstant(ArgAlign.value(), dl, MVT::i32),
1458 DAG.getConstant(paramCount, dl, MVT::i32),
1459 DAG.getConstant(AllocSize, dl, MVT::i32), InFlag};
1460 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
1461 DeclareParamOps);
1462 NeedAlign = true;
1463 } else {
1464 // declare .param .b<size> .param<n>;
1465 if ((VT.isInteger() || VT.isFloatingPoint()) && AllocSize < 4) {
1466 // PTX ABI requires integral types to be at least 32 bits in
1467 // size. FP16 is loaded/stored using i16, so it's handled
1468 // here as well.
1469 AllocSize = 4;
1470 }
1471 SDValue DeclareScalarParamOps[] = {
1472 Chain, DAG.getConstant(paramCount, dl, MVT::i32),
1473 DAG.getConstant(AllocSize * 8, dl, MVT::i32),
1474 DAG.getConstant(0, dl, MVT::i32), InFlag};
1475 Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
1476 DeclareScalarParamOps);
1477 NeedAlign = false;
1478 }
1479 InFlag = Chain.getValue(1);
1480
1481 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter
1482 // than 32-bits are sign extended or zero extended, depending on
1483 // whether they are signed or unsigned types. This case applies
1484 // only to scalar parameters and not to aggregate values.
1485 bool ExtendIntegerParam =
1486 Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Ty) < 32;
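// E.g. an i8 argument therefore travels as a full .b32 param (illustrative):
//   .param .b32 param0;
//   st.param.b32 [param0+0], %r1;  // %r1 holds the sign/zero-extended i8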
1487
1488 auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, ArgAlign);
1489 SmallVector<SDValue, 6> StoreOperands;
1490 for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
1491 // New store.
1492 if (VectorInfo[j] & PVF_FIRST) {
1493 assert(StoreOperands.empty() && "Unfinished preceding store.");
1494 StoreOperands.push_back(Chain);
1495 StoreOperands.push_back(DAG.getConstant(paramCount, dl, MVT::i32));
1496 StoreOperands.push_back(DAG.getConstant(Offsets[j], dl, MVT::i32));
1497 }
1498
1499 EVT EltVT = VTs[j];
1500 SDValue StVal = OutVals[OIdx];
1501 if (ExtendIntegerParam) {
1502 assert(VTs.size() == 1 && "Scalar can't have multiple parts.");
1503 // zext/sext to i32
1504 StVal = DAG.getNode(Outs[OIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
1505 : ISD::ZERO_EXTEND,
1506 dl, MVT::i32, StVal);
1507 } else if (EltVT.getSizeInBits() < 16) {
1508 // Use 16-bit registers for small stores as it's the
1509 // smallest general purpose register size supported by NVPTX.
1510 StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
1511 }
1512
1513 // Record the value to store.
1514 StoreOperands.push_back(StVal);
1515
1516 if (VectorInfo[j] & PVF_LAST) {
1517 unsigned NumElts = StoreOperands.size() - 3;
1518 NVPTXISD::NodeType Op;
1519 switch (NumElts) {
1520 case 1:
1521 Op = NVPTXISD::StoreParam;
1522 break;
1523 case 2:
1524 Op = NVPTXISD::StoreParamV2;
1525 break;
1526 case 4:
1527 Op = NVPTXISD::StoreParamV4;
1528 break;
1529 default:
1530 llvm_unreachable("Invalid vector info.");
1531 }
1532
1533 StoreOperands.push_back(InFlag);
1534
1535 // Adjust type of the store op if we've extended the scalar
1536 // return value.
1537 EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : VTs[j];
1538 MaybeAlign EltAlign;
1539 if (NeedAlign)
1540 EltAlign = commonAlignment(ArgAlign, Offsets[j]);
1541
1542 Chain = DAG.getMemIntrinsicNode(
1543 Op, dl, DAG.getVTList(MVT::Other, MVT::Glue), StoreOperands,
1544 TheStoreType, MachinePointerInfo(), EltAlign,
1545 MachineMemOperand::MOStore);
1546 InFlag = Chain.getValue(1);
1547
1548 // Cleanup.
1549 StoreOperands.clear();
1550 }
1551 ++OIdx;
1552 }
1553 assert(StoreOperands.empty() && "Unfinished parameter store.");
1554 if (VTs.size() > 0)
1555 --OIdx;
1556 ++paramCount;
1557 continue;
1558 }
1559
1560 // ByVal arguments
1561 SmallVector<EVT, 16> VTs;
1562 SmallVector<uint64_t, 16> Offsets;
1563 auto *PTy = dyn_cast<PointerType>(Args[i].Ty);
1564 assert(PTy && "Type of a byval parameter should be pointer");
1565 ComputePTXValueVTs(*this, DL, PTy->getElementType(), VTs, &Offsets, 0);
1566
1567 // declare .param .align <align> .b8 .param<n>[<size>];
1568 unsigned sz = Outs[OIdx].Flags.getByValSize();
1569 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1570 Align ArgAlign = Outs[OIdx].Flags.getNonZeroByValAlign();
1571 // The ByValAlign in the Outs[OIdx].Flags is always set at this point,
1572 // so we don't need to worry about natural alignment or not.
1573 // See TargetLowering::LowerCallTo().
1574
1575 // Enforce minimum alignment of 4 to work around ptxas miscompile
1576 // for sm_50+. See corresponding alignment adjustment in
1577 // emitFunctionParamList() for details.
1578 if (ArgAlign < Align(4))
1579 ArgAlign = Align(4);
1580 SDValue DeclareParamOps[] = {
1581 Chain, DAG.getConstant(ArgAlign.value(), dl, MVT::i32),
1582 DAG.getConstant(paramCount, dl, MVT::i32),
1583 DAG.getConstant(sz, dl, MVT::i32), InFlag};
1584 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
1585 DeclareParamOps);
1586 InFlag = Chain.getValue(1);
1587 for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
1588 EVT elemtype = VTs[j];
1589 int curOffset = Offsets[j];
1590 unsigned PartAlign = GreatestCommonDivisor64(ArgAlign.value(), curOffset);
1591 auto PtrVT = getPointerTy(DL);
1592 SDValue srcAddr = DAG.getNode(ISD::ADD, dl, PtrVT, OutVals[OIdx],
1593 DAG.getConstant(curOffset, dl, PtrVT));
1594 SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
1595 MachinePointerInfo(), PartAlign);
1596 if (elemtype.getSizeInBits() < 16) {
1597 theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
1598 }
1599 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1600 SDValue CopyParamOps[] = { Chain,
1601 DAG.getConstant(paramCount, dl, MVT::i32),
1602 DAG.getConstant(curOffset, dl, MVT::i32),
1603 theVal, InFlag };
1604 Chain = DAG.getMemIntrinsicNode(
1605 NVPTXISD::StoreParam, dl, CopyParamVTs, CopyParamOps, elemtype,
1606 MachinePointerInfo(), /* Align */ None, MachineMemOperand::MOStore);
1607
1608 InFlag = Chain.getValue(1);
1609 }
1610 ++paramCount;
1611 }
1612
1613 GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
1614 MaybeAlign retAlignment = None;
1615
1616 // Handle Result
1617 if (Ins.size() > 0) {
1618 SmallVector<EVT, 16> resvtparts;
1619 ComputeValueVTs(*this, DL, RetTy, resvtparts);
1620
1621 // Declare
1622 // .param .align 16 .b8 retval0[<size-in-bytes>], or
1623 // .param .b<size-in-bits> retval0
1624 unsigned resultsz = DL.getTypeAllocSizeInBits(RetTy);
1625 // Emit ".param .b<size-in-bits> retval0" instead of byte arrays only for
1626 // these three types to match the logic in
1627 // NVPTXAsmPrinter::printReturnValStr and NVPTXTargetLowering::getPrototype.
1628 // Plus, this behavior is consistent with nvcc's.
1629 if (RetTy->isFloatingPointTy() || RetTy->isPointerTy() ||
1630 (RetTy->isIntegerTy() && !RetTy->isIntegerTy(128))) {
1631 // Scalar needs to be at least 32bit wide
1632 if (resultsz < 32)
1633 resultsz = 32;
1634 SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1635 SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
1636 DAG.getConstant(resultsz, dl, MVT::i32),
1637 DAG.getConstant(0, dl, MVT::i32), InFlag };
1638 Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
1639 DeclareRetOps);
1640 InFlag = Chain.getValue(1);
1641 } else {
1642 retAlignment = getArgumentAlignment(Callee, CB, RetTy, 0, DL);
1643 assert(retAlignment && "retAlignment is guaranteed to be set");
1644 SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1645 SDValue DeclareRetOps[] = {
1646 Chain, DAG.getConstant(retAlignment->value(), dl, MVT::i32),
1647 DAG.getConstant(resultsz / 8, dl, MVT::i32),
1648 DAG.getConstant(0, dl, MVT::i32), InFlag};
1649 Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
1650 DeclareRetOps);
1651 InFlag = Chain.getValue(1);
1652 }
1653 }
1654
1655 // Both indirect calls and libcalls have nullptr Func. In order to distinguish
1656 // between them we must rely on the call site value which is valid for
1657 // indirect calls but is always null for libcalls.
1658 bool isIndirectCall = !Func && CB;
1659
1660 if (isa<ExternalSymbolSDNode>(Callee)) {
1661 Function* CalleeFunc = nullptr;
1662
1663 // Try to find the callee in the current module.
1664 Callee = DAG.getSymbolFunctionGlobalAddress(Callee, &CalleeFunc);
1665 assert(CalleeFunc != nullptr && "Libcall callee must be set.");
1666
1667 // Set the "libcall callee" attribute to indicate that the function
1668 // must always have a declaration.
1669 CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");
1670 }
1671
1672 if (isIndirectCall) {
1673 // This is indirect function call case : PTX requires a prototype of the
1674 // form
1675 // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
1676 // to be emitted, and the label has to be used as the last arg of the call
1677 // instruction.
1678 // The prototype is embedded in a string and put as the operand for a
1679 // CallPrototype SDNode which will print out to the value of the string.
1680 SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1681 std::string Proto = getPrototype(DL, RetTy, Args, Outs, retAlignment, *CB);
1682 const char *ProtoStr =
1683 nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
1684 SDValue ProtoOps[] = {
1685 Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
1686 };
1687 Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);
1688 InFlag = Chain.getValue(1);
1689 }
1690 // Op to just print "call"
1691 SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1692 SDValue PrintCallOps[] = {
1693 Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InFlag
1694 };
1695 // We model convergent calls as separate opcodes.
1696 unsigned Opcode = isIndirectCall ? NVPTXISD::PrintCall : NVPTXISD::PrintCallUni;
1697 if (CLI.IsConvergent)
1698 Opcode = Opcode == NVPTXISD::PrintCallUni ? NVPTXISD::PrintConvergentCallUni
1699 : NVPTXISD::PrintConvergentCall;
1700 Chain = DAG.getNode(Opcode, dl, PrintCallVTs, PrintCallOps);
1701 InFlag = Chain.getValue(1);
1702
1703 // Ops to print out the function name
1704 SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1705 SDValue CallVoidOps[] = { Chain, Callee, InFlag };
1706 Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);
1707 InFlag = Chain.getValue(1);
1708
1709 // Ops to print out the param list
1710 SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1711 SDValue CallArgBeginOps[] = { Chain, InFlag };
1712 Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
1713 CallArgBeginOps);
1714 InFlag = Chain.getValue(1);
1715
1716 for (unsigned i = 0, e = paramCount; i != e; ++i) {
1717 unsigned opcode;
1718 if (i == (e - 1))
1719 opcode = NVPTXISD::LastCallArg;
1720 else
1721 opcode = NVPTXISD::CallArg;
1722 SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1723 SDValue CallArgOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
1724 DAG.getConstant(i, dl, MVT::i32), InFlag };
1725 Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
1726 InFlag = Chain.getValue(1);
1727 }
1728 SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1729 SDValue CallArgEndOps[] = { Chain,
1730 DAG.getConstant(isIndirectCall ? 0 : 1, dl, MVT::i32),
1731 InFlag };
1732 Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
1733 InFlag = Chain.getValue(1);
1734
1735 if (isIndirectCall) {
1736 SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1737 SDValue PrototypeOps[] = { Chain,
1738 DAG.getConstant(uniqueCallSite, dl, MVT::i32),
1739 InFlag };
1740 Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
1741 InFlag = Chain.getValue(1);
1742 }
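// At this point the chain carries all the pieces of a PTX call sequence,
// which ultimately prints roughly as (illustrative, for a direct call of
// "i32 foo(i32)"):
//   .param .b32 param0;
//   st.param.b32 [param0+0], %r1;
//   .param .b32 retval0;
//   call.uni (retval0), foo, (param0);
//   ld.param.b32 %r2, [retval0+0];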
1743
1744 SmallVector<SDValue, 16> ProxyRegOps;
1745 SmallVector<Optional<MVT>, 16> ProxyRegTruncates;
1746
1747 // Generate loads from param memory/moves from registers for result
1748 if (Ins.size() > 0) {
1749 SmallVector<EVT, 16> VTs;
1750 SmallVector<uint64_t, 16> Offsets;
1751 ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets, 0);
1752 assert(VTs.size() == Ins.size() && "Bad value decomposition");
1753
1754 Align RetAlign = getArgumentAlignment(Callee, CB, RetTy, 0, DL);
1755 auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
1756
1757 SmallVector<EVT, 6> LoadVTs;
1758 int VecIdx = -1; // Index of the first element of the vector.
1759
1760 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
1761 // 32-bits are sign extended or zero extended, depending on whether
1762 // they are signed or unsigned types.
1763 bool ExtendIntegerRetVal =
1764 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
1765
1766 for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
1767 bool needTruncate = false;
1768 EVT TheLoadType = VTs[i];
1769 EVT EltType = Ins[i].VT;
1770 Align EltAlign = commonAlignment(RetAlign, Offsets[i]);
1771 if (ExtendIntegerRetVal) {
1772 TheLoadType = MVT::i32;
1773 EltType = MVT::i32;
1774 needTruncate = true;
1775 } else if (TheLoadType.getSizeInBits() < 16) {
1776 if (VTs[i].isInteger())
1777 needTruncate = true;
1778 EltType = MVT::i16;
1779 }
1780
1781 // Record index of the very first element of the vector.
1782 if (VectorInfo[i] & PVF_FIRST) {
1783 assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");
1784 VecIdx = i;
1785 }
1786
1787 LoadVTs.push_back(EltType);
1788
1789 if (VectorInfo[i] & PVF_LAST) {
1790 unsigned NumElts = LoadVTs.size();
1791 LoadVTs.push_back(MVT::Other);
1792 LoadVTs.push_back(MVT::Glue);
1793 NVPTXISD::NodeType Op;
1794 switch (NumElts) {
1795 case 1:
1796 Op = NVPTXISD::LoadParam;
1797 break;
1798 case 2:
1799 Op = NVPTXISD::LoadParamV2;
1800 break;
1801 case 4:
1802 Op = NVPTXISD::LoadParamV4;
1803 break;
1804 default:
1805 llvm_unreachable("Invalid vector info.");
1806 }
1807
1808 SDValue LoadOperands[] = {
1809 Chain, DAG.getConstant(1, dl, MVT::i32),
1810 DAG.getConstant(Offsets[VecIdx], dl, MVT::i32), InFlag};
1811 SDValue RetVal = DAG.getMemIntrinsicNode(
1812 Op, dl, DAG.getVTList(LoadVTs), LoadOperands, TheLoadType,
1813 MachinePointerInfo(), EltAlign,
1814 MachineMemOperand::MOLoad);
1815
1816 for (unsigned j = 0; j < NumElts; ++j) {
1817 ProxyRegOps.push_back(RetVal.getValue(j));
1818
1819 if (needTruncate)
1820 ProxyRegTruncates.push_back(Optional<MVT>(Ins[VecIdx + j].VT));
1821 else
1822 ProxyRegTruncates.push_back(Optional<MVT>());
1823 }
1824
1825 Chain = RetVal.getValue(NumElts);
1826 InFlag = RetVal.getValue(NumElts + 1);
1827
1828 // Cleanup
1829 VecIdx = -1;
1830 LoadVTs.clear();
1831 }
1832 }
1833 }
1834
1835 Chain = DAG.getCALLSEQ_END(Chain,
1836 DAG.getIntPtrConstant(uniqueCallSite, dl, true),
1837 DAG.getIntPtrConstant(uniqueCallSite + 1, dl,
1838 true),
1839 InFlag, dl);
1840 InFlag = Chain.getValue(1);
1841 uniqueCallSite++;
1842
1843 // Append ProxyReg instructions to the chain to make sure that `callseq_end`
1844 // will not get lost. Otherwise, during libcall expansion, the nodes can
1845 // become dangling.
1846 for (unsigned i = 0; i < ProxyRegOps.size(); ++i) {
1847 SDValue Ret = DAG.getNode(
1848 NVPTXISD::ProxyReg, dl,
1849 DAG.getVTList(ProxyRegOps[i].getSimpleValueType(), MVT::Other, MVT::Glue),
1850 { Chain, ProxyRegOps[i], InFlag }
1851 );
1852
1853 Chain = Ret.getValue(1);
1854 InFlag = Ret.getValue(2);
1855
1856 if (ProxyRegTruncates[i].hasValue()) {
1857 Ret = DAG.getNode(ISD::TRUNCATE, dl, ProxyRegTruncates[i].getValue(), Ret);
1858 }
1859
1860 InVals.push_back(Ret);
1861 }
1862
1863 // Set isTailCall to false for now, until we figure out how to express
1864 // tail call optimization in PTX.
1865 isTailCall = false;
1866 return Chain;
1867 }
1868
1869 // By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
1870 // (see LegalizeDAG.cpp). This is slow and uses local memory.
1871 // We use extract/insert/build vector just as LegalizeOp() did in LLVM 2.5.
1872 SDValue
1873 NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
1874 SDNode *Node = Op.getNode();
1875 SDLoc dl(Node);
1876 SmallVector<SDValue, 8> Ops;
1877 unsigned NumOperands = Node->getNumOperands();
1878 for (unsigned i = 0; i < NumOperands; ++i) {
1879 SDValue SubOp = Node->getOperand(i);
1880 EVT VVT = SubOp.getNode()->getValueType(0);
1881 EVT EltVT = VVT.getVectorElementType();
1882 unsigned NumSubElem = VVT.getVectorNumElements();
1883 for (unsigned j = 0; j < NumSubElem; ++j) {
1884 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
1885 DAG.getIntPtrConstant(j, dl)));
1886 }
1887 }
1888 return DAG.getBuildVector(Node->getValueType(0), dl, Ops);
1889 }
1890
1891 // We can init constant f16x2 with a single .b32 move. Normally it
1892 // would get lowered as two constant loads and vector-packing move.
1893 // mov.b16 %h1, 0x4000;
1894 // mov.b16 %h2, 0x3C00;
1895 // mov.b32 %hh2, {%h2, %h1};
1896 // Instead we want just a constant move:
1897 // mov.b32 %hh2, 0x40003C00
1898 //
1899 // This results in better SASS code with CUDA 7.x. Ptxas in CUDA 8.0
1900 // generates good SASS in both cases.
1901 SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1902 SelectionDAG &DAG) const {
1904 if (!(Op->getValueType(0) == MVT::v2f16 &&
1905 isa<ConstantFPSDNode>(Op->getOperand(0)) &&
1906 isa<ConstantFPSDNode>(Op->getOperand(1))))
1907 return Op;
1908
1909 APInt E0 =
1910 cast<ConstantFPSDNode>(Op->getOperand(0))->getValueAPF().bitcastToAPInt();
1911 APInt E1 =
1912 cast<ConstantFPSDNode>(Op->getOperand(1))->getValueAPF().bitcastToAPInt();
1913 SDValue Const =
1914 DAG.getConstant(E1.zext(32).shl(16) | E0.zext(32), SDLoc(Op), MVT::i32);
1915 return DAG.getNode(ISD::BITCAST, SDLoc(Op), MVT::v2f16, Const);
1916 }
1917
1918 SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
1919 SelectionDAG &DAG) const {
1920 SDValue Index = Op->getOperand(1);
1921 // Constant index will be matched by tablegen.
1922 if (isa<ConstantSDNode>(Index.getNode()))
1923 return Op;
1924
1925 // Extract individual elements and select one of them.
1926 SDValue Vector = Op->getOperand(0);
1927 EVT VectorVT = Vector.getValueType();
1928 assert(VectorVT == MVT::v2f16 && "Unexpected vector type.");
1929 EVT EltVT = VectorVT.getVectorElementType();
1930
1931 SDLoc dl(Op.getNode());
1932 SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
1933 DAG.getIntPtrConstant(0, dl));
1934 SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
1935 DAG.getIntPtrConstant(1, dl));
1936 return DAG.getSelectCC(dl, Index, DAG.getIntPtrConstant(0, dl), E0, E1,
1937 ISD::CondCode::SETEQ);
1938 }
1939
1940 /// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
1941 /// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
1942 /// amount, or
1943 /// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
1944 /// amount.
1945 SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
1946 SelectionDAG &DAG) const {
1947 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
1948 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
1949
1950 EVT VT = Op.getValueType();
1951 unsigned VTBits = VT.getSizeInBits();
1952 SDLoc dl(Op);
1953 SDValue ShOpLo = Op.getOperand(0);
1954 SDValue ShOpHi = Op.getOperand(1);
1955 SDValue ShAmt = Op.getOperand(2);
1956 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
1957
1958 if (VTBits == 32 && STI.getSmVersion() >= 35) {
1959 // For 32-bit shifts on sm_35 and later, we can use the funnel shift 'shf' instruction.
1960 // {dHi, dLo} = {aHi, aLo} >> Amt
1961 // dHi = aHi >> Amt
1962 // dLo = shf.r.clamp aLo, aHi, Amt
1963
1964 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
1965 SDValue Lo = DAG.getNode(NVPTXISD::FUN_SHFR_CLAMP, dl, VT, ShOpLo, ShOpHi,
1966 ShAmt);
1967
1968 SDValue Ops[2] = { Lo, Hi };
1969 return DAG.getMergeValues(Ops, dl);
1970 } else {
1972 // {dHi, dLo} = {aHi, aLo} >> Amt
1973 // - if (Amt>=size) then
1974 // dLo = aHi >> (Amt-size)
1975 // dHi = aHi >> Amt (this is either all 0 or all 1)
1976 // else
1977 // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
1978 // dHi = aHi >> Amt
1979
1980 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
1981 DAG.getConstant(VTBits, dl, MVT::i32),
1982 ShAmt);
1983 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
1984 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
1985 DAG.getConstant(VTBits, dl, MVT::i32));
1986 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
1987 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
1988 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
1989
1990 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
1991 DAG.getConstant(VTBits, dl, MVT::i32),
1992 ISD::SETGE);
1993 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
1994 SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
1995
1996 SDValue Ops[2] = { Lo, Hi };
1997 return DAG.getMergeValues(Ops, dl);
1998 }
1999 }
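// Worked example for the generic path above: splitting an i64 logical shift
// right by 8 into i32 halves {aHi, aLo} gives
//   dLo = (aLo >> 8) | (aHi << 24)
//   dHi =  aHi >> 8
// On sm_35+ the same dLo is produced by a single shf.r.clamp instruction.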
2000
2001 /// LowerShiftLeftParts - Lower SHL_PARTS, which
2002 /// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
2003 /// amount, or
2004 /// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
2005 /// amount.
2006 SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
2007 SelectionDAG &DAG) const {
2008 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2009 assert(Op.getOpcode() == ISD::SHL_PARTS);
2010
2011 EVT VT = Op.getValueType();
2012 unsigned VTBits = VT.getSizeInBits();
2013 SDLoc dl(Op);
2014 SDValue ShOpLo = Op.getOperand(0);
2015 SDValue ShOpHi = Op.getOperand(1);
2016 SDValue ShAmt = Op.getOperand(2);
2017
2018 if (VTBits == 32 && STI.getSmVersion() >= 35) {
2019 // For 32-bit shifts on sm_35 and later, we can use the funnel shift 'shf' instruction.
2020 // {dHi, dLo} = {aHi, aLo} << Amt
2021 // dHi = shf.l.clamp aLo, aHi, Amt
2022 // dLo = aLo << Amt
2023
2024 SDValue Hi = DAG.getNode(NVPTXISD::FUN_SHFL_CLAMP, dl, VT, ShOpLo, ShOpHi,
2025 ShAmt);
2026 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2027
2028 SDValue Ops[2] = { Lo, Hi };
2029 return DAG.getMergeValues(Ops, dl);
2030 } else {
2032 // {dHi, dLo} = {aHi, aLo} << Amt
2033 // - if (Amt>=size) then
2034 //      dLo = aLo << Amt (all 0)
2035 //      dHi = aLo << (Amt-size)
2036 // else
2037 // dLo = aLo << Amt
2038 // dHi = (aHi << Amt) | (aLo >> (size-Amt))
2039
2040 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2041 DAG.getConstant(VTBits, dl, MVT::i32),
2042 ShAmt);
2043 SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
2044 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2045 DAG.getConstant(VTBits, dl, MVT::i32));
2046 SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
2047 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2048 SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
2049
2050 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
2051 DAG.getConstant(VTBits, dl, MVT::i32),
2052 ISD::SETGE);
2053 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2054 SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
2055
2056 SDValue Ops[2] = { Lo, Hi };
2057 return DAG.getMergeValues(Ops, dl);
2058 }
2059 }
2060
2061 SDValue NVPTXTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2062 EVT VT = Op.getValueType();
2063
2064 if (VT == MVT::f32)
2065 return LowerFROUND32(Op, DAG);
2066
2067 if (VT == MVT::f64)
2068 return LowerFROUND64(Op, DAG);
2069
2070 llvm_unreachable("unhandled type");
2071 }
2072
2073 // This is the rounding method used in CUDA libdevice, in C-like code:
2074 // float roundf(float A)
2075 // {
2076 // float RoundedA = (float) (int) ( A > 0 ? (A + 0.5f) : (A - 0.5f));
2077 // RoundedA = abs(A) > 0x1.0p23 ? A : RoundedA;
2078 // return abs(A) < 0.5 ? (float)(int)A : RoundedA;
2079 // }
2080 SDValue NVPTXTargetLowering::LowerFROUND32(SDValue Op,
2081 SelectionDAG &DAG) const {
2082 SDLoc SL(Op);
2083 SDValue A = Op.getOperand(0);
2084 EVT VT = Op.getValueType();
2085
2086 SDValue AbsA = DAG.getNode(ISD::FABS, SL, VT, A);
2087
2088 // RoundedA = (float) (int) ( A > 0 ? (A + 0.5f) : (A - 0.5f))
2089 SDValue Bitcast = DAG.getNode(ISD::BITCAST, SL, MVT::i32, A);
2090 const int SignBitMask = 0x80000000;
2091 SDValue Sign = DAG.getNode(ISD::AND, SL, MVT::i32, Bitcast,
2092 DAG.getConstant(SignBitMask, SL, MVT::i32));
2093 const int PointFiveInBits = 0x3F000000;
2094 SDValue PointFiveWithSignRaw =
2095 DAG.getNode(ISD::OR, SL, MVT::i32, Sign,
2096 DAG.getConstant(PointFiveInBits, SL, MVT::i32));
2097 SDValue PointFiveWithSign =
2098 DAG.getNode(ISD::BITCAST, SL, VT, PointFiveWithSignRaw);
2099 SDValue AdjustedA = DAG.getNode(ISD::FADD, SL, VT, A, PointFiveWithSign);
2100 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);
2101
2102 // RoundedA = abs(A) > 0x1.0p23 ? A : RoundedA;
2103 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2104 SDValue IsLarge =
2105 DAG.getSetCC(SL, SetCCVT, AbsA, DAG.getConstantFP(pow(2.0, 23.0), SL, VT),
2106 ISD::SETOGT);
2107 RoundedA = DAG.getNode(ISD::SELECT, SL, VT, IsLarge, A, RoundedA);
2108
2109 // return abs(A) < 0.5 ? (float)(int)A : RoundedA;
2110 SDValue IsSmall = DAG.getSetCC(SL, SetCCVT, AbsA,
2111 DAG.getConstantFP(0.5, SL, VT), ISD::SETOLT);
2112 SDValue RoundedAForSmallA = DAG.getNode(ISD::FTRUNC, SL, VT, A);
2113 return DAG.getNode(ISD::SELECT, SL, VT, IsSmall, RoundedAForSmallA, RoundedA);
2114 }
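// Worked example: for A = -1.5f the sign bit yields PointFiveWithSign =
// -0.5f, so AdjustedA = -2.0f and RoundedA = ftrunc(-2.0f) = -2.0f. Since
// abs(A) is neither > 2^23 nor < 0.5, -2.0f is returned, i.e. halfway cases
// round away from zero as in roundf().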
2115
2116 // The implementation of round(double) is similar to that of round(float) in
2117 // that they both separate the value range into three regions and use a method
2118 // specific to the region to round the values. However, round(double) first
2119 // calculates the round of the absolute value and then adds the sign back while
2120 // round(float) directly rounds the value with sign.
2121 SDValue NVPTXTargetLowering::LowerFROUND64(SDValue Op,
2122 SelectionDAG &DAG) const {
2123 SDLoc SL(Op);
2124 SDValue A = Op.getOperand(0);
2125 EVT VT = Op.getValueType();
2126
2127 SDValue AbsA = DAG.getNode(ISD::FABS, SL, VT, A);
2128
2129 // double RoundedA = (double) (int) (abs(A) + 0.5f);
2130 SDValue AdjustedA = DAG.getNode(ISD::FADD, SL, VT, AbsA,
2131 DAG.getConstantFP(0.5, SL, VT));
2132 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);
2133
2134 // RoundedA = abs(A) < 0.5 ? (double)0 : RoundedA;
2135 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2136 SDValue IsSmall = DAG.getSetCC(SL, SetCCVT, AbsA,
2137 DAG.getConstantFP(0.5, SL, VT), ISD::SETOLT);
2138 RoundedA = DAG.getNode(ISD::SELECT, SL, VT, IsSmall,
2139 DAG.getConstantFP(0, SL, VT),
2140 RoundedA);
2141
2142 // Add the sign back to RoundedA.
2143 RoundedA = DAG.getNode(ISD::FCOPYSIGN, SL, VT, RoundedA, A);
2145
2146 // RoundedA = abs(A) > 0x1.0p52 ? A : RoundedA;
2147 SDValue IsLarge =
2148 DAG.getSetCC(SL, SetCCVT, AbsA, DAG.getConstantFP(pow(2.0, 52.0), SL, VT),
2149 ISD::SETOGT);
2150 return DAG.getNode(ISD::SELECT, SL, VT, IsLarge, A, RoundedA);
2151 }
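// Worked example: for A = -0.3 we get RoundedA = ftrunc(0.3 + 0.5) = 0.0,
// the IsSmall select keeps 0.0, and FCOPYSIGN restores the sign to produce
// -0.0, preserving signed zero just like libdevice's round().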
2152
2155 SDValue
2156 NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
2157 switch (Op.getOpcode()) {
2158 case ISD::RETURNADDR:
2159 return SDValue();
2160 case ISD::FRAMEADDR:
2161 return SDValue();
2162 case ISD::GlobalAddress:
2163 return LowerGlobalAddress(Op, DAG);
2164 case ISD::INTRINSIC_W_CHAIN:
2165 return Op;
2166 case ISD::BUILD_VECTOR:
2167 return LowerBUILD_VECTOR(Op, DAG);
2168 case ISD::EXTRACT_SUBVECTOR:
2169 return Op;
2170 case ISD::EXTRACT_VECTOR_ELT:
2171 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2172 case ISD::CONCAT_VECTORS:
2173 return LowerCONCAT_VECTORS(Op, DAG);
2174 case ISD::STORE:
2175 return LowerSTORE(Op, DAG);
2176 case ISD::LOAD:
2177 return LowerLOAD(Op, DAG);
2178 case ISD::SHL_PARTS:
2179 return LowerShiftLeftParts(Op, DAG);
2180 case ISD::SRA_PARTS:
2181 case ISD::SRL_PARTS:
2182 return LowerShiftRightParts(Op, DAG);
2183 case ISD::SELECT:
2184 return LowerSelect(Op, DAG);
2185 case ISD::FROUND:
2186 return LowerFROUND(Op, DAG);
2187 default:
2188 llvm_unreachable("Custom lowering not defined for operation");
2189 }
2190 }
2191
2192 SDValue NVPTXTargetLowering::LowerSelect(SDValue Op, SelectionDAG &DAG) const {
2193 SDValue Op0 = Op->getOperand(0);
2194 SDValue Op1 = Op->getOperand(1);
2195 SDValue Op2 = Op->getOperand(2);
2196 SDLoc DL(Op.getNode());
2197
2198 assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
2199
2200 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
2201 Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
2202 SDValue Select = DAG.getNode(ISD::SELECT, DL, MVT::i32, Op0, Op1, Op2);
2203 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Select);
2204
2205 return Trunc;
2206 }
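// PTX's selp instruction does not produce a predicate result, so the i1
// operands are widened to i32 and selected there, e.g. (illustrative):
//   selp.b32 %r3, %r1, %r2, %p1;
// with the result truncated back to i1 afterwards.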
2207
2208 SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
2209 if (Op.getValueType() == MVT::i1)
2210 return LowerLOADi1(Op, DAG);
2211
2212 // v2f16 is legal, so we can't rely on the legalizer to handle unaligned
2213 // loads; we have to handle them here.
2214 if (Op.getValueType() == MVT::v2f16) {
2215 LoadSDNode *Load = cast<LoadSDNode>(Op);
2216 EVT MemVT = Load->getMemoryVT();
2217 if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2218 MemVT, *Load->getMemOperand())) {
2219 SDValue Ops[2];
2220 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
2221 return DAG.getMergeValues(Ops, SDLoc(Op));
2222 }
2223 }
2224
2225 return SDValue();
2226 }
2227
2228 // v = ld i1* addr
2229 // =>
2230 // v1 = ld i8* addr (-> i16)
2231 // v = trunc i16 to i1
2232 SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
2233 SDNode *Node = Op.getNode();
2234 LoadSDNode *LD = cast<LoadSDNode>(Node);
2235 SDLoc dl(Node);
2236 assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
2237 assert(Node->getValueType(0) == MVT::i1 &&
2238 "Custom lowering for i1 load only");
2239 SDValue newLD = DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
2240 LD->getPointerInfo(), LD->getAlignment(),
2241 LD->getMemOperand()->getFlags());
2242 SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
2243 // The legalizer (the caller) is expecting two values from the legalized
2244 // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
2245 // in LegalizeDAG.cpp which also uses MergeValues.
2246 SDValue Ops[] = { result, LD->getChain() };
2247 return DAG.getMergeValues(Ops, dl);
2248 }
2249
2250 SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
2251 StoreSDNode *Store = cast<StoreSDNode>(Op);
2252 EVT VT = Store->getMemoryVT();
2253
2254 if (VT == MVT::i1)
2255 return LowerSTOREi1(Op, DAG);
2256
2257 // v2f16 is legal, so we can't rely on the legalizer to handle unaligned
2258 // stores; we have to handle them here.
2259 if (VT == MVT::v2f16 &&
2260 !allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2261 VT, *Store->getMemOperand()))
2262 return expandUnalignedStore(Store, DAG);
2263
2264 if (VT.isVector())
2265 return LowerSTOREVector(Op, DAG);
2266
2267 return SDValue();
2268 }
2269
2270 SDValue
2271 NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
2272 SDNode *N = Op.getNode();
2273 SDValue Val = N->getOperand(1);
2274 SDLoc DL(N);
2275 EVT ValVT = Val.getValueType();
2276
2277 if (ValVT.isVector()) {
2278 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
2279 // legal. We can (and should) split that into 2 stores of <2 x double> here
2280 // but I'm leaving that as a TODO for now.
2281 if (!ValVT.isSimple())
2282 return SDValue();
2283 switch (ValVT.getSimpleVT().SimpleTy) {
2284 default:
2285 return SDValue();
2286 case MVT::v2i8:
2287 case MVT::v2i16:
2288 case MVT::v2i32:
2289 case MVT::v2i64:
2290 case MVT::v2f16:
2291 case MVT::v2f32:
2292 case MVT::v2f64:
2293 case MVT::v4i8:
2294 case MVT::v4i16:
2295 case MVT::v4i32:
2296 case MVT::v4f16:
2297 case MVT::v4f32:
2298 case MVT::v8f16: // <4 x f16x2>
2299 // This is a "native" vector type
2300 break;
2301 }
2302
2303 MemSDNode *MemSD = cast<MemSDNode>(N);
2304 const DataLayout &TD = DAG.getDataLayout();
2305
2306 Align Alignment = MemSD->getAlign();
2307 Align PrefAlign =
2308 TD.getPrefTypeAlign(ValVT.getTypeForEVT(*DAG.getContext()));
2309 if (Alignment < PrefAlign) {
2310 // This store is not sufficiently aligned, so bail out and let this vector
2311 // store be scalarized. Note that we may still be able to emit smaller
2312 // vector stores. For example, if we are storing a <4 x float> with an
2313 // alignment of 8, this check will fail but the legalizer will try again
2314 // with 2 x <2 x float>, which will succeed with an alignment of 8.
2315 return SDValue();
2316 }
2317
2318 unsigned Opcode = 0;
2319 EVT EltVT = ValVT.getVectorElementType();
2320 unsigned NumElts = ValVT.getVectorNumElements();
2321
2322 // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
2323 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
2324 // stored type to i16 and propagate the "real" type as the memory type.
2325 bool NeedExt = false;
2326 if (EltVT.getSizeInBits() < 16)
2327 NeedExt = true;
2328
2329 bool StoreF16x2 = false;
2330 switch (NumElts) {
2331 default:
2332 return SDValue();
2333 case 2:
2334 Opcode = NVPTXISD::StoreV2;
2335 break;
2336 case 4:
2337 Opcode = NVPTXISD::StoreV4;
2338 break;
2339 case 8:
2340 // v8f16 is a special case. PTX doesn't have an st.v8.f16
2341 // instruction. Instead, we split the vector into v2f16 chunks and
2342 // store them with st.v4.b32.
2343 assert(EltVT == MVT::f16 && "Wrong type for the vector.");
2344 Opcode = NVPTXISD::StoreV4;
2345 StoreF16x2 = true;
2346 break;
2347 }
2348
2349 SmallVector<SDValue, 8> Ops;
2350
2351 // First is the chain
2352 Ops.push_back(N->getOperand(0));
2353
2354 if (StoreF16x2) {
2355 // Combine f16,f16 -> v2f16
2356 NumElts /= 2;
2357 for (unsigned i = 0; i < NumElts; ++i) {
2358 SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
2359 DAG.getIntPtrConstant(i * 2, DL));
2360 SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
2361 DAG.getIntPtrConstant(i * 2 + 1, DL));
2362 SDValue V2 = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f16, E0, E1);
2363 Ops.push_back(V2);
2364 }
2365 } else {
2366 // Then the split values
2367 for (unsigned i = 0; i < NumElts; ++i) {
2368 SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
2369 DAG.getIntPtrConstant(i, DL));
2370 if (NeedExt)
2371 ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
2372 Ops.push_back(ExtVal);
2373 }
2374 }
2375
2376 // Then any remaining arguments
2377 Ops.append(N->op_begin() + 2, N->op_end());
2378
2379 SDValue NewSt =
2380 DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
2381 MemSD->getMemoryVT(), MemSD->getMemOperand());
2382
2384 return NewSt;
2385 }
2386
2387 return SDValue();
2388 }
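// E.g. a sufficiently aligned store of <8 x half> is selected roughly as
// (illustrative; each %hh register holds one packed f16x2 pair):
//   st.v4.b32 [%rd1], {%hh1, %hh2, %hh3, %hh4};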
2389
2390 // st i1 v, addr
2391 // =>
2392 // v1 = zxt v to i16
2393 // st.u8 i16, addr
2394 SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
2395 SDNode *Node = Op.getNode();
2396 SDLoc dl(Node);
2397 StoreSDNode *ST = cast<StoreSDNode>(Node);
2398 SDValue Tmp1 = ST->getChain();
2399 SDValue Tmp2 = ST->getBasePtr();
2400 SDValue Tmp3 = ST->getValue();
2401 assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
2402 Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
2403 SDValue Result =
2404 DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
2405 ST->getAlignment(), ST->getMemOperand()->getFlags());
2406 return Result;
2407 }
2408
2409 SDValue
2410 NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
2411 std::string ParamSym;
2412 raw_string_ostream ParamStr(ParamSym);
2413
2414 ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
2415 ParamStr.flush();
2416
2417 std::string *SavedStr =
2418 nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
2419 return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
2420 }
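// For example, for a function named "foo" and idx == 1 this produces the
// symbol "foo_param_1", matching the parameter names the asm printer emits.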
2421
2422 // Check to see if the kernel argument is image*_t or sampler_t
2423
2424 static bool isImageOrSamplerVal(const Value *arg, const Module *context) {
2425 static const char *const specialTypes[] = { "struct._image2d_t",
2426 "struct._image3d_t",
2427 "struct._sampler_t" };
2428
2429 Type *Ty = arg->getType();
2430 auto *PTy = dyn_cast<PointerType>(Ty);
2431
2432 if (!PTy)
2433 return false;
2434
2435 if (!context)
2436 return false;
2437
2438 auto *STy = dyn_cast<StructType>(PTy->getElementType());
2439 if (!STy || STy->isLiteral())
2440 return false;
2441
2442 return llvm::is_contained(specialTypes, STy->getName());
2443 }
2444
2445 SDValue NVPTXTargetLowering::LowerFormalArguments(
2446 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
2447 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2448 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2449 MachineFunction &MF = DAG.getMachineFunction();
2450 const DataLayout &DL = DAG.getDataLayout();
2451 auto PtrVT = getPointerTy(DAG.getDataLayout());
2452
2453 const Function *F = &MF.getFunction();
2454 const AttributeList &PAL = F->getAttributes();
2455 const TargetLowering *TLI = STI.getTargetLowering();
2456
2457 SDValue Root = DAG.getRoot();
2458 std::vector<SDValue> OutChains;
2459
2460 bool isABI = (STI.getSmVersion() >= 20);
2461 assert(isABI && "Non-ABI compilation is not supported");
2462 if (!isABI)
2463 return Chain;
2464
2465 std::vector<Type *> argTypes;
2466 std::vector<const Argument *> theArgs;
2467 for (const Argument &I : F->args()) {
2468 theArgs.push_back(&I);
2469 argTypes.push_back(I.getType());
2470 }
2471 // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
2472 // Ins.size() will be larger
2473 // * if there is an aggregate argument with multiple fields (each field
2474 // showing up separately in Ins)
2475 // * if there is a vector argument with more than typical vector-length
2476 // elements (generally if more than 4) where each vector element is
2477 // individually present in Ins.
2478 // So a different index should be used for indexing into Ins.
2479 // See similar issue in LowerCall.
2480 unsigned InsIdx = 0;
2481
2482 int idx = 0;
2483 for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
2484 Type *Ty = argTypes[i];
2485
2486 // If the kernel argument is image*_t or sampler_t, convert it to
2487 // an i32 constant holding the parameter position. This can later be
2488 // matched in the AsmPrinter to output the correct mangled name.
2489 if (isImageOrSamplerVal(
2490 theArgs[i],
2491 (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
2492 : nullptr))) {
2493 assert(isKernelFunction(*F) &&
2494 "Only kernels can have image/sampler params");
2495 InVals.push_back(DAG.getConstant(i + 1, dl, MVT::i32));
2496 continue;
2497 }
2498
2499 if (theArgs[i]->use_empty()) {
2500 // argument is dead
2501 if (Ty->isAggregateType() || Ty->isIntegerTy(128)) {
2502 SmallVector<EVT, 16> vtparts;
2503
2504 ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts);
2505 assert(vtparts.size() > 0 && "empty aggregate type not expected");
2506 for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
2507 ++parti) {
2508 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
2509 ++InsIdx;
2510 }
2511 if (vtparts.size() > 0)
2512 --InsIdx;
2513 continue;
2514 }
2515 if (Ty->isVectorTy()) {
2516 EVT ObjectVT = getValueType(DL, Ty);
2517 unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
2518 for (unsigned parti = 0; parti < NumRegs; ++parti) {
2519 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
2520 ++InsIdx;
2521 }
2522 if (NumRegs > 0)
2523 --InsIdx;
2524 continue;
2525 }
2526 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
2527 continue;
2528 }
2529
2530 // In the following cases, assign a node order of "idx+1"
2531 // to newly created nodes. The SDNodes for params have to
2532 // appear in the same order as they appear in the original
2533 // function. "idx+1" holds that order.
2534 if (!PAL.hasParamAttribute(i, Attribute::ByVal)) {
2535 bool aggregateIsPacked = false;
2536 if (StructType *STy = dyn_cast<StructType>(Ty))
2537 aggregateIsPacked = STy->isPacked();
2538
2539 SmallVector<EVT, 16> VTs;
2540 SmallVector<uint64_t, 16> Offsets;
2541 ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets, 0);
2542 assert(VTs.size() > 0 && "Unexpected empty type.");
2543 auto VectorInfo =
2544 VectorizePTXValueVTs(VTs, Offsets, DL.getABITypeAlign(Ty));
2545
2546 SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
2547 int VecIdx = -1; // Index of the first element of the current vector.
2548 for (unsigned parti = 0, parte = VTs.size(); parti != parte; ++parti) {
2549 if (VectorInfo[parti] & PVF_FIRST) {
2550 assert(VecIdx == -1 && "Orphaned vector.");
2551 VecIdx = parti;
2552 }
2553
2554 // That's the last element of this store op.
2555 if (VectorInfo[parti] & PVF_LAST) {
2556 unsigned NumElts = parti - VecIdx + 1;
2557 EVT EltVT = VTs[parti];
2558 // i1 is loaded/stored as i8.
2559 EVT LoadVT = EltVT;
2560 if (EltVT == MVT::i1)
2561 LoadVT = MVT::i8;
2562 else if (EltVT == MVT::v2f16)
2563 // getLoad needs a vector type, but it can't handle
2564 // vectors which contain v2f16 elements. So we must load
2565 // using i32 here and then bitcast back.
2566 LoadVT = MVT::i32;
2567
2568 EVT VecVT = EVT::getVectorVT(F->getContext(), LoadVT, NumElts);
2569 SDValue VecAddr =
2570 DAG.getNode(ISD::ADD, dl, PtrVT, Arg,
2571 DAG.getConstant(Offsets[VecIdx], dl, PtrVT));
2572 Value *srcValue = Constant::getNullValue(PointerType::get(
2573 EltVT.getTypeForEVT(F->getContext()), ADDRESS_SPACE_PARAM));
2574 SDValue P =
2575 DAG.getLoad(VecVT, dl, Root, VecAddr,
2576 MachinePointerInfo(srcValue), aggregateIsPacked,
2577 MachineMemOperand::MODereferenceable |
2578 MachineMemOperand::MOInvariant);
2579 if (P.getNode())
2580 P.getNode()->setIROrder(idx + 1);
2581 for (unsigned j = 0; j < NumElts; ++j) {
2582 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, LoadVT, P,
2583 DAG.getIntPtrConstant(j, dl));
2584 // We've loaded i1 as an i8 and now must truncate it back to i1
2585 if (EltVT == MVT::i1)
2586 Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Elt);
2587 // v2f16 was loaded as an i32. Now we must bitcast it back.
2588 else if (EltVT == MVT::v2f16)
2589 Elt = DAG.getNode(ISD::BITCAST, dl, MVT::v2f16, Elt);
2590 // Extend the element if necessary (e.g. an i8 is loaded
2591 // into an i16 register)
2592 if (Ins[InsIdx].VT.isInteger() &&
2593 Ins[InsIdx].VT.getFixedSizeInBits() >
2594 LoadVT.getFixedSizeInBits()) {
2595 unsigned Extend = Ins[InsIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
2596 : ISD::ZERO_EXTEND;
2597 Elt = DAG.getNode(Extend, dl, Ins[InsIdx].VT, Elt);
2598 }
2599 InVals.push_back(Elt);
2600 }
2601
2602 // Reset vector tracking state.
2603 VecIdx = -1;
2604 }
2605 ++InsIdx;
2606 }
2607 if (VTs.size() > 0)
2608 --InsIdx;
2609 continue;
2610 }
2611
2612 // Param has ByVal attribute
2613 // Return MoveParam(param symbol).
2614 // Ideally, the param symbol could be returned directly,
2615 // but when the SDNode builder decides to use it in a CopyToReg(),
2616 // instruction selection fails because a TargetExternalSymbol
2617 // (not lowered) is target dependent, and CopyToReg assumes
2618 // the source is lowered.
2619 EVT ObjectVT = getValueType(DL, Ty);
2620 assert(ObjectVT == Ins[InsIdx].VT &&
2621 "Ins type did not match function type");
2622 SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
2623 SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
2624 if (p.getNode())
2625 p.getNode()->setIROrder(idx + 1);
2626 InVals.push_back(p);
2627 }
2628
2629 // Clang will check explicit varargs and issue an error if there are any.
2630 // However, Clang will let code with an implicit var-arg list like f()
2631 // pass. See bug 617733.
2632 // We treat this case as if the arg list is empty.
2633 // if (F.isVarArg()) {
2634 // assert(0 && "VarArg not supported yet!");
2635 //}
2636
2637 if (!OutChains.empty())
2638 DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains));
2639
2640 return Chain;
2641 }
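// For reference, the param loads built above print roughly as (illustrative,
// for an i32 parameter of a kernel named foo):
//   ld.param.u32 %r1, [foo_param_0];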
2642
2643 SDValue
2644 NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2645 bool isVarArg,
2646 const SmallVectorImpl<ISD::OutputArg> &Outs,
2647 const SmallVectorImpl<SDValue> &OutVals,
2648 const SDLoc &dl, SelectionDAG &DAG) const {
2649 MachineFunction &MF = DAG.getMachineFunction();
2650 Type *RetTy = MF.getFunction().getReturnType();
2651
2652 bool isABI = (STI.getSmVersion() >= 20);
2653 assert(isABI && "Non-ABI compilation is not supported");
2654 if (!isABI)
2655 return Chain;
2656
2657 const DataLayout DL = DAG.getDataLayout();
2658 SmallVector<EVT, 16> VTs;
2659 SmallVector<uint64_t, 16> Offsets;
2660 ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets);
2661 assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
2662
2663 auto VectorInfo = VectorizePTXValueVTs(
2664 VTs, Offsets, RetTy->isSized() ? DL.getABITypeAlign(RetTy) : Align(1));
2665
2666 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
2667 // 32-bits are sign extended or zero extended, depending on whether
2668 // they are signed or unsigned types.
2669 bool ExtendIntegerRetVal =
2670 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
2671
2672 SmallVector<SDValue, 6> StoreOperands;
2673 for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
2674 // New load/store. Record chain and offset operands.
2675 if (VectorInfo[i] & PVF_FIRST) {
2676 assert(StoreOperands.empty() && "Orphaned operand list.");
2677 StoreOperands.push_back(Chain);
2678 StoreOperands.push_back(DAG.getConstant(Offsets[i], dl, MVT::i32));
2679 }
2680
2681 SDValue RetVal = OutVals[i];
2682 if (ExtendIntegerRetVal) {
2683 RetVal = DAG.getNode(Outs[i].Flags.isSExt() ? ISD::SIGN_EXTEND
2684 : ISD::ZERO_EXTEND,
2685 dl, MVT::i32, RetVal);
2686 } else if (RetVal.getValueSizeInBits() < 16) {
2687 // Use 16-bit registers for small load-stores as it's the
2688 // smallest general purpose register size supported by NVPTX.
2689 RetVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, RetVal);
2690 }
2691
2692 // Record the value to return.
2693 StoreOperands.push_back(RetVal);
2694
2695 // That's the last element of this store op.
2696 if (VectorInfo[i] & PVF_LAST) {
2697 NVPTXISD::NodeType Op;
2698 unsigned NumElts = StoreOperands.size() - 2;
2699 switch (NumElts) {
2700 case 1:
2701 Op = NVPTXISD::StoreRetval;
2702 break;
2703 case 2:
2704 Op = NVPTXISD::StoreRetvalV2;
2705 break;
2706 case 4:
2707 Op = NVPTXISD::StoreRetvalV4;
2708 break;
2709 default:
2710 llvm_unreachable("Invalid vector info.");
2711 }
2712
2713 // Adjust type of load/store op if we've extended the scalar
2714 // return value.
2715 EVT TheStoreType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];
2716 Chain = DAG.getMemIntrinsicNode(
2717 Op, dl, DAG.getVTList(MVT::Other), StoreOperands, TheStoreType,
2718 MachinePointerInfo(), Align(1), MachineMemOperand::MOStore);
2719 // Cleanup vector state.
2720 StoreOperands.clear();
2721 }
2722 }
2723
2724 return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
2725 }
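// For reference, the StoreRetval nodes built above print roughly as
// (illustrative, for a float return value):
//   st.param.f32 [func_retval0+0], %f1;
// followed by the "ret;" emitted for NVPTXISD::RET_FLAG.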
2726
2727 void NVPTXTargetLowering::LowerAsmOperandForConstraint(
2728 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
2729 SelectionDAG &DAG) const {
2730 if (Constraint.length() > 1)
2731 return;
2732 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
2734 }
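// NVPTX inline-asm constraints are all single characters (e.g. "r", "f"),
// so multi-character constraints are simply ignored here, while
// single-character ones fall through to the target-independent handling.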

static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
  switch (Intrinsic) {
  default:
    return 0;

  case Intrinsic::nvvm_tex_1d_v4f32_s32:
    return NVPTXISD::Tex1DFloatS32;
  case Intrinsic::nvvm_tex_1d_v4f32_f32:
    return NVPTXISD::Tex1DFloatFloat;
  case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
    return NVPTXISD::Tex1DFloatFloatLevel;
  case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
    return NVPTXISD::Tex1DFloatFloatGrad;
  case Intrinsic::nvvm_tex_1d_v4s32_s32:
    return NVPTXISD::Tex1DS32S32;
  case Intrinsic::nvvm_tex_1d_v4s32_f32:
    return NVPTXISD::Tex1DS32Float;
  case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
    return NVPTXISD::Tex1DS32FloatLevel;
  case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
    return NVPTXISD::Tex1DS32FloatGrad;
  case Intrinsic::nvvm_tex_1d_v4u32_s32:
    return NVPTXISD::Tex1DU32S32;
  case Intrinsic::nvvm_tex_1d_v4u32_f32:
    return NVPTXISD::Tex1DU32Float;
  case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
    return NVPTXISD::Tex1DU32FloatLevel;
  case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
    return NVPTXISD::Tex1DU32FloatGrad;

  case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
    return NVPTXISD::Tex1DArrayFloatS32;
  case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
    return NVPTXISD::Tex1DArrayFloatFloat;
  case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
    return NVPTXISD::Tex1DArrayFloatFloatLevel;
  case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
    return NVPTXISD::Tex1DArrayFloatFloatGrad;
  case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
    return NVPTXISD::Tex1DArrayS32S32;
  case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
    return NVPTXISD::Tex1DArrayS32Float;
  case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
    return NVPTXISD::Tex1DArrayS32FloatLevel;
  case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
    return NVPTXISD::Tex1DArrayS32FloatGrad;
  case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
    return NVPTXISD::Tex1DArrayU32S32;
  case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
    return NVPTXISD::Tex1DArrayU32Float;
  case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
    return NVPTXISD::Tex1DArrayU32FloatLevel;
  case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
    return NVPTXISD::Tex1DArrayU32FloatGrad;

  case Intrinsic::nvvm_tex_2d_v4f32_s32:
    return NVPTXISD::Tex2DFloatS32;
  case Intrinsic::nvvm_tex_2d_v4f32_f32:
    return NVPTXISD::Tex2DFloatFloat;
  case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
    return NVPTXISD::Tex2DFloatFloatLevel;
  case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
    return NVPTXISD::Tex2DFloatFloatGrad;
  case Intrinsic::nvvm_tex_2d_v4s32_s32:
    return NVPTXISD::Tex2DS32S32;
  case Intrinsic::nvvm_tex_2d_v4s32_f32:
    return NVPTXISD::Tex2DS32Float;
  case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
    return NVPTXISD::Tex2DS32FloatLevel;
  case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
    return NVPTXISD::Tex2DS32FloatGrad;
  case Intrinsic::nvvm_tex_2d_v4u32_s32:
    return NVPTXISD::Tex2DU32S32;
  case Intrinsic::nvvm_tex_2d_v4u32_f32:
    return NVPTXISD::Tex2DU32Float;
  case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
    return NVPTXISD::Tex2DU32FloatLevel;
  case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
    return NVPTXISD::Tex2DU32FloatGrad;

  case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
    return NVPTXISD::Tex2DArrayFloatS32;
  case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
    return NVPTXISD::Tex2DArrayFloatFloat;
  case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
    return NVPTXISD::Tex2DArrayFloatFloatLevel;
  case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
    return NVPTXISD::Tex2DArrayFloatFloatGrad;
  case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
    return NVPTXISD::Tex2DArrayS32S32;
  case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
    return NVPTXISD::Tex2DArrayS32Float;
  case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
    return NVPTXISD::Tex2DArrayS32FloatLevel;
  case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
    return NVPTXISD::Tex2DArrayS32FloatGrad;
  case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
    return NVPTXISD::Tex2DArrayU32S32;
  case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
    return NVPTXISD::Tex2DArrayU32Float;
  case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
    return NVPTXISD::Tex2DArrayU32FloatLevel;
  case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
    return NVPTXISD::Tex2DArrayU32FloatGrad;

  case Intrinsic::nvvm_tex_3d_v4f32_s32:
    return NVPTXISD::Tex3DFloatS32;
  case Intrinsic::nvvm_tex_3d_v4f32_f32:
    return NVPTXISD::Tex3DFloatFloat;
  case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
    return NVPTXISD::Tex3DFloatFloatLevel;
  case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
    return NVPTXISD::Tex3DFloatFloatGrad;
  case Intrinsic::nvvm_tex_3d_v4s32_s32:
    return NVPTXISD::Tex3DS32S32;
  case Intrinsic::nvvm_tex_3d_v4s32_f32:
    return NVPTXISD::Tex3DS32Float;
  case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
    return NVPTXISD::Tex3DS32FloatLevel;
  case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
    return NVPTXISD::Tex3DS32FloatGrad;
  case Intrinsic::nvvm_tex_3d_v4u32_s32:
    return NVPTXISD::Tex3DU32S32;
  case Intrinsic::nvvm_tex_3d_v4u32_f32:
    return NVPTXISD::Tex3DU32Float;
  case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
    return NVPTXISD::Tex3DU32FloatLevel;
  case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
    return NVPTXISD::Tex3DU32FloatGrad;

  case Intrinsic::nvvm_tex_cube_v4f32_f32:
    return NVPTXISD::TexCubeFloatFloat;
  case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
    return NVPTXISD::TexCubeFloatFloatLevel;
  case Intrinsic::nvvm_tex_cube_v4s32_f32:
    return NVPTXISD::TexCubeS32Float;
  case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
    return NVPTXISD::TexCubeS32FloatLevel;
  case Intrinsic::nvvm_tex_cube_v4u32_f32:
    return NVPTXISD::TexCubeU32Float;
  case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
    return NVPTXISD::TexCubeU32FloatLevel;

  case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
    return NVPTXISD::TexCubeArrayFloatFloat;
  case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
    return NVPTXISD::TexCubeArrayFloatFloatLevel;
  case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
    return NVPTXISD::TexCubeArrayS32Float;
  case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
    return NVPTXISD::TexCubeArrayS32FloatLevel;
  case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
    return NVPTXISD::TexCubeArrayU32Float;
  case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
    return NVPTXISD::TexCubeArrayU32FloatLevel;

  case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
    return NVPTXISD::Tld4R2DFloatFloat;
  case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
    return NVPTXISD::Tld4G2DFloatFloat;
  case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
    return NVPTXISD::Tld4B2DFloatFloat;
  case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
    return NVPTXISD::Tld4A2DFloatFloat;
  case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
    return NVPTXISD::Tld4R2DS64Float;
  case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
    return NVPTXISD::Tld4G2DS64Float;
  case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
    return NVPTXISD::Tld4B2DS64Float;
  case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
    return NVPTXISD::Tld4A2DS64Float;
  case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
    return NVPTXISD::Tld4R2DU64Float;
  case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
    return NVPTXISD::Tld4G2DU64Float;
  case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
    return NVPTXISD::Tld4B2DU64Float;
  case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
    return NVPTXISD::Tld4A2DU64Float;

  case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
    return NVPTXISD::TexUnified1DFloatS32;
  case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
    return NVPTXISD::TexUnified1DFloatFloat;
  case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
    return NVPTXISD::TexUnified1DFloatFloatLevel;
  case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
    return NVPTXISD::TexUnified1DFloatFloatGrad;
  case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
    return NVPTXISD::TexUnified1DS32S32;
  case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
    return NVPTXISD::TexUnified1DS32Float;
  case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
    return NVPTXISD::TexUnified1DS32FloatLevel;
  case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
    return NVPTXISD::TexUnified1DS32FloatGrad;
  case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
    return NVPTXISD::TexUnified1DU32S32;
  case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
    return NVPTXISD::TexUnified1DU32Float;
  case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
    return NVPTXISD::TexUnified1DU32FloatLevel;
  case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
    return NVPTXISD::TexUnified1DU32FloatGrad;

  case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
    return NVPTXISD::TexUnified1DArrayFloatS32;
  case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
    return NVPTXISD::TexUnified1DArrayFloatFloat;
  case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
    return NVPTXISD::TexUnified1DArrayFloatFloatLevel;
  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
    return NVPTXISD::TexUnified1DArrayFloatFloatGrad;
  case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
    return NVPTXISD::TexUnified1DArrayS32S32;
  case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
    return NVPTXISD::TexUnified1DArrayS32Float;
  case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
    return NVPTXISD::TexUnified1DArrayS32FloatLevel;
  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
    return NVPTXISD::TexUnified1DArrayS32FloatGrad;
  case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
    return NVPTXISD::TexUnified1DArrayU32S32;
  case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
    return NVPTXISD::TexUnified1DArrayU32Float;
  case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
    return NVPTXISD::TexUnified1DArrayU32FloatLevel;
  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
    return NVPTXISD::TexUnified1DArrayU32FloatGrad;

  case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
    return NVPTXISD::TexUnified2DFloatS32;
  case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
    return NVPTXISD::TexUnified2DFloatFloat;
  case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
    return NVPTXISD::TexUnified2DFloatFloatLevel;
  case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
    return NVPTXISD::TexUnified2DFloatFloatGrad;
  case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
    return NVPTXISD::TexUnified2DS32S32;
  case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
    return NVPTXISD::TexUnified2DS32Float;
  case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
    return NVPTXISD::TexUnified2DS32FloatLevel;
  case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
    return NVPTXISD::TexUnified2DS32FloatGrad;
  case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
    return NVPTXISD::TexUnified2DU32S32;
  case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
    return NVPTXISD::TexUnified2DU32Float;
  case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
    return NVPTXISD::TexUnified2DU32FloatLevel;
  case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
    return NVPTXISD::TexUnified2DU32FloatGrad;

  case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
    return NVPTXISD::TexUnified2DArrayFloatS32;
  case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
    return NVPTXISD::TexUnified2DArrayFloatFloat;
  case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
    return NVPTXISD::TexUnified2DArrayFloatFloatLevel;
  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
    return NVPTXISD::TexUnified2DArrayFloatFloatGrad;
  case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
    return NVPTXISD::TexUnified2DArrayS32S32;
  case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
    return NVPTXISD::TexUnified2DArrayS32Float;
  case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
    return NVPTXISD::TexUnified2DArrayS32FloatLevel;
  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
    return NVPTXISD::TexUnified2DArrayS32FloatGrad;
  case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
    return NVPTXISD::TexUnified2DArrayU32S32;
  case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
    return NVPTXISD::TexUnified2DArrayU32Float;
  case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
    return NVPTXISD::TexUnified2DArrayU32FloatLevel;
  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
    return NVPTXISD::TexUnified2DArrayU32FloatGrad;

  case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
    return NVPTXISD::TexUnified3DFloatS32;
  case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
    return NVPTXISD::TexUnified3DFloatFloat;
  case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
    return NVPTXISD::TexUnified3DFloatFloatLevel;
  case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
    return NVPTXISD::TexUnified3DFloatFloatGrad;
  case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
    return NVPTXISD::TexUnified3DS32S32;
  case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
    return NVPTXISD::TexUnified3DS32Float;
  case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
    return NVPTXISD::TexUnified3DS32FloatLevel;
  case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
    return NVPTXISD::TexUnified3DS32FloatGrad;
  case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
    return NVPTXISD::TexUnified3DU32S32;
  case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
    return NVPTXISD::TexUnified3DU32Float;
  case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
    return NVPTXISD::TexUnified3DU32FloatLevel;
  case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
    return NVPTXISD::TexUnified3DU32FloatGrad;

  case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
    return NVPTXISD::TexUnifiedCubeFloatFloat;
  case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
    return NVPTXISD::TexUnifiedCubeFloatFloatLevel;
  case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
    return NVPTXISD::TexUnifiedCubeS32Float;
  case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
    return NVPTXISD::TexUnifiedCubeS32FloatLevel;
  case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
    return NVPTXISD::TexUnifiedCubeU32Float;
  case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
    return NVPTXISD::TexUnifiedCubeU32FloatLevel;

  case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
    return NVPTXISD::TexUnifiedCubeArrayFloatFloat;
  case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
    return NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel;
  case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
    return NVPTXISD::TexUnifiedCubeArrayS32Float;
  case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
    return NVPTXISD::TexUnifiedCubeArrayS32FloatLevel;
  case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
    return NVPTXISD::TexUnifiedCubeArrayU32Float;
  case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
    return NVPTXISD::TexUnifiedCubeArrayU32FloatLevel;

  case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
    return NVPTXISD::Tld4UnifiedR2DFloatFloat;
  case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
    return NVPTXISD::Tld4UnifiedG2DFloatFloat;
  case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
    return NVPTXISD::Tld4UnifiedB2DFloatFloat;
  case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
    return NVPTXISD::Tld4UnifiedA2DFloatFloat;
  case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
    return NVPTXISD::Tld4UnifiedR2DS64Float;
  case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
    return NVPTXISD::Tld4UnifiedG2DS64Float;
  case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
    return NVPTXISD::Tld4UnifiedB2DS64Float;
  case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
    return NVPTXISD::Tld4UnifiedA2DS64Float;
  case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
    return NVPTXISD::Tld4UnifiedR2DU64Float;
  case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
    return NVPTXISD::Tld4UnifiedG2DU64Float;
  case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
    return NVPTXISD::Tld4UnifiedB2DU64Float;
  case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
    return NVPTXISD::Tld4UnifiedA2DU64Float;
  }
}
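// Each nvvm texture intrinsic above maps 1:1 to an NVPTXISD opcode whose
// name encodes the geometry, result type, and coordinate type. As an
// illustrative sketch (register names made up), nvvm_tex_1d_v4f32_s32
// selects Tex1DFloatS32, which is eventually selected to a PTX
// instruction of roughly the form:
//   tex.1d.v4.f32.s32 {%f1, %f2, %f3, %f4}, [tex_handle, {%r1}];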

static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
  switch (Intrinsic) {
  default:
    return 0;
  case Intrinsic::nvvm_suld_1d_i8_clamp:
    return NVPTXISD::Suld1DI8Clamp;
  case Intrinsic::nvvm_suld_1d_i16_clamp:
    return NVPTXISD::Suld1DI16Clamp;
  case Intrinsic::nvvm_suld_1d_i32_clamp:
    return NVPTXISD::Suld1DI32Clamp;
  case Intrinsic::nvvm_suld_1d_i64_clamp:
    return NVPTXISD::Suld1DI64Clamp;
  case Intrinsic::nvvm_suld_1d_v2i8_clamp:
    return NVPTXISD::Suld1DV2I8Clamp;
  case Intrinsic::nvvm_suld_1d_v2i16_clamp:
    return NVPTXISD::Suld1DV2I16Clamp;
  case Intrinsic::nvvm_suld_1d_v2i32_clamp:
    return NVPTXISD::Suld1DV2I32Clamp;
  case Intrinsic::nvvm_suld_1d_v2i64_clamp:
    return NVPTXISD::Suld1DV2I64Clamp;
  case Intrinsic::nvvm_suld_1d_v4i8_clamp:
    return NVPTXISD::Suld1DV4I8Clamp;
  case Intrinsic::nvvm_suld_1d_v4i16_clamp:
    return NVPTXISD::Suld1DV4I16Clamp;
  case Intrinsic::nvvm_suld_1d_v4i32_clamp:
    return NVPTXISD::Suld1DV4I32Clamp;
  case Intrinsic::nvvm_suld_1d_array_i8_clamp:
    return NVPTXISD::Suld1DArrayI8Clamp;
  case Intrinsic::nvvm_suld_1d_array_i16_clamp:
    return NVPTXISD::Suld1DArrayI16Clamp;
  case Intrinsic::nvvm_suld_1d_array_i32_clamp:
    return NVPTXISD::Suld1DArrayI32Clamp;
  case Intrinsic::nvvm_suld_1d_array_i64_clamp:
    return NVPTXISD::Suld1DArrayI64Clamp;
  case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
    return NVPTXISD::Suld1DArrayV2I8Clamp;
  case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
    return NVPTXISD::Suld1DArrayV2I16Clamp;
  case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
    return NVPTXISD::Suld1DArrayV2I32Clamp;
  case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
    return NVPTXISD::Suld1DArrayV2I64Clamp;
  case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
    return NVPTXISD::Suld1DArrayV4I8Clamp;
  case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
    return NVPTXISD::Suld1DArrayV4I16Clamp;
  case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
    return NVPTXISD::Suld1DArrayV4I32Clamp;
  case Intrinsic::nvvm_suld_2d_i8_clamp:
    return NVPTXISD::Suld2DI8Clamp;
  case Intrinsic::nvvm_suld_2d_i16_clamp:
    return NVPTXISD::Suld2DI16Clamp;
  case Intrinsic::nvvm_suld_2d_i32_clamp:
    return NVPTXISD::Suld2DI32Clamp;
  case Intrinsic::nvvm_suld_2d_i64_clamp:
    return NVPTXISD::Suld2DI64Clamp;
  case Intrinsic::nvvm_suld_2d_v2i8_clamp:
    return NVPTXISD::Suld2DV2I8Clamp;
  case Intrinsic::nvvm_suld_2d_v2i16_clamp:
    return NVPTXISD::Suld2DV2I16Clamp;
  case Intrinsic::nvvm_suld_2d_v2i32_clamp:
    return NVPTXISD::Suld2DV2I32Clamp;
  case Intrinsic::nvvm_suld_2d_v2i64_clamp:
    return NVPTXISD::Suld2DV2I64Clamp;
  case Intrinsic::nvvm_suld_2d_v4i8_clamp:
    return NVPTXISD::Suld2DV4I8Clamp;
  case Intrinsic::nvvm_suld_2d_v4i16_clamp:
    return NVPTXISD::Suld2DV4I16Clamp;
  case Intrinsic::nvvm_suld_2d_v4i32_clamp:
    return NVPTXISD::Suld2DV4I32Clamp;
  case Intrinsic::nvvm_suld_2d_array_i8_clamp:
    return NVPTXISD::Suld2DArrayI8Clamp;
  case Intrinsic::nvvm_suld_2d_array_i16_clamp:
    return NVPTXISD::Suld2DArrayI16Clamp;
  case Intrinsic::nvvm_suld_2d_array_i32_clamp:
    return NVPTXISD::Suld2DArrayI32Clamp;
  case Intrinsic::nvvm_suld_2d_array_i64_clamp:
    return NVPTXISD::Suld2DArrayI64Clamp;
  case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
    return NVPTXISD::Suld2DArrayV2I8Clamp;
  case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
    return NVPTXISD::Suld2DArrayV2I16Clamp;
  case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
    return NVPTXISD::Suld2DArrayV2I32Clamp;
  case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
    return NVPTXISD::Suld2DArrayV2I64Clamp;
  case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
    return NVPTXISD::Suld2DArrayV4I8Clamp;
  case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
    return NVPTXISD::Suld2DArrayV4I16Clamp;
  case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
    return NVPTXISD::Suld2DArrayV4I32Clamp;
  case Intrinsic::nvvm_suld_3d_i8_clamp:
    return NVPTXISD::Suld3DI8Clamp;
  case Intrinsic::nvvm_suld_3d_i16_clamp:
    return NVPTXISD::Suld3DI16Clamp;
  case Intrinsic::nvvm_suld_3d_i32_clamp:
    return NVPTXISD::Suld3DI32Clamp;
  case Intrinsic::nvvm_suld_3d_i64_clamp:
    return NVPTXISD::Suld3DI64Clamp;
  case Intrinsic::nvvm_suld_3d_v2i8_clamp:
    return NVPTXISD::Suld3DV2I8Clamp;
  case Intrinsic::nvvm_suld_3d_v2i16_clamp:
    return NVPTXISD::Suld3DV2I16Clamp;
  case Intrinsic::nvvm_suld_3d_v2i32_clamp:
    return NVPTXISD::Suld3DV2I32Clamp;
  case Intrinsic::nvvm_suld_3d_v2i64_clamp:
    return NVPTXISD::Suld3DV2I64Clamp;
  case Intrinsic::nvvm_suld_3d_v4i8_clamp:
    return NVPTXISD::Suld3DV4I8Clamp;
  case Intrinsic::nvvm_suld_3d_v4i16_clamp:
    return NVPTXISD::Suld3DV4I16Clamp;
  case Intrinsic::nvvm_suld_3d_v4i32_clamp:
    return NVPTXISD::Suld3DV4I32Clamp;
  case Intrinsic::nvvm_suld_1d_i8_trap:
    return NVPTXISD::Suld1DI8Trap;
  case Intrinsic::nvvm_suld_1d_i16_trap:
    return NVPTXISD::Suld1DI16Trap;
  case Intrinsic::nvvm_suld_1d_i32_trap:
    return NVPTXISD::Suld1DI32Trap;
  case Intrinsic::nvvm_suld_1d_i64_trap:
    return NVPTXISD::Suld1DI64Trap;
  case Intrinsic::nvvm_suld_1d_v2i8_trap:
    return NVPTXISD::Suld1DV2I8Trap;
  case Intrinsic::nvvm_suld_1d_v2i16_trap:
    return NVPTXISD::Suld1DV2I16Trap;
  case Intrinsic::nvvm_suld_1d_v2i32_trap:
    return NVPTXISD::Suld1DV2I32Trap;
  case Intrinsic::nvvm_suld_1d_v2i64_trap:
    return NVPTXISD::Suld1DV2I64Trap;
  case Intrinsic::nvvm_suld_1d_v4i8_trap:
    return NVPTXISD::Suld1DV4I8Trap;
  case Intrinsic::nvvm_suld_1d_v4i16_trap:
    return NVPTXISD::Suld1DV4I16Trap;
  case Intrinsic::nvvm_suld_1d_v4i32_trap:
    return NVPTXISD::Suld1DV4I32Trap;
  case Intrinsic::nvvm_suld_1d_array_i8_trap:
    return NVPTXISD::Suld1DArrayI8Trap;
  case Intrinsic::nvvm_suld_1d_array_i16_trap:
    return NVPTXISD::Suld1DArrayI16Trap;
  case Intrinsic::nvvm_suld_1d_array_i32_trap:
    return NVPTXISD::Suld1DArrayI32Trap;
  case Intrinsic::nvvm_suld_1d_array_i64_trap:
    return NVPTXISD::Suld1DArrayI64Trap;
  case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
    return NVPTXISD::Suld1DArrayV2I8Trap;
  case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
    return NVPTXISD::Suld1DArrayV2I16Trap;
  case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
    return NVPTXISD::Suld1DArrayV2I32Trap;
  case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
    return NVPTXISD::Suld1DArrayV2I64Trap;
  case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
    return NVPTXISD::Suld1DArrayV4I8Trap;
  case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
    return NVPTXISD::Suld1DArrayV4I16Trap;
  case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
    return NVPTXISD::Suld1DArrayV4I32Trap;
  case Intrinsic::nvvm_suld_2d_i8_trap:
    return NVPTXISD::Suld2DI8Trap;
  case Intrinsic::nvvm_suld_2d_i16_trap:
    return NVPTXISD::Suld2DI16Trap;
  case Intrinsic::nvvm_suld_2d_i32_trap:
    return NVPTXISD::Suld2DI32Trap;
  case Intrinsic::nvvm_suld_2d_i64_trap:
    return NVPTXISD::Suld2DI64Trap;
  case Intrinsic::nvvm_suld_2d_v2i8_trap:
    return NVPTXISD::Suld2DV2I8Trap;
  case Intrinsic::nvvm_suld_2d_v2i16_trap:
    return NVPTXISD::Suld2DV2I16Trap;
  case Intrinsic::nvvm_suld_2d_v2i32_trap:
    return NVPTXISD::Suld2DV2I32Trap;
  case Intrinsic::nvvm_suld_2d_v2i64_trap:
    return NVPTXISD::Suld2DV2I64Trap;
  case Intrinsic::nvvm_suld_2d_v4i8_trap:
    return NVPTXISD::Suld2DV4I8Trap;
  case Intrinsic::nvvm_suld_2d_v4i16_trap:
    return NVPTXISD::Suld2DV4I16Trap;
  case Intrinsic::nvvm_suld_2d_v4i32_trap:
    return NVPTXISD::Suld2DV4I32Trap;
  case Intrinsic::nvvm_suld_2d_array_i8_trap:
    return NVPTXISD::Suld2DArrayI8Trap;
  case Intrinsic::nvvm_suld_2d_array_i16_trap:
    return NVPTXISD::Suld2DArrayI16Trap;
  case Intrinsic::nvvm_suld_2d_array_i32_trap:
    return NVPTXISD::Suld2DArrayI32Trap;
  case Intrinsic::nvvm_suld_2d_array_i64_trap:
    return NVPTXISD::Suld2DArrayI64Trap;
  case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
    return NVPTXISD::Suld2DArrayV2I8Trap;
  case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
    return NVPTXISD::Suld2DArrayV2I16Trap;
  case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
    return NVPTXISD::Suld2DArrayV2I32Trap;
  case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
    return NVPTXISD::Suld2DArrayV2I64Trap;
  case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
    return NVPTXISD::Suld2DArrayV4I8Trap;
  case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
    return NVPTXISD::Suld2DArrayV4I16Trap;
  case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
    return NVPTXISD::Suld2DArrayV4I32Trap;
  case Intrinsic::nvvm_suld_3d_i8_trap:
    return NVPTXISD::Suld3DI8Trap;
  case Intrinsic::nvvm_suld_3d_i16_trap:
    return NVPTXISD::Suld3DI16Trap;
  case Intrinsic::nvvm_suld_3d_i32_trap:
    return NVPTXISD::Suld3DI32Trap;
  case Intrinsic::nvvm_suld_3d_i64_trap:
    return NVPTXISD::Suld3DI64Trap;
  case Intrinsic::nvvm_suld_3d_v2i8_trap:
    return NVPTXISD::Suld3DV2I8Trap;
  case Intrinsic::nvvm_suld_3d_v2i16_trap:
    return NVPTXISD::Suld3DV2I16Trap;
  case Intrinsic::nvvm_suld_3d_v2i32_trap:
    return NVPTXISD::Suld3DV2I32Trap;
  case Intrinsic::nvvm_suld_3d_v2i64_trap:
    return NVPTXISD::Suld3DV2I64Trap;
  case Intrinsic::nvvm_suld_3d_v4i8_trap:
    return NVPTXISD::Suld3DV4I8Trap;
  case Intrinsic::nvvm_suld_3d_v4i16_trap:
    return NVPTXISD::Suld3DV4I16Trap;
  case Intrinsic::nvvm_suld_3d_v4i32_trap:
    return NVPTXISD::Suld3DV4I32Trap;
  case Intrinsic::nvvm_suld_1d_i8_zero:
    return NVPTXISD::Suld1DI8Zero;
  case Intrinsic::nvvm_suld_1d_i16_zero:
    return NVPTXISD::Suld1DI16Zero;
  case Intrinsic::nvvm_suld_1d_i32_zero:
    return NVPTXISD::Suld1DI32Zero;
  case Intrinsic::nvvm_suld_1d_i64_zero:
    return NVPTXISD::Suld1DI64Zero;
  case Intrinsic::nvvm_suld_1d_v2i8_zero:
    return NVPTXISD::Suld1DV2I8Zero;
  case Intrinsic::nvvm_suld_1d_v2i16_zero:
    return NVPTXISD::Suld1DV2I16Zero;
  case Intrinsic::nvvm_suld_1d_v2i32_zero:
    return NVPTXISD::Suld1DV2I32Zero;
  case Intrinsic::nvvm_suld_1d_v2i64_zero:
    return NVPTXISD::Suld1DV2I64Zero;
  case Intrinsic::nvvm_suld_1d_v4i8_zero:
    return NVPTXISD::Suld1DV4I8Zero;
  case Intrinsic::nvvm_suld_1d_v4i16_zero:
    return NVPTXISD::Suld1DV4I16Zero;
  case Intrinsic::nvvm_suld_1d_v4i32_zero:
    return NVPTXISD::Suld1DV4I32Zero;
  case Intrinsic::nvvm_suld_1d_array_i8_zero:
    return NVPTXISD::Suld1DArrayI8Zero;
  case Intrinsic::nvvm_suld_1d_array_i16_zero:
    return NVPTXISD::Suld1DArrayI16Zero;
  case Intrinsic::nvvm_suld_1d_array_i32_zero:
    return NVPTXISD::Suld1DArrayI32Zero;
  case Intrinsic::nvvm_suld_1d_array_i64_zero:
    return NVPTXISD::Suld1DArrayI64Zero;
  case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
    return NVPTXISD::Suld1DArrayV2I8Zero;
  case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
    return NVPTXISD::Suld1DArrayV2I16Zero;
  case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
    return NVPTXISD::Suld1DArrayV2I32Zero;
  case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
    return NVPTXISD::Suld1DArrayV2I64Zero;
  case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
    return NVPTXISD::Suld1DArrayV4I8Zero;
  case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
    return NVPTXISD::Suld1DArrayV4I16Zero;
  case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
    return NVPTXISD::Suld1DArrayV4I32Zero;
  case Intrinsic::nvvm_suld_2d_i8_zero:
    return NVPTXISD::Suld2DI8Zero;
  case Intrinsic::nvvm_suld_2d_i16_zero:
    return NVPTXISD::Suld2DI16Zero;
  case Intrinsic::nvvm_suld_2d_i32_zero:
    return NVPTXISD::Suld2DI32Zero;
  case Intrinsic::nvvm_suld_2d_i64_zero:
    return NVPTXISD::Suld2DI64Zero;
  case Intrinsic::nvvm_suld_2d_v2i8_zero:
    return NVPTXISD::Suld2DV2I8Zero;
  case Intrinsic::nvvm_suld_2d_v2i16_zero:
    return NVPTXISD::Suld2DV2I16Zero;
  case Intrinsic::nvvm_suld_2d_v2i32_zero:
    return NVPTXISD::Suld2DV2I32Zero;
  case Intrinsic::nvvm_suld_2d_v2i64_zero:
    return NVPTXISD::Suld2DV2I64Zero;
  case Intrinsic::nvvm_suld_2d_v4i8_zero:
    return NVPTXISD::Suld2DV4I8Zero;
  case Intrinsic::nvvm_suld_2d_v4i16_zero:
    return NVPTXISD::Suld2DV4I16Zero;
  case Intrinsic::nvvm_suld_2d_v4i32_zero:
    return NVPTXISD::Suld2DV4I32Zero;
  case Intrinsic::nvvm_suld_2d_array_i8_zero:
    return NVPTXISD::Suld2DArrayI8Zero;
  case Intrinsic::nvvm_suld_2d_array_i16_zero:
    return NVPTXISD::Suld2DArrayI16Zero;
  case Intrinsic::nvvm_suld_2d_array_i32_zero:
    return NVPTXISD::Suld2DArrayI32Zero;
  case Intrinsic::nvvm_suld_2d_array_i64_zero:
    return NVPTXISD::Suld2DArrayI64Zero;
  case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
    return NVPTXISD::Suld2DArrayV2I8Zero;
  case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
    return NVPTXISD::Suld2DArrayV2I16Zero;
  case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
    return NVPTXISD::Suld2DArrayV2I32Zero;
  case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
    return NVPTXISD::Suld2DArrayV2I64Zero;
  case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
    return NVPTXISD::Suld2DArrayV4I8Zero;
  case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
    return NVPTXISD::Suld2DArrayV4I16Zero;
  case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
    return NVPTXISD::Suld2DArrayV4I32Zero;
  case Intrinsic::nvvm_suld_3d_i8_zero:
    return NVPTXISD::Suld3DI8Zero;
  case Intrinsic::nvvm_suld_3d_i16_zero:
    return NVPTXISD::Suld3DI16Zero;
  case Intrinsic::nvvm_suld_3d_i32_zero:
    return NVPTXISD::Suld3DI32Zero;
  case Intrinsic::nvvm_suld_3d_i64_zero:
    return NVPTXISD::Suld3DI64Zero;
  case Intrinsic::nvvm_suld_3d_v2i8_zero:
    return NVPTXISD::Suld3DV2I8Zero;
  case Intrinsic::nvvm_suld_3d_v2i16_zero:
    return NVPTXISD::Suld3DV2I16Zero;
  case Intrinsic::nvvm_suld_3d_v2i32_zero:
    return NVPTXISD::Suld3DV2I32Zero;
  case Intrinsic::nvvm_suld_3d_v2i64_zero:
    return NVPTXISD::Suld3DV2I64Zero;
  case Intrinsic::nvvm_suld_3d_v4i8_zero:
    return NVPTXISD::Suld3DV4I8Zero;
  case Intrinsic::nvvm_suld_3d_v4i16_zero:
    return NVPTXISD::Suld3DV4I16Zero;
  case Intrinsic::nvvm_suld_3d_v4i32_zero:
    return NVPTXISD::Suld3DV4I32Zero;
  }
}
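// The Clamp/Trap/Zero opcode suffixes above correspond to the PTX surface
// load out-of-bounds modes (.clamp, .trap, .zero): clamp the coordinate to
// the nearest valid element, raise a trap, or return zero, respectively.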

// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
// TgtMemIntrinsic because we need information that is only available in
// the "Value" type of the destination pointer, in particular its address
// space.
bool NVPTXTargetLowering::getTgtMemIntrinsic(
    IntrinsicInfo &Info, const CallInst &I,
    MachineFunction &MF, unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::nvvm_match_all_sync_i32p:
  case Intrinsic::nvvm_match_all_sync_i64p:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // memVT is bogus. These intrinsics have the IntrInaccessibleMemOnly
    // attribute in order to model data exchange with other threads, but
    // perform no real memory accesses.
    Info.memVT = MVT::i1;

    // Our result depends on both our own and other threads' arguments.
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
    return true;
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::v8f16;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = Align(16);
    return true;
  }
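  // These shapes load a per-thread f16 A/B fragment modeled here as eight
  // halves (v8f16, 16 bytes), which also motivates the 16-byte alignment;
  // the exact per-thread element layout is an ABI detail of the wmma
  // instructions rather than something this file relies on.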
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::v2i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = Align(8);
    return true;
  }

  case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row:

  case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::v4i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = Align(16);
    return true;
  }

  case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row:

  case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row:
  case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row:
  case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col:
  case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row:
  case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row:
  case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col:
  case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = Align(4);
    return true;
  }

  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::v4f16;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = Align(16);
    return true;
  }

  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::v8f32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = Align(16);
    return true;
  }

  case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row_stride: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::v8i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = Align(16);
    return true;
  }

  case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row_stride: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::v2i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = Align(8);
    return true;
  }
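  // The accumulator (c) fragments scale with the wmma shape: the full-size
  // shapes above use eight 32-bit elements per thread (v8f32/v8i32,
  // 16-byte aligned), while the sub-byte m8n8k32 and single-bit m8n8k128
  // shapes only carry two (v2i32, 8-byte aligned).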

  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride: {
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::v4f16;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOStore;
    Info.align = Align(16);
    return true;
  }

  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride: {
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::v8f32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOStore;
    Info.align = Align(16);
    return true;
  }

  case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row_stride: {
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::v8i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOStore;
    Info.align = Align(16);
    return true;
  }

  case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row_stride: {
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::v2i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOStore;
    Info.align = Align(8);
    return true;
  }
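  // The store_d cases mirror the load_c cases above: identical fragment
  // types and alignments, but emitted as ISD::INTRINSIC_VOID with
  // MachineMemOperand::MOStore since a store produces no value.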

  case Intrinsic::nvvm_atomic_load_inc_32:
  case Intrinsic::nvvm_atomic_load_dec_32:

  case Intrinsic::nvvm_atomic_add_gen_f_cta:
  case Intrinsic::nvvm_atomic_add_gen_f_sys:
  case Intrinsic::nvvm_atomic_add_gen_i_cta:
  case Intrinsic::nvvm_atomic_add_gen_i_sys:
  case Intrinsic::nvvm_atomic_and_gen_i_cta:
  case Intrinsic::nvvm_atomic_and_gen_i_sys:
  case Intrinsic::nvvm_atomic_cas_gen_i_cta:
  case Intrinsic::nvvm_atomic_cas_gen_i_sys:
  case Intrinsic::nvvm_atomic_dec_gen_i_cta:
  case Intrinsic::nvvm_atomic_dec_gen_i_sys:
  case Intrinsic::nvvm_atomic_inc_gen_i_cta:
  case Intrinsic::nvvm_atomic_inc_gen_i_sys:
  case Intrinsic::nvvm_atomic_max_gen_i_cta:
  case Intrinsic::nvvm_atomic_max_gen_i_sys:
  case Intrinsic::nvvm_atomic_min_gen_i_cta:
  case Intrinsic::nvvm_atomic_min_gen_i_sys:
  case Intrinsic::nvvm_atomic_or_gen_i_cta:
  case Intrinsic::nvvm_atomic_or_gen_i_sys:
  case Intrinsic::nvvm_atomic_exch_gen_i_cta:
  case Intrinsic::nvvm_atomic_exch_gen_i_sys:
  case Intrinsic::nvvm_atomic_xor_gen_i_cta:
  case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
    auto &DL = I.getModule()->getDataLayout();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = getValueType(DL, I.getType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
    Info.align.reset();
    return true;
  }
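  // These are read-modify-write atomics on generic-address-space ("gen")
  // pointers; the _cta/_sys suffixes select the PTX memory-scope qualifier
  // (.cta vs. .sys). They are conservatively modeled as both a load and a
  // store, with the alignment left unknown.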

  case Intrinsic::nvvm_ldu_global_i:
  case Intrinsic::nvvm_ldu_global_f:
  case Intrinsic::nvvm_ldu_global_p: {
    auto &DL = I.getModule()->getDataLayout();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
      Info.memVT = getValueType(DL, I.getType());
    else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
      Info.memVT = getPointerTy(DL);
    else
      Info.memVT = getValueType(DL, I.getType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();

    return true;
  }
  case Intrinsic::nvvm_ldg_global_i:
  case Intrinsic::nvvm_ldg_global_f:
  case Intrinsic::nvvm_ldg_global_p: {
    auto &DL = I.getModule()->getDataLayout();

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    if (Intrinsic == Intrinsic::nvvm_ldg_global_i)
      Info.memVT = getValueType(DL, I.getType());
    else if (Intrinsic == Intrinsic::nvvm_ldg_global_p)
      Info.memVT = getPointerTy(DL);
    else
      Info.memVT = getValueType(DL, I.getType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();

    return true;
  }
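  // ldu and ldg are read-only global loads (PTX ldu.global and
  // ld.global.nc respectively); the required alignment is not implied by
  // the loaded type, so it is taken from the constant second argument of
  // the intrinsic call. The _p variants load a pointer, hence
  // getPointerTy(DL) as memVT above.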

  case Intrinsic::nvvm_tex_1d_v4f32_s32:
  case Intrinsic::nvvm_tex_1d_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
  case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_v4f32_s32:
  case Intrinsic::nvvm_tex_2d_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
  case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_3d_v4f32_s32:
  case Intrinsic::nvvm_tex_3d_v4f32_f32:
  case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_cube_v4f32_f32:
  case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
  case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
  case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
  case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
  case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
    Info.opc = getOpcForTextureInstr(Intrinsic);
    Info.memVT = MVT::v4f32;
    Info.ptrVal = nullptr;
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = Align(16);
    return true;

  case Intrinsic::nvvm_tex_1d_v4s32_s32:
  case Intrinsic::nvvm_tex_1d_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_v4s32_s32:
  case Intrinsic::nvvm_tex_2d_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_3d_v4s32_s32:
  case Intrinsic::nvvm_tex_3d_v4s32_f32:
  case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_v4u32_f32:
  case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
  case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
  case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_v4u32_s32:
  case Intrinsic::nvvm_tex_1d_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_v4u32_s32:
  case Intrinsic::nvvm_tex_2d_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_3d_v4u32_s32:
  case Intrinsic::nvvm_tex_3d_v4u32_f32:
  case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
  case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
    Info.opc = getOpcForTextureInstr(Intrinsic);
    Info.memVT = MVT::v4i32;
    Info.ptrVal = nullptr;
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = Align(16);
    return true;
3991
  case Intrinsic::nvvm_suld_1d_i8_clamp:
  case Intrinsic::nvvm_suld_1d_v2i8_clamp:
  case Intrinsic::nvvm_suld_1d_v4i8_clamp:
  case Intrinsic::nvvm_suld_1d_array_i8_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
  case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
  case Intrinsic::nvvm_suld_2d_i8_clamp:
  case Intrinsic::nvvm_suld_2d_v2i8_clamp:
  case Intrinsic::nvvm_suld_2d_v4i8_clamp:
  case Intrinsic::nvvm_suld_2d_array_i8_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
  case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
  case Intrinsic::nvvm_suld_3d_i8_clamp:
  case Intrinsic::nvvm_suld_3d_v2i8_clamp:
  case Intrinsic::nvvm_suld_3d_v4i8_clamp:
  case Intrinsic::nvvm_suld_1d_i8_trap:
  case Intrinsic::nvvm_suld_1d_v2i8_trap:
  case Intrinsic::nvvm_suld_1d_v4i8_trap:
  case Intrinsic::nvvm_suld_1d_array_i8_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
  case Intrinsic::nvvm_suld_2d_i8_trap:
  case Intrinsic::nvvm_suld_2d_v2i8_trap:
  case Intrinsic::nvvm_suld_2d_v4i8_trap:
  case Intrinsic::nvvm_suld_2d_array_i8_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
  case Intrinsic::nvvm_suld_3d_i8_trap:
  case Intrinsic::nvvm_suld_3d_v2i8_trap:
  case Intrinsic::nvvm_suld_3d_v4i8_trap:
  case Intrinsic::nvvm_suld_1d_i8_zero:
  case Intrinsic::nvvm_suld_1d_v2i8_zero:
  case Intrinsic::nvvm_suld_1d_v4i8_zero:
  case Intrinsic::nvvm_suld_1d_array_i8_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
  case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
  case Intrinsic::nvvm_suld_2d_i8_zero:
  case Intrinsic::nvvm_suld_2d_v2i8_zero:
  case Intrinsic::nvvm_suld_2d_v4i8_zero:
  case Intrinsic::nvvm_suld_2d_array_i8_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
  case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
  case Intrinsic::nvvm_suld_3d_i8_zero:
  case Intrinsic::nvvm_suld_3d_v2i8_zero:
  case Intrinsic::nvvm_suld_3d_v4i8_zero:
    Info.opc = getOpcForSurfaceInstr(Intrinsic);
    Info.memVT = MVT::i8;
    Info.ptrVal = nullptr;
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = Align(16);
    return true;

  case Intrinsic::nvvm_suld_1d_i16_clamp:
  case Intrinsic::nvvm_suld_1d_v2i16_clamp:
  case Intrinsic::nvvm_suld_1d_v4i16_clamp:
  case Intrinsic::nvvm_suld_1d_array_i16_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
  case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
  case Intrinsic::nvvm_suld_2d_i16_clamp:
  case Intrinsic::nvvm_suld_2d_v2i16_clamp:
  case Intrinsic::nvvm_suld_2d_v4i16_clamp:
  case Intrinsic::nvvm_suld_2d_array_i16_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
  case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
  case Intrinsic::nvvm_suld_3d_i16_clamp:
  case Intrinsic::nvvm_suld_3d_v2i16_clamp:
  case Intrinsic::nvvm_suld_3d_v4i16_clamp:
  case Intrinsic::nvvm_suld_1d_i16_trap:
  case Intrinsic::nvvm_suld_1d_v2i16_trap:
  case Intrinsic::nvvm_suld_1d_v4i16_trap:
  case Intrinsic::nvvm_suld_1d_array_i16_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
  case Intrinsic::nvvm_suld_2d_i16_trap:
  case Intrinsic::nvvm_suld_2d_v2i16_trap:
  case Intrinsic::nvvm_suld_2d_v4i16_trap:
  case Intrinsic::nvvm_suld_2d_array_i16_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
  case Intrinsic::nvvm_suld_3d_i16_trap:
  case Intrinsic::nvvm_suld_3d_v2i16_trap:
  case Intrinsic::nvvm_suld_3d_v4i16_trap:
  case Intrinsic::nvvm_suld_1d_i16_zero:
  case Intrinsic::nvvm_suld_1d_v2i16_zero:
  case Intrinsic::nvvm_suld_1d_v4i16_zero:
  case Intrinsic::nvvm_suld_1d_array_i16_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
  case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
  case Intrinsic::nvvm_suld_2d_i16_zero:
  case Intrinsic::nvvm_suld_2d_v2i16_zero:
  case Intrinsic::nvvm_suld_2d_v4i16_zero:
  case Intrinsic::nvvm_suld_2d_array_i16_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
  case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
  case Intrinsic::nvvm_suld_3d_i16_zero:
  case Intrinsic::nvvm_suld_3d_v2i16_zero:
  case Intrinsic::nvvm_suld_3d_v4i16_zero:
    Info.opc = getOpcForSurfaceInstr(Intrinsic);
    Info.memVT = MVT::i16;
    Info.ptrVal = nullptr;
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = Align(16);
    return true;

  case Intrinsic::nvvm_suld_1d_i32_clamp:
  case Intrinsic::nvvm_suld_1d_v2i32_clamp:
  case Intrinsic::nvvm_suld_1d_v4i32_clamp:
  case Intrinsic::nvvm_suld_1d_array_i32_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
  case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
  case Intrinsic::nvvm_suld_2d_i32_clamp:
  case Intrinsic::nvvm_suld_2d_v2i32_clamp:
  case Intrinsic::nvvm_suld_2d_v4i32_clamp:
  case Intrinsic::nvvm_suld_2d_array_i32_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
  case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
  case Intrinsic::nvvm_suld_3d_i32_clamp:
  case Intrinsic::nvvm_suld_3d_v2i32_clamp:
  case Intrinsic::nvvm_suld_3d_v4i32_clamp:
  case Intrinsic::nvvm_suld_1d_i32_trap:
  case Intrinsic::nvvm_suld_1d_v2i32_trap:
  case Intrinsic::nvvm_suld_1d_v4i32_trap:
  case Intrinsic::nvvm_suld_1d_array_i32_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
  case Intrinsic::nvvm_suld_2d_i32_trap:
  case Intrinsic::nvvm_suld_2d_v2i32_trap:
  case Intrinsic::nvvm_suld_2d_v4i32_trap:
  case Intrinsic::nvvm_suld_2d_array_i32_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
  case Intrinsic::nvvm_suld_3d_i32_trap:
  case Intrinsic::nvvm_suld_3d_v2i32_trap:
  case Intrinsic::nvvm_suld_3d_v4i32_trap:
  case Intrinsic::nvvm_suld_1d_i32_zero:
  case Intrinsic::nvvm_suld_1d_v2i32_zero:
  case Intrinsic::nvvm_suld_1d_v4i32_zero:
  case Intrinsic::nvvm_suld_1d_array_i32_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
  case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
  case Intrinsic::nvvm_suld_2d_i32_zero:
  case Intrinsic::nvvm_suld_2d_v2i32_zero:
  case Intrinsic::nvvm_suld_2d_v4i32_zero:
  case Intrinsic::nvvm_suld_2d_array_i32_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
  case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
  case Intrinsic::nvvm_suld_3d_i32_zero:
  case Intrinsic::nvvm_suld_3d_v2i32_zero:
  case Intrinsic::nvvm_suld_3d_v4i32_zero:
    Info.opc = getOpcForSurfaceInstr(Intrinsic);
    Info.memVT = MVT::i32;
    Info.ptrVal = nullptr;
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = Align(16);
    return true;

  case Intrinsic::nvvm_suld_1d_i64_clamp:
  case Intrinsic::nvvm_suld_1d_v2i64_clamp:
  case Intrinsic::nvvm_suld_1d_array_i64_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
  case Intrinsic::nvvm_suld_2d_i64_clamp:
  case Intrinsic::nvvm_suld_2d_v2i64_clamp:
  case Intrinsic::nvvm_suld_2d_array_i64_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
  case Intrinsic::nvvm_suld_3d_i64_clamp:
  case Intrinsic::nvvm_suld_3d_v2i64_clamp:
  case Intrinsic::nvvm_suld_1d_i64_trap:
  case Intrinsic::nvvm_suld_1d_v2i64_trap:
  case Intrinsic::nvvm_suld_1d_array_i64_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
  case Intrinsic::nvvm_suld_2d_i64_trap:
  case Intrinsic::nvvm_suld_2d_v2i64_trap:
  case Intrinsic::nvvm_suld_2d_array_i64_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
  case Intrinsic::nvvm_suld_3d_i64_trap:
  case Intrinsic::nvvm_suld_3d_v2i64_trap:
  case Intrinsic::nvvm_suld_1d_i64_zero:
  case Intrinsic::nvvm_suld_1d_v2i64_zero:
  case Intrinsic::nvvm_suld_1d_array_i64_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
  case Intrinsic::nvvm_suld_2d_i64_zero:
  case Intrinsic::nvvm_suld_2d_v2i64_zero:
  case Intrinsic::nvvm_suld_2d_array_i64_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
  case Intrinsic::nvvm_suld_3d_i64_zero:
  case Intrinsic::nvvm_suld_3d_v2i64_zero:
    Info.opc = getOpcForSurfaceInstr(Intrinsic);
    Info.memVT = MVT::i64;
    Info.ptrVal = nullptr;
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;
    Info.align = Align(16);
    return true;
  }
  return false;
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
/// Used to guide target specific optimizations, like loop strength reduction
/// (LoopStrengthReduce.cpp) and memory optimization for address mode
/// (CodeGenPrepare.cpp)
bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // AddrMode - This represents an addressing mode of:
  //    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  //
  // The legal address modes are
  // - [avar]
  // - [areg]
  // - [areg+immoff]
  // - [immAddr]
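  //
  // For example, a PTX load such as "ld.global.f32 %f0, [%rd1+16];" uses the
  // [areg+immoff] form: a base register plus a constant byte offset. A scaled
  // index like [%rd1 + 4*%rd2] has no PTX encoding, which is why any
  // non-trivial Scale is rejected below.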

  if (AM.BaseGV) {
    return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
  }

  switch (AM.Scale) {
  case 0: // "r", "r+i" or "i" is allowed
    break;
  case 1:
    if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
      return false;
    // Otherwise we have r+i.
    break;
  default:
    // No scale > 1 is allowed
    return false;
  }
  return true;
}

//===----------------------------------------------------------------------===//
//                         NVPTX Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
NVPTXTargetLowering::ConstraintType
NVPTXTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'b':
    case 'r':
    case 'h':
    case 'c':
    case 'l':
    case 'f':
    case 'd':
    case '0':
    case 'N':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass *>
NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
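  // Map each single-letter constraint to the register class backing the
  // corresponding PTX type: 'b' -> predicate (i1), 'c'/'h' -> 16-bit integer,
  // 'r' -> 32-bit integer, 'l'/'N' -> 64-bit integer, 'f' -> f32, 'd' -> f64.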
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'b':
      return std::make_pair(0U, &NVPTX::Int1RegsRegClass);
    case 'c':
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
    case 'h':
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
    case 'r':
      return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
    case 'l':
    case 'N':
      return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
    case 'f':
      return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
    case 'd':
      return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

//===----------------------------------------------------------------------===//
//                         NVPTX DAG Combining
//===----------------------------------------------------------------------===//

bool NVPTXTargetLowering::allowFMA(MachineFunction &MF,
                                   CodeGenOpt::Level OptLevel) const {
  // Always honor command-line argument
  if (FMAContractLevelOpt.getNumOccurrences() > 0)
    return FMAContractLevelOpt > 0;

  // Do not contract if we're not optimizing the code.
  if (OptLevel == 0)
    return false;

  // Honor TargetOptions flags that explicitly say fusion is okay.
  if (MF.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast)
    return true;

  return allowUnsafeFPMath(MF);
}

bool NVPTXTargetLowering::allowUnsafeFPMath(MachineFunction &MF) const {
  // Honor TargetOptions flags that explicitly say unsafe math is okay.
  if (MF.getTarget().Options.UnsafeFPMath)
    return true;

  // Allow unsafe math if the unsafe-fp-math attribute explicitly says so.
  const Function &F = MF.getFunction();
  if (F.hasFnAttribute("unsafe-fp-math")) {
    Attribute Attr = F.getFnAttribute("unsafe-fp-math");
    StringRef Val = Attr.getValueAsString();
    if (Val == "true")
      return true;
  }

  return false;
}

/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
/// operands N0 and N1. This is a helper for PerformADDCombine that is
/// called with the default operands, and if that fails, with commuted
/// operands.
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
                                             TargetLowering::DAGCombinerInfo &DCI,
                                             const NVPTXSubtarget &Subtarget,
                                             CodeGenOpt::Level OptLevel) {
  SelectionDAG &DAG = DCI.DAG;
  // Skip vector types; this combine only handles scalar values.
  EVT VT = N0.getValueType();
  if (VT.isVector())
    return SDValue();

  // fold (add (mul a, b), c) -> (mad a, b, c)
  //
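  // For example, "%t = mul i32 %a, %b; %r = add i32 %t, %c" becomes a single
  // NVPTXISD::IMAD node, which is then selected to PTX's integer
  // multiply-add (mad.lo.s32) when the conditions below hold.
  //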
  if (N0.getOpcode() == ISD::MUL) {
    assert(VT.isInteger());
    // For integer:
    // Since integer multiply-add costs the same as integer multiply
    // but is more costly than integer add, do the fusion only when
    // the mul is only used in the add.
    if (OptLevel == CodeGenOpt::None || VT != MVT::i32 ||
        !N0.getNode()->hasOneUse())
      return SDValue();

    // Do the folding
    return DAG.getNode(NVPTXISD::IMAD, SDLoc(N), VT,
                       N0.getOperand(0), N0.getOperand(1), N1);
  }
  else if (N0.getOpcode() == ISD::FMUL) {
    if (VT == MVT::f32 || VT == MVT::f64) {
      const auto *TLI = static_cast<const NVPTXTargetLowering *>(
          &DAG.getTargetLoweringInfo());
      if (!TLI->allowFMA(DAG.getMachineFunction(), OptLevel))
        return SDValue();

      // For floating point:
      // Do the fusion only when the mul has fewer than 5 uses and all
      // of them are adds.
      // The heuristic is that if a use is not an add, then that use
      // cannot be fused into an fma, so the mul is still needed anyway.
      // If there are more than 4 uses, even if they are all adds, fusing
      // them will increase register pressure.
      //
      int numUses = 0;
      int nonAddCount = 0;
      for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
                                UE = N0.getNode()->use_end();
           UI != UE; ++UI) {
        numUses++;
        SDNode *User = *UI;
        if (User->getOpcode() != ISD::FADD)
          ++nonAddCount;
      }
      if (numUses >= 5)
        return SDValue();
      if (nonAddCount) {
        int orderNo = N->getIROrder();
        int orderNo2 = N0.getNode()->getIROrder();
        // Simple heuristic for estimating potential register pressure:
        // the IR-order difference approximates the distance between the
        // def and this use, and a longer distance is more likely to
        // cause register pressure.
        if (orderNo - orderNo2 < 500)
          return SDValue();

        // Now, check if at least one of the FMUL's operands is live beyond
        // node N, which guarantees that the FMA will not increase register
        // pressure at node N.
        bool opIsLive = false;
        const SDNode *left = N0.getOperand(0).getNode();
        const SDNode *right = N0.getOperand(1).getNode();

        if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
          opIsLive = true;

        if (!opIsLive)
          for (SDNode::use_iterator UI = left->use_begin(),
                                    UE = left->use_end();
               UI != UE; ++UI) {
            SDNode *User = *UI;
            int orderNo3 = User->getIROrder();
            if (orderNo3 > orderNo) {
              opIsLive = true;
              break;
            }
          }

        if (!opIsLive)
          for (SDNode::use_iterator UI = right->use_begin(),
                                    UE = right->use_end();
               UI != UE; ++UI) {
            SDNode *User = *UI;
            int orderNo3 = User->getIROrder();
            if (orderNo3 > orderNo) {
              opIsLive = true;
              break;
            }
          }

        if (!opIsLive)
          return SDValue();
      }

      return DAG.getNode(ISD::FMA, SDLoc(N), VT,
                         N0.getOperand(0), N0.getOperand(1), N1);
    }
  }

  return SDValue();
}

/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
///
static SDValue PerformADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const NVPTXSubtarget &Subtarget,
                                 CodeGenOpt::Level OptLevel) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // First try with the default operand order.
  if (SDValue Result =
          PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget, OptLevel))
    return Result;

  // If that didn't work, try again with the operands commuted.
  return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget, OptLevel);
}

static SDValue PerformANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  // The type legalizer turns a vector load of i8 values into a zextload to i16
  // registers, optionally ANY_EXTENDs it (if target type is integer),
  // and ANDs off the high 8 bits. Since we turn this load into a
  // target-specific DAG node, the DAG combiner fails to eliminate these AND
  // nodes. Do that here.
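  //
  // In other words, a pattern like
  //   (and (any_extend (LoadV2 ...)), 0xff)
  // is collapsed to the LoadV2 result (re-extended with a zext when needed),
  // since the extending load already guarantees zeroed high bits.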
  SDValue Val = N->getOperand(0);
  SDValue Mask = N->getOperand(1);

  if (isa<ConstantSDNode>(Val)) {
    std::swap(Val, Mask);
  }

  SDValue AExt;
  // Generally, we will see zextload -> IMOV16rr -> ANY_EXTEND -> and
  if (Val.getOpcode() == ISD::ANY_EXTEND) {
    AExt = Val;
    Val = Val->getOperand(0);
  }

  if (Val->isMachineOpcode() && Val->getMachineOpcode() == NVPTX::IMOV16rr) {
    Val = Val->getOperand(0);
  }

  if (Val->getOpcode() == NVPTXISD::LoadV2 ||
      Val->getOpcode() == NVPTXISD::LoadV4) {
    ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(Mask);
    if (!MaskCnst) {
      // Not an AND with a constant
      return SDValue();
    }

    uint64_t MaskVal = MaskCnst->getZExtValue();
    if (MaskVal != 0xff) {
      // Not an AND that chops off the top 8 bits
      return SDValue();
    }

    MemSDNode *Mem = dyn_cast<MemSDNode>(Val);
    if (!Mem) {
      // Not a MemSDNode?!?
      return SDValue();
    }

    EVT MemVT = Mem->getMemoryVT();
    if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {
      // We only handle the i8 case
      return SDValue();
    }

    unsigned ExtType =
        cast<ConstantSDNode>(Val->getOperand(Val->getNumOperands() - 1))
            ->getZExtValue();
    if (ExtType == ISD::SEXTLOAD) {
      // If for some reason the load is a sextload, the and is needed to zero
      // out the high 8 bits
      return SDValue();
    }

    bool AddTo = false;
    if (AExt.getNode() != nullptr) {
      // Re-insert the ext as a zext.
      Val = DCI.DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
                            AExt.getValueType(), Val);
      AddTo = true;
    }

    // If we get here, the AND is unnecessary. Just replace it with the load.
    DCI.CombineTo(N, Val, AddTo);
  }

  return SDValue();
}

static SDValue PerformREMCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 CodeGenOpt::Level OptLevel) {
  assert(N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM);

  // Don't do anything at less than -O2.
  if (OptLevel < CodeGenOpt::Default)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  bool IsSigned = N->getOpcode() == ISD::SREM;
  unsigned DivOpc = IsSigned ? ISD::SDIV : ISD::UDIV;

  const SDValue &Num = N->getOperand(0);
  const SDValue &Den = N->getOperand(1);
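
  // If the quotient of the same operands is already computed elsewhere in
  // the DAG, recover the remainder with a multiply and a subtract
  // (Num - (Num / Den) * Den), reusing that division instead of keeping a
  // separate, expensive remainder operation around.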
  for (const SDNode *U : Num->uses()) {
    if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
        U->getOperand(1) == Den) {
      // Num % Den -> Num - (Num / Den) * Den
      return DAG.getNode(ISD::SUB, DL, VT, Num,
                         DAG.getNode(ISD::MUL, DL, VT,
                                     DAG.getNode(DivOpc, DL, VT, Num, Den),
                                     Den));
    }
  }
  return SDValue();
}

enum OperandSignedness {
  Signed = 0,
  Unsigned,
  Unknown
};

/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
/// that can be demoted to \p OptSize bits without loss of information. The
/// signedness of the operand, if determinable, is placed in \p S.
static bool IsMulWideOperandDemotable(SDValue Op,
                                      unsigned OptSize,
                                      OperandSignedness &S) {
  S = Unknown;

  if (Op.getOpcode() == ISD::SIGN_EXTEND ||
      Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT OrigVT = Op.getOperand(0).getValueType();
    if (OrigVT.getFixedSizeInBits() <= OptSize) {
      S = Signed;
      return true;
    }
  } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
    EVT OrigVT = Op.getOperand(0).getValueType();
    if (OrigVT.getFixedSizeInBits() <= OptSize) {
      S = Unsigned;
      return true;
    }
  }

  return false;
}

/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
/// be demoted to \p OptSize bits without loss of information. If the operands
/// contain a constant, it should appear as the RHS operand. The signedness of
/// the operands is placed in \p IsSigned.
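///
/// For example, with OptSize = 16, (zext i16 %a) and the constant 65000 are
/// both demotable (65000 fits in 16 unsigned bits), while a negative constant
/// such as -5 is only demotable when the LHS is sign-extended.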
static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
                                        unsigned OptSize,
                                        bool &IsSigned) {
  OperandSignedness LHSSign;

  // The LHS operand must be a demotable op
  if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
    return false;

  // We should have been able to determine the signedness from the LHS
  if (LHSSign == Unknown)
    return false;

  IsSigned = (LHSSign == Signed);

  // The RHS can be a demotable op or a constant
  if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
    const APInt &Val = CI->getAPIntValue();
    if (LHSSign == Unsigned) {
      return Val.isIntN(OptSize);
    } else {
      return Val.isSignedIntN(OptSize);
    }
  } else {
    OperandSignedness RHSSign;
    if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
      return false;

    return LHSSign == RHSSign;
  }
}

/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
/// amount.
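///
/// For example, (mul i32 (sext i16 %a), (sext i16 %b)) only needs the low 16
/// bits of each operand, so it can be selected as mul.wide.s16, which computes
/// the full 32-bit product of two 16-bit inputs.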
static SDValue TryMULWIDECombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  EVT MulType = N->getValueType(0);
  if (MulType != MVT::i32 && MulType != MVT::i64) {
    return SDValue();
  }

  SDLoc DL(N);
  unsigned OptSize = MulType.getSizeInBits() >> 1;
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Canonicalize the multiply so the constant (if any) is on the right
  if (N->getOpcode() == ISD::MUL) {
    if (isa<ConstantSDNode>(LHS)) {
      std::swap(LHS, RHS);
    }
  }

  // If we have a SHL, determine the actual multiply amount
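  // (e.g. (shl %x, 3) is handled as if it were (mul %x, 8)).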
  if (N->getOpcode() == ISD::SHL) {
    ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);
    if (!ShlRHS) {
      return SDValue();
    }

    APInt ShiftAmt = ShlRHS->getAPIntValue();
    unsigned BitWidth = MulType.getSizeInBits();
    if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
      APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
      RHS = DCI.DAG.getConstant(MulVal, DL, MulType);
    } else {
      return SDValue();
    }
  }

  bool Signed;
  // Verify that our operands are demotable
  if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
    return SDValue();
  }

  EVT DemotedVT;
  if (MulType == MVT::i32) {
    DemotedVT = MVT::i16;
  } else {
    DemotedVT = MVT::i32;
  }

  // Truncate the operands to the correct size. Note that these are just for
  // type consistency and will (likely) be eliminated in later phases.
  SDValue TruncLHS =
      DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);
  SDValue TruncRHS =
      DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);

  unsigned Opc;
  if (Signed) {
    Opc = NVPTXISD::MUL_WIDE_SIGNED;
  } else {
    Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
  }

  return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
}

/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
static SDValue PerformMULCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 CodeGenOpt::Level OptLevel) {
  if (OptLevel > 0) {
    // Try mul.wide combining at OptLevel > 0
    if (SDValue Ret = TryMULWIDECombine(N, DCI))
      return Ret;
  }

  return SDValue();
}

/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
static SDValue PerformSHLCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 CodeGenOpt::Level OptLevel) {
  if (OptLevel > 0) {
    // Try mul.wide combining at OptLevel > 0
    if (SDValue Ret = TryMULWIDECombine(N, DCI))
      return Ret;
  }

  return SDValue();
}

static SDValue PerformSETCCCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI) {
  EVT CCType = N->getValueType(0);
  SDValue A = N->getOperand(0);
  SDValue B = N->getOperand(1);

  if (CCType != MVT::v2i1 || A.getValueType() != MVT::v2f16)
    return SDValue();

  SDLoc DL(N);
  // setp.f16x2 returns two scalar predicates, which we need to
  // convert back to v2i1. The returned result will be scalarized by
  // the legalizer, but the comparison will remain a single vector
  // instruction.
  SDValue CCNode = DCI.DAG.getNode(NVPTXISD::SETP_F16X2, DL,
                                   DCI.DAG.getVTList(MVT::i1, MVT::i1),
                                   {A, B, N->getOperand(2)});
  return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, CCType, CCNode.getValue(0),
                         CCNode.getValue(1));
}

SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  CodeGenOpt::Level OptLevel = getTargetMachine().getOptLevel();
  switch (N->getOpcode()) {
  default: break;
  case ISD::ADD:
  case ISD::FADD:
    return PerformADDCombine(N, DCI, STI, OptLevel);
  case ISD::MUL:
    return PerformMULCombine(N, DCI, OptLevel);
  case ISD::SHL:
    return PerformSHLCombine(N, DCI, OptLevel);
  case ISD::AND:
    return PerformANDCombine(N, DCI);
  case ISD::UREM:
  case ISD::SREM:
    return PerformREMCombine(N, DCI, OptLevel);
  case ISD::SETCC:
    return PerformSETCCCombine(N, DCI);
  }
  return SDValue();
}

/// ReplaceLoadVector - Convert vector loads into multi-output scalar loads.
static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
                              SmallVectorImpl<SDValue> &Results) {
  EVT ResVT = N->getValueType(0);
  SDLoc DL(N);

  assert(ResVT.isVector() && "Vector load must have vector type");

  // We only handle "native" vector sizes for now, e.g. <4 x double> is not
  // legal. We can (and should) split that into 2 loads of <2 x double> here
  // but I'm leaving that as a TODO for now.
  assert(ResVT.isSimple() && "Can only handle simple types");
  switch (ResVT.getSimpleVT().SimpleTy) {
  default:
    return;
  case MVT::v2i8:
  case MVT::v2i16:
  case MVT::v2i32:
  case MVT::v2i64:
  case MVT::v2f16:
  case MVT::v2f32:
  case MVT::v2f64:
  case MVT::v4i8:
  case MVT::v4i16:
  case MVT::v4i32:
  case MVT::v4f16:
  case MVT::v4f32:
  case MVT::v8f16: // <4 x f16x2>
    // This is a "native" vector type
    break;
  }

  LoadSDNode *LD = cast<LoadSDNode>(N);

  Align Alignment = LD->getAlign();
  auto &TD = DAG.getDataLayout();
  Align PrefAlign = TD.getPrefTypeAlign(ResVT.getTypeForEVT(*DAG.getContext()));
  if (Alignment < PrefAlign) {
    // This load is not sufficiently aligned, so bail out and let this vector
    // load be scalarized. Note that we may still be able to emit smaller
    // vector loads. For example, if we are loading a <4 x float> with an
    // alignment of 8, this check will fail but the legalizer will try again
    // with 2 x <2 x float>, which will succeed with an alignment of 8.
    return;
  }

  EVT EltVT = ResVT.getVectorElementType();
  unsigned NumElts = ResVT.getVectorNumElements();

  // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
  // Therefore, we must ensure the type is legal. For i1 and i8, we set the
  // loaded type to i16 and propagate the "real" type as the memory type.
  bool NeedTrunc = false;
  if (EltVT.getSizeInBits() < 16) {
    EltVT = MVT::i16;
    NeedTrunc = true;
  }

  unsigned Opcode = 0;
  SDVTList LdResVTs;
  bool LoadF16x2 = false;

  switch (NumElts) {
  default:
    return;
  case 2:
    Opcode = NVPTXISD::LoadV2;
    LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
    break;
  case 4: {
    Opcode = NVPTXISD::LoadV4;
    EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
    LdResVTs = DAG.getVTList(ListVTs);
    break;
  }
  case 8: {
    // v8f16 is a special case. PTX doesn't have an ld.v8.f16
    // instruction. Instead, we split the vector into v2f16 chunks and
    // load them with ld.v4.b32.
    assert(EltVT == MVT::f16 && "Unsupported v8 vector type.");
    LoadF16x2 = true;
    Opcode = NVPTXISD::LoadV4;
    EVT ListVTs[] = {MVT::v2f16, MVT::v2f16, MVT::v2f16, MVT::v2f16,
                     MVT::Other};
    LdResVTs = DAG.getVTList(ListVTs);
    break;
  }
  }

  // Copy regular operands
  SmallVector<SDValue, 8> OtherOps(N->op_begin(), N->op_end());

  // The select routine does not have access to the LoadSDNode instance, so
  // pass along the extension information
  OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));

  SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
                                          LD->getMemoryVT(),
                                          LD->getMemOperand());

  SmallVector<SDValue, 8> ScalarRes;
  if (LoadF16x2) {
    // Split v2f16 subvectors back into individual elements.
    NumElts /= 2;
    for (unsigned i = 0; i < NumElts; ++i) {
      SDValue SubVector = NewLD.getValue(i);
      SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
                               DAG.getIntPtrConstant(0, DL));
      SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
                               DAG.getIntPtrConstant(1, DL));
      ScalarRes.push_back(E0);
      ScalarRes.push_back(E1);
    }
  } else {
    for (unsigned i = 0; i < NumElts; ++i) {
      SDValue Res = NewLD.getValue(i);
      if (NeedTrunc)
        Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
      ScalarRes.push_back(Res);
    }
  }

  SDValue LoadChain = NewLD.getValue(NumElts);

  SDValue BuildVec = DAG.getBuildVector(ResVT, DL, ScalarRes);

  Results.push_back(BuildVec);
  Results.push_back(LoadChain);
}

static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &Results) {
  SDValue Chain = N->getOperand(0);
  SDValue Intrin = N->getOperand(1);
  SDLoc DL(N);

  // Get the intrinsic ID
  unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
  switch (IntrinNo) {
  default:
    return;
  case Intrinsic::nvvm_ldg_global_i:
  case Intrinsic::nvvm_ldg_global_f:
  case Intrinsic::nvvm_ldg_global_p:
  case Intrinsic::nvvm_ldu_global_i:
  case Intrinsic::nvvm_ldu_global_f:
  case Intrinsic::nvvm_ldu_global_p: {
    EVT ResVT = N->getValueType(0);

    if (ResVT.isVector()) {
      // Vector LDG/LDU

      unsigned NumElts = ResVT.getVectorNumElements();
      EVT EltVT = ResVT.getVectorElementType();

      // Since LDU/LDG are target nodes, we cannot rely on DAG type
      // legalization. Therefore, we must ensure the type is legal. For i1 and
      // i8, we set the loaded type to i16 and propagate the "real" type as the
      // memory type.
      bool NeedTrunc = false;
      if (EltVT.getSizeInBits() < 16) {
        EltVT = MVT::i16;
        NeedTrunc = true;
      }

      unsigned Opcode = 0;
      SDVTList LdResVTs;

      switch (NumElts) {
      default:
        return;
      case 2:
        switch (IntrinNo) {
        default:
          return;
        case Intrinsic::nvvm_ldg_global_i:
        case Intrinsic::nvvm_ldg_global_f:
        case Intrinsic::nvvm_ldg_global_p:
          Opcode = NVPTXISD::LDGV2;
          break;
        case Intrinsic::nvvm_ldu_global_i:
        case Intrinsic::nvvm_ldu_global_f:
        case Intrinsic::nvvm_ldu_global_p:
          Opcode = NVPTXISD::LDUV2;
          break;
        }
        LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
        break;
      case 4: {
        switch (IntrinNo) {
        default:
          return;
        case Intrinsic::nvvm_ldg_global_i:
        case Intrinsic::nvvm_ldg_global_f:
        case Intrinsic::nvvm_ldg_global_p:
          Opcode = NVPTXISD::LDGV4;
          break;
        case Intrinsic::nvvm_ldu_global_i:
        case Intrinsic::nvvm_ldu_global_f:
        case Intrinsic::nvvm_ldu_global_p:
          Opcode = NVPTXISD::LDUV4;
          break;
        }
        EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
        LdResVTs = DAG.getVTList(ListVTs);
        break;
      }
      }

      SmallVector<SDValue, 8> OtherOps;

      // Copy regular operands, skipping operand 1 (the intrinsic ID).
      OtherOps.push_back(Chain); // Chain
      OtherOps.append(N->op_begin() + 2, N->op_end());

      MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);

      SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
                                              MemSD->getMemoryVT(),
                                              MemSD->getMemOperand());

      SmallVector<SDValue, 4> ScalarRes;

      for (unsigned i = 0; i < NumElts; ++i) {
        SDValue Res = NewLD.getValue(i);
        if (NeedTrunc)
          Res =
              DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
        ScalarRes.push_back(Res);
      }

      SDValue LoadChain = NewLD.getValue(NumElts);

      SDValue BuildVec =
          DAG.getBuildVector(ResVT, DL, ScalarRes);

      Results.push_back(BuildVec);
      Results.push_back(LoadChain);
    } else {
      // i8 LDG/LDU
      assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
             "Custom handling of non-i8 ldu/ldg?");

      // Just copy all operands as-is
      SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());

      // Force output to i16
      SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);

      MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);

      // We make sure the memory type is i8, which will be used during isel
      // to select the proper instruction.
      SDValue NewLD =
          DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
                                  MVT::i8, MemSD->getMemOperand());

      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
                                    NewLD.getValue(0)));
      Results.push_back(NewLD.getValue(1));
    }
  }
  }
}

void NVPTXTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    report_fatal_error("Unhandled custom legalization");
  case ISD::LOAD:
    ReplaceLoadVector(N, DAG, Results);
    return;
  case ISD::INTRINSIC_W_CHAIN:
    ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
    return;
  }
}

// Pin NVPTXTargetObjectFile's vtables to this file.
NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {}

MCSection *NVPTXTargetObjectFile::SelectSectionForGlobal(
    const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
  return getDataSection();
}