//===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

static cl::opt<unsigned> UnrollThresholdPrivate(
  "amdgpu-unroll-threshold-private",
  cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
  cl::init(2700), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdLocal(
  "amdgpu-unroll-threshold-local",
  cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
  cl::init(1000), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdIf(
  "amdgpu-unroll-threshold-if",
  cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
  cl::init(150), cl::Hidden);

static cl::opt<bool> UnrollRuntimeLocal(
  "amdgpu-unroll-runtime-local",
  cl::desc("Allow runtime unroll for AMDGPU if local memory used in a loop"),
  cl::init(true), cl::Hidden);

static cl::opt<bool> UseLegacyDA(
  "amdgpu-use-legacy-divergence-analysis",
  cl::desc("Enable legacy divergence analysis for AMDGPU"),
  cl::init(false), cl::Hidden);

static cl::opt<unsigned> UnrollMaxBlockToAnalyze(
    "amdgpu-unroll-max-block-to-analyze",
    cl::desc("Inner loop block size threshold to analyze in unroll for AMDGPU"),
    cl::init(32), cl::Hidden);

static cl::opt<unsigned> ArgAllocaCost("amdgpu-inline-arg-alloca-cost",
                                       cl::Hidden, cl::init(4000),
                                       cl::desc("Cost of alloca argument"));

// If the amount of scratch memory to eliminate exceeds our ability to
// allocate it into registers, we gain nothing by aggressively inlining
// functions for that heuristic.
static cl::opt<unsigned>
    ArgAllocaCutoff("amdgpu-inline-arg-alloca-cutoff", cl::Hidden,
                    cl::init(256),
                    cl::desc("Maximum alloca size to use for inline cost"));

// Inliner constraint to achieve reasonable compilation time.
static cl::opt<size_t> InlineMaxBB(
    "amdgpu-inline-max-bb", cl::Hidden, cl::init(1100),
    cl::desc("Maximum number of BBs allowed in a function after inlining"
             " (compile time constraint)"));

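// Return true if \p Cond (followed transitively through its operands, up to a
// small depth) uses a PHI defined in \p L itself rather than in one of its
// subloops. Such a condition, e.g. a loop-carried flag tested by an "if"
// inside the loop, may be simplified away once the loop is unrolled.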
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I)
    return false;

  for (const Value *V : I->operand_values()) {
    if (!L->contains(I))
      continue;
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
                  return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
      return true;
  }
  return false;
}

AMDGPUTTIImpl::AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getParent()->getDataLayout()),
      TargetTriple(TM->getTargetTriple()),
      ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
      TLI(ST->getTargetLowering()) {}

void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                            TTI::UnrollingPreferences &UP) {
  const Function &F = *L->getHeader()->getParent();
  UP.Threshold = AMDGPU::getIntegerAttribute(F, "amdgpu-unroll-threshold", 300);
  UP.MaxCount = std::numeric_limits<unsigned>::max();
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit into registers. Reserve 16 registers,
  // leaving (256 - 16) * 4 = 960 bytes.
  const unsigned MaxAlloca = (256 - 16) * 4;
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;

  // If this loop has the amdgpu.loop.unroll.threshold metadata, we will use
  // the provided threshold value as the default for Threshold.
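  // The metadata is expected to look like, for example:
  //   !{!"amdgpu.loop.unroll.threshold", i32 100}
  // where the second operand supplies the threshold.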
  if (MDNode *LoopUnrollThreshold =
          findOptionMDForLoop(L, "amdgpu.loop.unroll.threshold")) {
    if (LoopUnrollThreshold->getNumOperands() == 2) {
      ConstantInt *MetaThresholdValue = mdconst::extract_or_null<ConstantInt>(
          LoopUnrollThreshold->getOperand(1));
      if (MetaThresholdValue) {
        // We will also use the supplied value for PartialThreshold for now.
        // We may introduce additional metadata if it becomes necessary in the
        // future.
        UP.Threshold = MetaThresholdValue->getSExtValue();
        UP.PartialThreshold = UP.Threshold;
        ThresholdPrivate = std::min(ThresholdPrivate, UP.Threshold);
        ThresholdLocal = std::min(ThresholdLocal, UP.Threshold);
      }
    }
  }

  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
               return SubLoop->contains(BB); }))
        continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate
      // the if region and potentially even the PHI itself, saving on both
      // divergence and registers used for the PHI.
      // Add a small bonus for each such "if" statement.
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
          BasicBlock *Succ0 = Br->getSuccessor(0);
          BasicBlock *Succ1 = Br->getSuccessor(1);
          if ((L->contains(Succ0) && L->isLoopExiting(Succ0)) ||
              (L->contains(Succ1) && L->isLoopExiting(Succ1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                              << " for loop:\n"
                              << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == AMDGPUAS::PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(getUnderlyingObject(Ptr));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
                 AS == AMDGPUAS::REGION_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unrolling for local memory if we have seen addressing that
        // is not based on a variable; most likely we will be unable to
        // combine it. Do not unroll inner loops for local memory too deeply,
        // to give an outer loop a chance to be unrolled for a more important
        // reason.
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
          continue;
        LLVM_DEBUG(dbgs() << "Allow unroll runtime for loop:\n"
                          << *L << " due to LDS use.\n");
        UP.Runtime = UnrollRuntimeLocal;
      }

      // Check if GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
             return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator.  allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs.  If this loop does an address calculation on an
      // alloca ptr, then we want to use a higher than normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here as it will make some
      // programs way too big.
      UP.Threshold = Threshold;
      LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
                        << " for loop:\n"
                        << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }
    // For a small BB of an innermost loop, increase the maximum trip count
    // to analyze, for a better cost estimation during unrolling.
    if (L->isInnermost() && BB->size() < UnrollMaxBlockToAnalyze)
      UP.MaxIterationsCountToAnalyze = 32;
  }
}

void AMDGPUTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

const FeatureBitset GCNTTIImpl::InlineFeatureIgnoreList = {
    // Codegen control options which don't matter.
    AMDGPU::FeatureEnableLoadStoreOpt, AMDGPU::FeatureEnableSIScheduler,
    AMDGPU::FeatureEnableUnsafeDSOffsetFolding, AMDGPU::FeatureFlatForGlobal,
    AMDGPU::FeaturePromoteAlloca, AMDGPU::FeatureUnalignedScratchAccess,
    AMDGPU::FeatureUnalignedAccessMode,

    AMDGPU::FeatureAutoWaitcntBeforeBarrier,

    // Property of the kernel/environment which can't actually differ.
    AMDGPU::FeatureSGPRInitBug, AMDGPU::FeatureXNACK,
    AMDGPU::FeatureTrapHandler,

    // The default assumption needs to be that ECC is enabled, but no directly
    // exposed operations depend on it, so it can be safely inlined.
    AMDGPU::FeatureSRAMECC,

    // Perf-tuning features
    AMDGPU::FeatureFastFMAF32, AMDGPU::HalfRate64Ops};

GCNTTIImpl::GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getParent()->getDataLayout()),
      ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
      TLI(ST->getTargetLowering()), CommonTTI(TM, F),
      IsGraphics(AMDGPU::isGraphics(F.getCallingConv())),
      MaxVGPRs(ST->getMaxNumVGPRs(
          std::max(ST->getWavesPerEU(F).first,
                   ST->getWavesPerEUForWorkGroup(
                       ST->getFlatWorkGroupSizes(F).second)))) {
  AMDGPU::SIModeRegisterDefaults Mode(F);
  HasFP32Denormals = Mode.allFP32Denormals();
  HasFP64FP16Denormals = Mode.allFP64FP16Denormals();
}

unsigned GCNTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  // The concept of vector registers doesn't really exist. Some packed vector
  // operations operate on the normal 32-bit registers.
  return MaxVGPRs;
}

unsigned GCNTTIImpl::getNumberOfRegisters(bool Vec) const {
  // This is really the number of registers to fill when vectorizing /
  // interleaving loops, so we lie to avoid trying to use all registers.
  return getHardwareNumberOfRegisters(Vec) >> 3;
}

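// Report the register budget in units of the given register class; e.g. a
// 64-bit register class consumes two 32-bit VGPRs, halving the reported count.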
unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
  const SIRegisterInfo *TRI = ST->getRegisterInfo();
  const TargetRegisterClass *RC = TRI->getRegClass(RCID);
  unsigned NumVGPRs = (TRI->getRegSizeInBits(*RC) + 31) / 32;
  return getHardwareNumberOfRegisters(false) / NumVGPRs;
}

unsigned GCNTTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

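// Maximum vectorization factor for a given element width. For example, with
// 32-bit elements, loads and stores may be vectorized up to 128 bits (a
// dword x4 access), i.e. VF = 4.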
unsigned GCNTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
  if (Opcode == Instruction::Load || Opcode == Instruction::Store)
    return 32 * 4 / ElemWidth;
  return (ElemWidth == 16 && ST->has16BitInsts()) ? 2 : 1;
}

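// Clamp the load vectorization factor so that chains of sub-32-bit elements
// stay within a single 128-bit access; e.g. a chain of 32 i8 loads would be
// reduced to VF = 16 (assuming LoadSize is the element size in bits).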
unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                         unsigned ChainSizeInBytes,
                                         VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * LoadSize;
  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
    // TODO: Support element-size less than 32bit?
    return 128 / LoadSize;

  return VF;
}

unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                             unsigned ChainSizeInBytes,
                                             VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize;

  return VF;
}

unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
      AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER) {
    return 512;
  }

  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  // Common to flat, global, local and region. Assume for unknown addrspace.
  return 128;
}

bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                            Align Alignment,
                                            unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
      ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                             Align Alignment,
                                             unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                              Align Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

// FIXME: Really we would like to issue multiple 128-bit loads and stores per
// iteration. Should we report a larger size and let it legalize?
//
// FIXME: Should we use narrower types for local/region, or account for when
// unaligned access is legal?
//
// FIXME: This could use fine tuning and microbenchmarks.
Type *GCNTTIImpl::getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                            unsigned SrcAddrSpace,
                                            unsigned DestAddrSpace,
                                            unsigned SrcAlign,
                                            unsigned DestAlign) const {
  unsigned MinAlign = std::min(SrcAlign, DestAlign);

  // A (multi-)dword access at an address == 2 (mod 4) will be decomposed by the
  // hardware into byte accesses. If you assume all alignments are equally
  // probable, it's more efficient on average to use short accesses for this
  // case.
  if (MinAlign == 2)
    return Type::getInt16Ty(Context);

  // Not all subtargets have 128-bit DS instructions, and we currently don't
  // form them by default.
  if (SrcAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      SrcAddrSpace == AMDGPUAS::REGION_ADDRESS ||
      DestAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      DestAddrSpace == AMDGPUAS::REGION_ADDRESS) {
    return FixedVectorType::get(Type::getInt32Ty(Context), 2);
  }

  // Global memory works best with 16-byte accesses. Private memory will also
  // hit this, although they'll be decomposed.
  return FixedVectorType::get(Type::getInt32Ty(Context), 4);
}

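// Lower the residual (sub-16-byte) tail of a memcpy expansion into the widest
// scalar pieces the alignment allows; e.g. 7 remaining well-aligned bytes
// become an i32, an i16 and an i8 copy.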
void GCNTTIImpl::getMemcpyLoopResidualLoweringType(
  SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
  unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
  unsigned SrcAlign, unsigned DestAlign) const {
  assert(RemainingBytes < 16);

  unsigned MinAlign = std::min(SrcAlign, DestAlign);

  if (MinAlign != 2) {
    Type *I64Ty = Type::getInt64Ty(Context);
    while (RemainingBytes >= 8) {
      OpsOut.push_back(I64Ty);
      RemainingBytes -= 8;
    }

    Type *I32Ty = Type::getInt32Ty(Context);
    while (RemainingBytes >= 4) {
      OpsOut.push_back(I32Ty);
      RemainingBytes -= 4;
    }
  }

  Type *I16Ty = Type::getInt16Ty(Context);
  while (RemainingBytes >= 2) {
    OpsOut.push_back(I16Ty);
    RemainingBytes -= 2;
  }

  Type *I8Ty = Type::getInt8Ty(Context);
  while (RemainingBytes) {
    OpsOut.push_back(I8Ty);
    --RemainingBytes;
  }
}

unsigned GCNTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

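// Describe DS/atomic intrinsics as target memory intrinsics so generic
// analyses can reason about them; for the intrinsics handled below, operand 2
// carries the atomic ordering and operand 4 the volatile flag.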
bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                       MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
    auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
    if (!Ordering || !Volatile)
      return false; // Invalid.

    unsigned OrderingVal = Ordering->getZExtValue();
    if (OrderingVal > static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
      return false;

    Info.PtrVal = Inst->getArgOperand(0);
    Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
    Info.ReadMem = true;
    Info.WriteMem = true;
    Info.IsVolatile = !Volatile->isNullValue();
    return true;
  }
  default:
    return false;
  }
}

int GCNTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                       TTI::TargetCostKind CostKind,
                                       TTI::OperandValueKind Opd1Info,
                                       TTI::OperandValueKind Opd2Info,
                                       TTI::OperandValueProperties Opd1PropInfo,
                                       TTI::OperandValueProperties Opd2PropInfo,
                                       ArrayRef<const Value *> Args,
                                       const Instruction *CxtI) {
  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    // FIXME: We're having to query the throughput cost so that the basic
    // implementation tries to generate legalization and scalarization costs.
    // Maybe we could hoist the scalarization code here?
    if (CostKind != TTI::TCK_CodeSize)
      return BaseT::getArithmeticInstrCost(Opcode, Ty, TTI::TCK_RecipThroughput,
                                           Opd1Info, Opd2Info, Opd1PropInfo,
                                           Opd2PropInfo, Args, CxtI);
    // Scalarization

    // Check if any of the operands are vector operands.
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

    bool IsFloat = Ty->isFPOrFPVectorTy();
    // Assume that floating point arithmetic operations cost twice as much as
    // integer operations.
    unsigned OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      return LT.first * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, then assume that the code is twice
      // as expensive.
      return LT.first * 2 * OpCost;
    }

    // Else, assume that we need to scalarize this op.
    // TODO: If one of the types get legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      unsigned Num = cast<FixedVectorType>(VTy)->getNumElements();
      unsigned Cost = getArithmeticInstrCost(
          Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
          Opd1PropInfo, Opd2PropInfo, Args, CxtI);
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(VTy, Args) + Num * Cost;
    }

    // We don't know anything about this scalar instruction.
    return OpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, only legal types, we
  // need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    if (SLT == MVT::i64)
      return get64BitInstrCost(CostKind) * LT.first * NElts;

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (SLT == MVT::i64) {
      // and, or and xor are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    return LT.first * NElts * getFullRateInstrCost();
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost(CostKind);
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FMUL:
    // Check for a possible fusion of {fadd|fsub}(a, fmul(b, c)) and return a
    // zero cost for the fmul(b, c), assuming the fadd|fsub will be charged
    // the estimated cost of the whole fused operation.
    if (CxtI && CxtI->hasOneUse())
      if (const auto *FAdd = dyn_cast<BinaryOperator>(*CxtI->user_begin())) {
        const int OPC = TLI->InstructionOpcodeToISD(FAdd->getOpcode());
        if (OPC == ISD::FADD || OPC == ISD::FSUB) {
          if (ST->hasMadMacF32Insts() && SLT == MVT::f32 && !HasFP32Denormals)
            return TargetTransformInfo::TCC_Free;
          if (ST->has16BitInsts() && SLT == MVT::f16 && !HasFP64FP16Denormals)
            return TargetTransformInfo::TCC_Free;

          // Estimate all types may be fused with contract/unsafe flags
          const TargetOptions &Options = TLI->getTargetMachine().Options;
          if (Options.AllowFPOpFusion == FPOpFusion::Fast ||
              Options.UnsafeFPMath ||
              (FAdd->hasAllowContract() && CxtI->hasAllowContract()))
            return TargetTransformInfo::TCC_Free;
        }
      }
    LLVM_FALLTHROUGH;
  case ISD::FADD:
  case ISD::FSUB:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost(CostKind);

    if (ST->has16BitInsts() && SLT == MVT::f16)
      NElts = (NElts + 1) / 2;

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;
  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 7 * get64BitInstrCost(CostKind) +
                 getQuarterRateInstrCost(CostKind) +
                 3 * getHalfRateInstrCost(CostKind);
      // Add cost of workaround.
      if (!ST->hasUsableDivScaleConditionOutput())
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
      // TODO: This is more complicated, unsafe flags etc.
      if ((SLT == MVT::f32 && !HasFP32Denormals) ||
          (SLT == MVT::f16 && ST->has16BitInsts())) {
        return LT.first * getQuarterRateInstrCost(CostKind) * NElts;
      }
    }

    if (SLT == MVT::f16 && ST->has16BitInsts()) {
      // 2 x v_cvt_f32_f16
      // f32 rcp
      // f32 fmul
      // v_cvt_f16_f32
      // f16 div_fixup
      int Cost =
          4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost(CostKind);
      return LT.first * Cost * NElts;
    }

    if (SLT == MVT::f32 || SLT == MVT::f16) {
      // 4 more v_cvt_* insts without f16 insts support
      int Cost = (SLT == MVT::f16 ? 14 : 10) * getFullRateInstrCost() +
                 1 * getQuarterRateInstrCost(CostKind);

      if (!HasFP32Denormals) {
        // FP mode switches.
        Cost += 2 * getFullRateInstrCost();
      }

      return LT.first * NElts * Cost;
    }
    break;
  case ISD::FNEG:
    // Use the backend's estimation. If fneg is not free, each element will
    // cost one additional instruction.
    return TLI->isFNegFree(SLT) ? 0 : NElts;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args, CxtI);
}

// Return true if there's a potential benefit from using v2f16/v2i16
// instructions for an intrinsic, even if it requires nontrivial legalization.
static bool intrinsicHasPackedVectorBenefit(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::fma: // TODO: fmuladd
  // There's a small benefit to using vector ops in the legalized code.
  case Intrinsic::round:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
    return true;
  default:
    return false;
  }
}

int GCNTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  if (ICA.getID() == Intrinsic::fabs)
    return 0;

  if (!intrinsicHasPackedVectorBenefit(ICA.getID()))
    return BaseT::getIntrinsicInstrCost(ICA, CostKind);

  Type *RetTy = ICA.getReturnType();
  EVT OrigTy = TLI->getValueType(DL, RetTy);
  if (!OrigTy.isSimple()) {
    if (CostKind != TTI::TCK_CodeSize)
      return BaseT::getIntrinsicInstrCost(ICA, CostKind);

    // TODO: Combine these two logic paths.
    if (ICA.isTypeBasedOnly())
      return getTypeBasedIntrinsicInstrCost(ICA, CostKind);

    Type *RetTy = ICA.getReturnType();
    unsigned VF = ICA.getVectorFactor().getFixedValue();
    unsigned RetVF =
        (RetTy->isVectorTy() ? cast<FixedVectorType>(RetTy)->getNumElements()
                             : 1);
    assert((RetVF == 1 || VF == 1) && "VF > 1 and RetVF is a vector type");
    const IntrinsicInst *I = ICA.getInst();
    const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
    FastMathFlags FMF = ICA.getFlags();
    // Assume that we need to scalarize this intrinsic.
    SmallVector<Type *, 4> Types;
    for (const Value *Op : Args) {
      Type *OpTy = Op->getType();
      assert(VF == 1 || !OpTy->isVectorTy());
      Types.push_back(VF == 1 ? OpTy : FixedVectorType::get(OpTy, VF));
    }

    if (VF > 1 && !RetTy->isVoidTy())
      RetTy = FixedVectorType::get(RetTy, VF);

    // Compute the scalarization overhead based on Args for a vector
    // intrinsic. A vectorizer will pass a scalar RetTy and VF > 1, while
    // CostModel will pass a vector RetTy and VF is 1.
    unsigned ScalarizationCost = std::numeric_limits<unsigned>::max();
    if (RetVF > 1 || VF > 1) {
      ScalarizationCost = 0;
      if (!RetTy->isVoidTy())
        ScalarizationCost +=
            getScalarizationOverhead(cast<VectorType>(RetTy), true, false);
      ScalarizationCost += getOperandsScalarizationOverhead(Args, VF);
    }

    IntrinsicCostAttributes Attrs(ICA.getID(), RetTy, Types, FMF,
                                  ScalarizationCost, I);
    return getIntrinsicInstrCost(Attrs, CostKind);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);

  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  if (SLT == MVT::f64)
    return LT.first * NElts * get64BitInstrCost(CostKind);

  if (ST->has16BitInsts() && SLT == MVT::f16)
    NElts = (NElts + 1) / 2;

  // TODO: Get more refined intrinsic costs?
  unsigned InstRate = getQuarterRateInstrCost(CostKind);
  if (ICA.getID() == Intrinsic::fma) {
    InstRate = ST->hasFastFMAF32() ? getHalfRateInstrCost(CostKind)
                                   : getQuarterRateInstrCost(CostKind);
  }

  return LT.first * NElts * InstRate;
}

unsigned GCNTTIImpl::getCFInstrCost(unsigned Opcode,
                                    TTI::TargetCostKind CostKind) {
  if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
    return Opcode == Instruction::PHI ? 0 : 1;

  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode, CostKind);
  }
}

int GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                           bool IsPairwise,
                                           TTI::TargetCostKind CostKind) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Compute the cost on targets that have packed math instructions (which
  // support 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getArithmeticReductionCost(Opcode, Ty, IsPairwise, CostKind);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getFullRateInstrCost();
}

int GCNTTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                       bool IsPairwise, bool IsUnsigned,
                                       TTI::TargetCostKind CostKind) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Compute the cost on targets that have packed math instructions (which
  // support 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned,
                                         CostKind);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getHalfRateInstrCost(CostKind);
}

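// Vector element accesses of 32-bit (or wider) elements are modeled as free
// subregister reads/writes; e.g. extractelement from a <4 x i32> with a
// constant index costs nothing, while a dynamic index is charged.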
int GCNTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                      unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
        return 0;
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

/// Analyze if the results of inline asm are divergent. If \p Indices is empty,
/// this is analyzing the collective result of all output registers. Otherwise,
/// this is only querying a specific result index if this returns multiple
/// registers in a struct.
bool GCNTTIImpl::isInlineAsmSourceOfDivergence(
  const CallInst *CI, ArrayRef<unsigned> Indices) const {
  // TODO: Handle complex extract indices
  if (Indices.size() > 1)
    return true;

  const DataLayout &DL = CI->getModule()->getDataLayout();
  const SIRegisterInfo *TRI = ST->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, ST->getRegisterInfo(), *CI);

  const int TargetOutputIdx = Indices.empty() ? -1 : Indices[0];

  int OutputIdx = 0;
  for (auto &TC : TargetConstraints) {
    if (TC.Type != InlineAsm::isOutput)
      continue;

    // Skip outputs we don't care about.
    if (TargetOutputIdx != -1 && TargetOutputIdx != OutputIdx++)
      continue;

    TLI->ComputeConstraintToUse(TC, SDValue());

    Register AssignedReg;
    const TargetRegisterClass *RC;
    std::tie(AssignedReg, RC) = TLI->getRegForInlineAsmConstraint(
      TRI, TC.ConstraintCode, TC.ConstraintVT);
    if (AssignedReg) {
      // FIXME: This is a workaround for getRegForInlineAsmConstraint
      // returning VS_32
      RC = TRI->getPhysRegClass(AssignedReg);
    }

    // For AGPR constraints null is returned on subtargets without AGPRs, so
    // assume divergent for null.
    if (!RC || !TRI->isSGPRClass(RC))
      return true;
  }

  return false;
}

/// \returns true if the new GPU divergence analysis is enabled.
bool GCNTTIImpl::useGPUDivergenceAnalysis() const {
  return !UseLegacyDA;
}

/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !AMDGPU::isArgPassedInSGPR(A);

  // Loads from the private and flat address spaces are divergent, because
  // threads can execute the load instruction with the same inputs and get
  // different results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
           Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // the original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());

  // Assume all function calls are a source of divergence.
  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (CI->isInlineAsm())
      return isInlineAsmSourceOfDivergence(CI);
    return true;
  }

  // Assume all function calls are a source of divergence.
  if (isa<InvokeInst>(V))
    return true;

  return false;
}

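// Values produced by wave-wide operations such as readfirstlane, readlane,
// ballot or the wave-level compares are identical in every lane, so they are
// reported as always uniform.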
bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
    case Intrinsic::amdgcn_icmp:
    case Intrinsic::amdgcn_fcmp:
    case Intrinsic::amdgcn_ballot:
    case Intrinsic::amdgcn_if_break:
      return true;
    }
  }

  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (CI->isInlineAsm())
      return !isInlineAsmSourceOfDivergence(CI);
    return false;
  }

  const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V);
  if (!ExtValue)
    return false;

  const CallInst *CI = dyn_cast<CallInst>(ExtValue->getOperand(0));
  if (!CI)
    return false;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(CI)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_if:
    case Intrinsic::amdgcn_else: {
      ArrayRef<unsigned> Indices = ExtValue->getIndices();
      return Indices.size() == 1 && Indices[0] == 1;
    }
    }
  }

  // If we have inline asm returning mixed SGPR and VGPR results, we infer
  // divergence for the overall struct return. We need to override that here
  // in the case we're extracting an SGPR component.
  if (CI->isInlineAsm())
    return !isInlineAsmSourceOfDivergence(CI, ExtValue->getIndices());

  return false;
}

bool GCNTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                            Intrinsic::ID IID) const {
  switch (IID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax:
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private:
    OpIndexes.push_back(0);
    return true;
  default:
    return false;
  }
}

Value *GCNTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
                                                    Value *OldV,
                                                    Value *NewV) const {
  auto IntrID = II->getIntrinsicID();
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    const ConstantInt *IsVolatile = cast<ConstantInt>(II->getArgOperand(4));
    if (!IsVolatile->isZero())
      return nullptr;
    Module *M = II->getParent()->getParent()->getParent();
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl =
        Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return II;
  }
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private: {
    unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
      AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
    unsigned NewAS = NewV->getType()->getPointerAddressSpace();
    LLVMContext &Ctx = NewV->getType()->getContext();
    ConstantInt *NewVal = (TrueAS == NewAS) ?
      ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
    return NewVal;
  }
  case Intrinsic::ptrmask: {
    unsigned OldAS = OldV->getType()->getPointerAddressSpace();
    unsigned NewAS = NewV->getType()->getPointerAddressSpace();
    Value *MaskOp = II->getArgOperand(1);
    Type *MaskTy = MaskOp->getType();

    bool DoTruncate = false;

    const GCNTargetMachine &TM =
        static_cast<const GCNTargetMachine &>(getTLI()->getTargetMachine());
    if (!TM.isNoopAddrSpaceCast(OldAS, NewAS)) {
      // All valid 64-bit to 32-bit casts work by chopping off the high
      // bits. Any masking only clearing the low bits will also apply in the new
      // address space.
      if (DL.getPointerSizeInBits(OldAS) != 64 ||
          DL.getPointerSizeInBits(NewAS) != 32)
        return nullptr;

      // TODO: Do we need to thread more context in here?
      KnownBits Known = computeKnownBits(MaskOp, DL, 0, nullptr, II);
      if (Known.countMinLeadingOnes() < 32)
        return nullptr;

      DoTruncate = true;
    }

    IRBuilder<> B(II);
    if (DoTruncate) {
      MaskTy = B.getInt32Ty();
      MaskOp = B.CreateTrunc(MaskOp, MaskTy);
    }

    return B.CreateIntrinsic(Intrinsic::ptrmask, {NewV->getType(), MaskTy},
                             {NewV, MaskOp});
  }
  default:
    return nullptr;
  }
}

unsigned GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *VT,
                                    int Index, VectorType *SubTp) {
  if (ST->hasVOP3PInsts()) {
    if (cast<FixedVectorType>(VT)->getNumElements() == 2 &&
        DL.getTypeSizeInBits(VT->getElementType()) == 16) {
      // With op_sel, VOP3P instructions can freely access the low half or
      // high half of a register, so any swizzle is free.
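      // For example, reversing a <2 x half> value:
      //   shufflevector <2 x half> %v, <2 x half> undef, <2 x i32> <i32 1, i32 0>
      // costs nothing on these subtargets.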

      switch (Kind) {
      case TTI::SK_Broadcast:
      case TTI::SK_Reverse:
      case TTI::SK_PermuteSingleSrc:
        return 0;
      default:
        break;
      }
    }
  }

  return BaseT::getShuffleCost(Kind, VT, Index, SubTp);
}

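// Inlining across functions compiled with different feature sets is only
// allowed when the callee's required features (modulo the ignore list above)
// are a subset of the caller's, and their FP mode settings are compatible.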
bool GCNTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const GCNSubtarget *CallerST
    = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Caller));
  const GCNSubtarget *CalleeST
    = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Callee));

  const FeatureBitset &CallerBits = CallerST->getFeatureBits();
  const FeatureBitset &CalleeBits = CalleeST->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
    return false;

  // FIXME: dx10_clamp can just take the caller setting, but there seems to be
  // no way to support merge for backend defined attributes.
  AMDGPU::SIModeRegisterDefaults CallerMode(*Caller);
  AMDGPU::SIModeRegisterDefaults CalleeMode(*Callee);
  if (!CallerMode.isInlineCompatible(CalleeMode))
    return false;

  // Hack to make compile times reasonable.
  if (InlineMaxBB && !Callee->hasFnAttribute(Attribute::InlineHint)) {
    // Single BB does not increase total BB amount, thus subtract 1.
    size_t BBSize = Caller->size() + Callee->size() - 1;
    return BBSize <= InlineMaxBB;
  }

  return true;
}

unsigned GCNTTIImpl::adjustInliningThreshold(const CallBase *CB) const {
  // If a pointer to a private array is passed into a function, it will not
  // be optimized out, leaving scratch usage behind.
  // Increase the inline threshold to allow inlining in this case.
  uint64_t AllocaSize = 0;
  SmallPtrSet<const AllocaInst *, 8> AIVisited;
  for (Value *PtrArg : CB->args()) {
    PointerType *Ty = dyn_cast<PointerType>(PtrArg->getType());
    if (!Ty || (Ty->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS &&
                Ty->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS))
      continue;

    PtrArg = getUnderlyingObject(PtrArg);
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(PtrArg)) {
      if (!AI->isStaticAlloca() || !AIVisited.insert(AI).second)
        continue;
      AllocaSize += DL.getTypeAllocSize(AI->getAllocatedType());
      // If the amount of stack memory is excessive we will not be able
      // to get rid of the scratch anyway, bail out.
      if (AllocaSize > ArgAllocaCutoff) {
        AllocaSize = 0;
        break;
      }
    }
  }
  if (AllocaSize)
    return ArgAllocaCost;
  return 0;
}

void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}

void GCNTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                       TTI::PeelingPreferences &PP) {
  CommonTTI.getPeelingPreferences(L, SE, PP);
}

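// 64-bit ALU operations run at half rate on subtargets with half-rate 64-bit
// ops and at quarter rate otherwise.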
int GCNTTIImpl::get64BitInstrCost(TTI::TargetCostKind CostKind) const {
  return ST->hasHalfRate64Ops() ? getHalfRateInstrCost(CostKind)
                                : getQuarterRateInstrCost(CostKind);
}

R600TTIImpl::R600TTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getParent()->getDataLayout()),
      ST(static_cast<const R600Subtarget *>(TM->getSubtargetImpl(F))),
      TLI(ST->getTargetLowering()), CommonTTI(TM, F) {}

unsigned R600TTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned R600TTIImpl::getNumberOfRegisters(bool Vec) const {
  return getHardwareNumberOfRegisters(Vec);
}

unsigned R600TTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned R600TTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned R600TTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS)
    return 128;
  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS)
    return 64;
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 32;

  if ((AddrSpace == AMDGPUAS::PARAM_D_ADDRESS ||
      AddrSpace == AMDGPUAS::PARAM_I_ADDRESS ||
      (AddrSpace >= AMDGPUAS::CONSTANT_BUFFER_0 &&
      AddrSpace <= AMDGPUAS::CONSTANT_BUFFER_15)))
    return 128;
  llvm_unreachable("unhandled address space");
}

bool R600TTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                             Align Alignment,
                                             unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  return (AddrSpace != AMDGPUAS::PRIVATE_ADDRESS);
}

bool R600TTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                              Align Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool R600TTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                               Align Alignment,
                                               unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned R600TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

unsigned R600TTIImpl::getCFInstrCost(unsigned Opcode,
                                     TTI::TargetCostKind CostKind) {
  if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
    return Opcode == Instruction::PHI ? 0 : 1;

  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode, CostKind);
  }
}

int R600TTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                    unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

void R600TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}

void R600TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                        TTI::PeelingPreferences &PP) {
  CommonTTI.getPeelingPreferences(L, SE, PP);
}