1 //===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // \file
10 // This file implements a TargetTransformInfo analysis pass specific to the
11 // AMDGPU target machine. It uses the target's detailed information to provide
12 // more precise answers to certain TTI queries, while letting the target
13 // independent and default TTI implementations handle the rest.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "AMDGPUTargetTransformInfo.h"
18 #include "AMDGPUTargetMachine.h"
19 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
20 #include "SIModeRegisterDefaults.h"
21 #include "llvm/Analysis/InlineCost.h"
22 #include "llvm/Analysis/LoopInfo.h"
23 #include "llvm/Analysis/ValueTracking.h"
24 #include "llvm/CodeGen/Analysis.h"
25 #include "llvm/IR/IRBuilder.h"
26 #include "llvm/IR/IntrinsicsAMDGPU.h"
27 #include "llvm/IR/PatternMatch.h"
28 #include "llvm/Support/KnownBits.h"
29 #include <optional>
30 
31 using namespace llvm;
32 
33 #define DEBUG_TYPE "AMDGPUtti"
34 
35 static cl::opt<unsigned> UnrollThresholdPrivate(
36   "amdgpu-unroll-threshold-private",
37   cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
38   cl::init(2700), cl::Hidden);
39 
40 static cl::opt<unsigned> UnrollThresholdLocal(
41   "amdgpu-unroll-threshold-local",
42   cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
43   cl::init(1000), cl::Hidden);
44 
45 static cl::opt<unsigned> UnrollThresholdIf(
46   "amdgpu-unroll-threshold-if",
  cl::desc("Unroll threshold increment for AMDGPU for each if statement"
           " inside a loop"),
48   cl::init(200), cl::Hidden);
49 
50 static cl::opt<bool> UnrollRuntimeLocal(
51   "amdgpu-unroll-runtime-local",
52   cl::desc("Allow runtime unroll for AMDGPU if local memory used in a loop"),
53   cl::init(true), cl::Hidden);
54 
55 static cl::opt<unsigned> UnrollMaxBlockToAnalyze(
56     "amdgpu-unroll-max-block-to-analyze",
57     cl::desc("Inner loop block size threshold to analyze in unroll for AMDGPU"),
58     cl::init(32), cl::Hidden);
59 
60 static cl::opt<unsigned> ArgAllocaCost("amdgpu-inline-arg-alloca-cost",
61                                        cl::Hidden, cl::init(4000),
62                                        cl::desc("Cost of alloca argument"));
63 
// If the amount of scratch memory to eliminate exceeds our ability to allocate
// it into registers, we gain nothing by aggressively inlining functions for
// that heuristic.
67 static cl::opt<unsigned>
68     ArgAllocaCutoff("amdgpu-inline-arg-alloca-cutoff", cl::Hidden,
69                     cl::init(256),
70                     cl::desc("Maximum alloca size to use for inline cost"));
71 
72 // Inliner constraint to achieve reasonable compilation time.
73 static cl::opt<size_t> InlineMaxBB(
74     "amdgpu-inline-max-bb", cl::Hidden, cl::init(1100),
75     cl::desc("Maximum number of BBs allowed in a function after inlining"
76              " (compile time constraint)"));
77 
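// Return true if \p Cond transitively depends on a PHI node that is defined
// in \p L itself rather than in one of its sub-loops. The recursion depth is
// bounded to keep compile time in check.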
78 static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
79                               unsigned Depth = 0) {
80   const Instruction *I = dyn_cast<Instruction>(Cond);
81   if (!I)
82     return false;
83 
84   for (const Value *V : I->operand_values()) {
85     if (!L->contains(I))
86       continue;
87     if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
88       if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
89                   return SubLoop->contains(PHI); }))
90         return true;
91     } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
92       return true;
93   }
94   return false;
95 }
96 
97 AMDGPUTTIImpl::AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
98     : BaseT(TM, F.getParent()->getDataLayout()),
99       TargetTriple(TM->getTargetTriple()),
100       ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
101       TLI(ST->getTargetLowering()) {}
102 
103 void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
104                                             TTI::UnrollingPreferences &UP,
105                                             OptimizationRemarkEmitter *ORE) {
106   const Function &F = *L->getHeader()->getParent();
107   UP.Threshold =
108       F.getFnAttributeAsParsedInteger("amdgpu-unroll-threshold", 300);
109   UP.MaxCount = std::numeric_limits<unsigned>::max();
110   UP.Partial = true;
111 
  // A conditional branch in a loop back edge needs 3 additional exec
  // manipulations on average.
114   UP.BEInsns += 3;
115 
116   // We want to run unroll even for the loops which have been vectorized.
117   UP.UnrollVectorizedLoop = true;
118 
119   // TODO: Do we want runtime unrolling?
120 
  // Maximum alloca size that can fit in registers. Reserve 16 registers.
122   const unsigned MaxAlloca = (256 - 16) * 4;
123   unsigned ThresholdPrivate = UnrollThresholdPrivate;
124   unsigned ThresholdLocal = UnrollThresholdLocal;
125 
  // If this loop has the amdgpu.loop.unroll.threshold metadata, we will use
  // the provided threshold value as the default for Threshold.
128   if (MDNode *LoopUnrollThreshold =
129           findOptionMDForLoop(L, "amdgpu.loop.unroll.threshold")) {
130     if (LoopUnrollThreshold->getNumOperands() == 2) {
131       ConstantInt *MetaThresholdValue = mdconst::extract_or_null<ConstantInt>(
132           LoopUnrollThreshold->getOperand(1));
133       if (MetaThresholdValue) {
134         // We will also use the supplied value for PartialThreshold for now.
135         // We may introduce additional metadata if it becomes necessary in the
136         // future.
137         UP.Threshold = MetaThresholdValue->getSExtValue();
138         UP.PartialThreshold = UP.Threshold;
139         ThresholdPrivate = std::min(ThresholdPrivate, UP.Threshold);
140         ThresholdLocal = std::min(ThresholdLocal, UP.Threshold);
141       }
142     }
143   }
144 
145   unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
146   for (const BasicBlock *BB : L->getBlocks()) {
147     const DataLayout &DL = BB->getModule()->getDataLayout();
148     unsigned LocalGEPsSeen = 0;
149 
    if (llvm::any_of(L->getSubLoops(), [BB](const Loop *SubLoop) {
          return SubLoop->contains(BB);
        }))
      continue; // Block belongs to an inner loop.
153 
154     for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate the
      // if region and potentially even the PHI itself, saving on both
      // divergence and the registers used for the PHI.
      // Add a small bonus for each such "if" statement.
160       if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
161         if (UP.Threshold < MaxBoost && Br->isConditional()) {
162           BasicBlock *Succ0 = Br->getSuccessor(0);
163           BasicBlock *Succ1 = Br->getSuccessor(1);
164           if ((L->contains(Succ0) && L->isLoopExiting(Succ0)) ||
165               (L->contains(Succ1) && L->isLoopExiting(Succ1)))
166             continue;
167           if (dependsOnLocalPhi(L, Br->getCondition())) {
168             UP.Threshold += UnrollThresholdIf;
169             LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
170                               << " for loop:\n"
171                               << *L << " due to " << *Br << '\n');
172             if (UP.Threshold >= MaxBoost)
173               return;
174           }
175         }
176         continue;
177       }
178 
179       const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
180       if (!GEP)
181         continue;
182 
183       unsigned AS = GEP->getAddressSpace();
184       unsigned Threshold = 0;
185       if (AS == AMDGPUAS::PRIVATE_ADDRESS)
186         Threshold = ThresholdPrivate;
187       else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS)
188         Threshold = ThresholdLocal;
189       else
190         continue;
191 
192       if (UP.Threshold >= Threshold)
193         continue;
194 
195       if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
196         const Value *Ptr = GEP->getPointerOperand();
197         const AllocaInst *Alloca =
198             dyn_cast<AllocaInst>(getUnderlyingObject(Ptr));
199         if (!Alloca || !Alloca->isStaticAlloca())
200           continue;
201         Type *Ty = Alloca->getAllocatedType();
202         unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
203         if (AllocaSize > MaxAlloca)
204           continue;
205       } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
206                  AS == AMDGPUAS::REGION_ADDRESS) {
207         LocalGEPsSeen++;
        // Inhibit unrolling for local memory if we have seen addressing that
        // is not to a variable; most likely we will be unable to combine it.
        // Do not unroll too-deep inner loops for local memory, to give an
        // outer loop a chance to be unrolled for a more important reason.
212         if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
213             (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
214              !isa<Argument>(GEP->getPointerOperand())))
215           continue;
216         LLVM_DEBUG(dbgs() << "Allow unroll runtime for loop:\n"
217                           << *L << " due to LDS use.\n");
218         UP.Runtime = UnrollRuntimeLocal;
219       }
220 
221       // Check if GEP depends on a value defined by this loop itself.
222       bool HasLoopDef = false;
223       for (const Value *Op : GEP->operands()) {
224         const Instruction *Inst = dyn_cast<Instruction>(Op);
225         if (!Inst || L->isLoopInvariant(Op))
226           continue;
227 
228         if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
229              return SubLoop->contains(Inst); }))
230           continue;
231         HasLoopDef = true;
232         break;
233       }
234       if (!HasLoopDef)
235         continue;
236 
237       // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. Allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
241       // alloca ptr, then we want to use a higher than normal loop unroll
242       // threshold. This will give SROA a better chance to eliminate these
243       // allocas.
244       //
245       // We also want to have more unrolling for local memory to let ds
246       // instructions with different offsets combine.
247       //
248       // Don't use the maximum allowed value here as it will make some
249       // programs way too big.
250       UP.Threshold = Threshold;
251       LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
252                         << " for loop:\n"
253                         << *L << " due to " << *GEP << '\n');
254       if (UP.Threshold >= MaxBoost)
255         return;
256     }
257 
    // If we got a GEP in a small BB from an inner loop then increase the max
    // trip count to analyze for better cost estimation in unrolling.
260     if (L->isInnermost() && BB->size() < UnrollMaxBlockToAnalyze)
261       UP.MaxIterationsCountToAnalyze = 32;
262   }
263 }
264 
265 void AMDGPUTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
266                                           TTI::PeelingPreferences &PP) {
267   BaseT::getPeelingPreferences(L, SE, PP);
268 }
269 
270 int64_t AMDGPUTTIImpl::getMaxMemIntrinsicInlineSizeThreshold() const {
271   return 1024;
272 }
273 
274 const FeatureBitset GCNTTIImpl::InlineFeatureIgnoreList = {
275     // Codegen control options which don't matter.
276     AMDGPU::FeatureEnableLoadStoreOpt, AMDGPU::FeatureEnableSIScheduler,
277     AMDGPU::FeatureEnableUnsafeDSOffsetFolding, AMDGPU::FeatureFlatForGlobal,
278     AMDGPU::FeaturePromoteAlloca, AMDGPU::FeatureUnalignedScratchAccess,
279     AMDGPU::FeatureUnalignedAccessMode,
280 
281     AMDGPU::FeatureAutoWaitcntBeforeBarrier,
282 
283     // Property of the kernel/environment which can't actually differ.
284     AMDGPU::FeatureSGPRInitBug, AMDGPU::FeatureXNACK,
285     AMDGPU::FeatureTrapHandler,
286 
    // The default assumption needs to be that ECC is enabled, but no directly
    // exposed operations depend on it, so it can be safely inlined.
289     AMDGPU::FeatureSRAMECC,
290 
291     // Perf-tuning features
292     AMDGPU::FeatureFastFMAF32, AMDGPU::HalfRate64Ops};
293 
294 GCNTTIImpl::GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
295     : BaseT(TM, F.getParent()->getDataLayout()),
296       ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
297       TLI(ST->getTargetLowering()), CommonTTI(TM, F),
298       IsGraphics(AMDGPU::isGraphics(F.getCallingConv())) {
299   SIModeRegisterDefaults Mode(F);
300   HasFP32Denormals = Mode.FP32Denormals != DenormalMode::getPreserveSign();
301   HasFP64FP16Denormals =
302       Mode.FP64FP16Denormals != DenormalMode::getPreserveSign();
303 }
304 
305 bool GCNTTIImpl::hasBranchDivergence(const Function *F) const {
306   return !F || !ST->isSingleLaneExecution(*F);
307 }
308 
309 unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
310   // NB: RCID is not an RCID. In fact it is 0 or 1 for scalar or vector
311   // registers. See getRegisterClassForType for the implementation.
  // In this case "vector registers" are not vectors in terms of
  // VGPRs, but registers which can hold multiple values.
314 
315   // This is really the number of registers to fill when vectorizing /
316   // interleaving loops, so we lie to avoid trying to use all registers.
317   return 4;
318 }
319 
320 TypeSize
321 GCNTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
322   switch (K) {
323   case TargetTransformInfo::RGK_Scalar:
324     return TypeSize::getFixed(32);
325   case TargetTransformInfo::RGK_FixedWidthVector:
326     return TypeSize::getFixed(ST->hasPackedFP32Ops() ? 64 : 32);
327   case TargetTransformInfo::RGK_ScalableVector:
328     return TypeSize::getScalable(0);
329   }
330   llvm_unreachable("Unsupported register kind");
331 }
332 
333 unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
334   return 32;
335 }
336 
337 unsigned GCNTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
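  // Loads and stores can be vectorized up to 128 bits. Other operations are
  // limited to two elements when packed 16-bit or packed FP32 instructions
  // are available, and are not vectorized otherwise.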
338   if (Opcode == Instruction::Load || Opcode == Instruction::Store)
339     return 32 * 4 / ElemWidth;
340   return (ElemWidth == 16 && ST->has16BitInsts()) ? 2
341        : (ElemWidth == 32 && ST->hasPackedFP32Ops()) ? 2
342        : 1;
343 }
344 
345 unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
346                                          unsigned ChainSizeInBytes,
347                                          VectorType *VecTy) const {
348   unsigned VecRegBitWidth = VF * LoadSize;
349   if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
    // TODO: Support element sizes of less than 32 bits?
351     return 128 / LoadSize;
352 
353   return VF;
354 }
355 
unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                          unsigned ChainSizeInBytes,
                                          VectorType *VecTy) const {
359   unsigned VecRegBitWidth = VF * StoreSize;
360   if (VecRegBitWidth > 128)
361     return 128 / StoreSize;
362 
363   return VF;
364 }
365 
366 unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
367   if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
368       AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
369       AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
370       AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER ||
371       AddrSpace == AMDGPUAS::BUFFER_RESOURCE) {
372     return 512;
373   }
374 
375   if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
376     return 8 * ST->getMaxPrivateElementSize();
377 
378   // Common to flat, global, local and region. Assume for unknown addrspace.
379   return 128;
380 }
381 
382 bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
383                                             Align Alignment,
384                                             unsigned AddrSpace) const {
385   // We allow vectorization of flat stores, even though we may need to decompose
386   // them later if they may access private memory. We don't have enough context
387   // here, and legalization can handle it.
388   if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
389     return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
390       ChainSizeInBytes <= ST->getMaxPrivateElementSize();
391   }
392   return true;
393 }
394 
395 bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
396                                              Align Alignment,
397                                              unsigned AddrSpace) const {
398   return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
399 }
400 
401 bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
402                                               Align Alignment,
403                                               unsigned AddrSpace) const {
404   return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
405 }
406 
407 int64_t GCNTTIImpl::getMaxMemIntrinsicInlineSizeThreshold() const {
408   return 1024;
409 }
410 
411 // FIXME: Really we would like to issue multiple 128-bit loads and stores per
412 // iteration. Should we report a larger size and let it legalize?
413 //
414 // FIXME: Should we use narrower types for local/region, or account for when
415 // unaligned access is legal?
416 //
417 // FIXME: This could use fine tuning and microbenchmarks.
418 Type *GCNTTIImpl::getMemcpyLoopLoweringType(
419     LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
420     unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign,
421     std::optional<uint32_t> AtomicElementSize) const {
422 
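  // For element-atomic memcpy lowering, use an integer type matching the
  // atomic element size.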
423   if (AtomicElementSize)
424     return Type::getIntNTy(Context, *AtomicElementSize * 8);
425 
426   unsigned MinAlign = std::min(SrcAlign, DestAlign);
427 
428   // A (multi-)dword access at an address == 2 (mod 4) will be decomposed by the
429   // hardware into byte accesses. If you assume all alignments are equally
430   // probable, it's more efficient on average to use short accesses for this
431   // case.
432   if (MinAlign == 2)
433     return Type::getInt16Ty(Context);
434 
435   // Not all subtargets have 128-bit DS instructions, and we currently don't
436   // form them by default.
437   if (SrcAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
438       SrcAddrSpace == AMDGPUAS::REGION_ADDRESS ||
439       DestAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
440       DestAddrSpace == AMDGPUAS::REGION_ADDRESS) {
441     return FixedVectorType::get(Type::getInt32Ty(Context), 2);
442   }
443 
444   // Global memory works best with 16-byte accesses. Private memory will also
445   // hit this, although they'll be decomposed.
446   return FixedVectorType::get(Type::getInt32Ty(Context), 4);
447 }
448 
449 void GCNTTIImpl::getMemcpyLoopResidualLoweringType(
450     SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
451     unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
452     unsigned SrcAlign, unsigned DestAlign,
453     std::optional<uint32_t> AtomicCpySize) const {
454   assert(RemainingBytes < 16);
455 
456   if (AtomicCpySize)
457     BaseT::getMemcpyLoopResidualLoweringType(
458         OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
459         DestAlign, AtomicCpySize);
460 
461   unsigned MinAlign = std::min(SrcAlign, DestAlign);
462 
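  // With only 2-byte alignment the residual is emitted as 16-bit (and byte)
  // pieces; otherwise greedily use 8-byte and then 4-byte chunks first.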
463   if (MinAlign != 2) {
464     Type *I64Ty = Type::getInt64Ty(Context);
465     while (RemainingBytes >= 8) {
466       OpsOut.push_back(I64Ty);
467       RemainingBytes -= 8;
468     }
469 
470     Type *I32Ty = Type::getInt32Ty(Context);
471     while (RemainingBytes >= 4) {
472       OpsOut.push_back(I32Ty);
473       RemainingBytes -= 4;
474     }
475   }
476 
477   Type *I16Ty = Type::getInt16Ty(Context);
478   while (RemainingBytes >= 2) {
479     OpsOut.push_back(I16Ty);
480     RemainingBytes -= 2;
481   }
482 
483   Type *I8Ty = Type::getInt8Ty(Context);
484   while (RemainingBytes) {
485     OpsOut.push_back(I8Ty);
486     --RemainingBytes;
487   }
488 }
489 
490 unsigned GCNTTIImpl::getMaxInterleaveFactor(ElementCount VF) {
491   // Disable unrolling if the loop is not vectorized.
492   // TODO: Enable this again.
493   if (VF.isScalar())
494     return 1;
495 
496   return 8;
497 }
498 
bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                    MemIntrinsicInfo &Info) const {
501   switch (Inst->getIntrinsicID()) {
502   case Intrinsic::amdgcn_ds_ordered_add:
503   case Intrinsic::amdgcn_ds_ordered_swap:
504   case Intrinsic::amdgcn_ds_fadd:
505   case Intrinsic::amdgcn_ds_fmin:
506   case Intrinsic::amdgcn_ds_fmax: {
507     auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
508     auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
509     if (!Ordering || !Volatile)
510       return false; // Invalid.
511 
512     unsigned OrderingVal = Ordering->getZExtValue();
    if (OrderingVal >
        static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
514       return false;
515 
516     Info.PtrVal = Inst->getArgOperand(0);
517     Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
518     Info.ReadMem = true;
519     Info.WriteMem = true;
520     Info.IsVolatile = !Volatile->isZero();
521     return true;
522   }
523   default:
524     return false;
525   }
526 }
527 
528 InstructionCost GCNTTIImpl::getArithmeticInstrCost(
529     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
530     TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
531     ArrayRef<const Value *> Args,
532     const Instruction *CxtI) {
533 
534   // Legalize the type.
535   std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
536   int ISD = TLI->InstructionOpcodeToISD(Opcode);
537 
  // Because we don't have any legal vector operations, but we do have the
  // legal vector types, we need to account for split vectors.
540   unsigned NElts = LT.second.isVector() ?
541     LT.second.getVectorNumElements() : 1;
542 
543   MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
544 
545   switch (ISD) {
546   case ISD::SHL:
547   case ISD::SRL:
548   case ISD::SRA:
549     if (SLT == MVT::i64)
550       return get64BitInstrCost(CostKind) * LT.first * NElts;
551 
552     if (ST->has16BitInsts() && SLT == MVT::i16)
553       NElts = (NElts + 1) / 2;
554 
555     // i32
556     return getFullRateInstrCost() * LT.first * NElts;
557   case ISD::ADD:
558   case ISD::SUB:
559   case ISD::AND:
560   case ISD::OR:
561   case ISD::XOR:
562     if (SLT == MVT::i64) {
563       // and, or and xor are typically split into 2 VALU instructions.
564       return 2 * getFullRateInstrCost() * LT.first * NElts;
565     }
566 
567     if (ST->has16BitInsts() && SLT == MVT::i16)
568       NElts = (NElts + 1) / 2;
569 
570     return LT.first * NElts * getFullRateInstrCost();
571   case ISD::MUL: {
572     const int QuarterRateCost = getQuarterRateInstrCost(CostKind);
573     if (SLT == MVT::i64) {
574       const int FullRateCost = getFullRateInstrCost();
575       return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
576     }
577 
578     if (ST->has16BitInsts() && SLT == MVT::i16)
579       NElts = (NElts + 1) / 2;
580 
581     // i32
582     return QuarterRateCost * NElts * LT.first;
583   }
584   case ISD::FMUL:
    // Check for a possible fusion {fadd|fsub}(a, fmul(b, c)) and return zero
    // cost for the fmul(b, c), assuming the fadd|fsub will get the estimated
    // cost of the whole fused operation.
588     if (CxtI && CxtI->hasOneUse())
589       if (const auto *FAdd = dyn_cast<BinaryOperator>(*CxtI->user_begin())) {
590         const int OPC = TLI->InstructionOpcodeToISD(FAdd->getOpcode());
591         if (OPC == ISD::FADD || OPC == ISD::FSUB) {
592           if (ST->hasMadMacF32Insts() && SLT == MVT::f32 && !HasFP32Denormals)
593             return TargetTransformInfo::TCC_Free;
594           if (ST->has16BitInsts() && SLT == MVT::f16 && !HasFP64FP16Denormals)
595             return TargetTransformInfo::TCC_Free;
596 
          // Assume all types may be fused with contract/unsafe flags.
598           const TargetOptions &Options = TLI->getTargetMachine().Options;
599           if (Options.AllowFPOpFusion == FPOpFusion::Fast ||
600               Options.UnsafeFPMath ||
601               (FAdd->hasAllowContract() && CxtI->hasAllowContract()))
602             return TargetTransformInfo::TCC_Free;
603         }
604       }
605     [[fallthrough]];
606   case ISD::FADD:
607   case ISD::FSUB:
608     if (ST->hasPackedFP32Ops() && SLT == MVT::f32)
609       NElts = (NElts + 1) / 2;
610     if (SLT == MVT::f64)
611       return LT.first * NElts * get64BitInstrCost(CostKind);
612 
613     if (ST->has16BitInsts() && SLT == MVT::f16)
614       NElts = (NElts + 1) / 2;
615 
616     if (SLT == MVT::f32 || SLT == MVT::f16)
617       return LT.first * NElts * getFullRateInstrCost();
618     break;
619   case ISD::FDIV:
620   case ISD::FREM:
621     // FIXME: frem should be handled separately. The fdiv in it is most of it,
622     // but the current lowering is also not entirely correct.
623     if (SLT == MVT::f64) {
624       int Cost = 7 * get64BitInstrCost(CostKind) +
625                  getQuarterRateInstrCost(CostKind) +
626                  3 * getHalfRateInstrCost(CostKind);
627       // Add cost of workaround.
628       if (!ST->hasUsableDivScaleConditionOutput())
629         Cost += 3 * getFullRateInstrCost();
630 
631       return LT.first * Cost * NElts;
632     }
633 
634     if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
635       // TODO: This is more complicated, unsafe flags etc.
636       if ((SLT == MVT::f32 && !HasFP32Denormals) ||
637           (SLT == MVT::f16 && ST->has16BitInsts())) {
638         return LT.first * getQuarterRateInstrCost(CostKind) * NElts;
639       }
640     }
641 
642     if (SLT == MVT::f16 && ST->has16BitInsts()) {
643       // 2 x v_cvt_f32_f16
644       // f32 rcp
645       // f32 fmul
646       // v_cvt_f16_f32
647       // f16 div_fixup
648       int Cost =
649           4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost(CostKind);
650       return LT.first * Cost * NElts;
651     }
652 
653     if (SLT == MVT::f32 || SLT == MVT::f16) {
654       // 4 more v_cvt_* insts without f16 insts support
655       int Cost = (SLT == MVT::f16 ? 14 : 10) * getFullRateInstrCost() +
656                  1 * getQuarterRateInstrCost(CostKind);
657 
658       if (!HasFP32Denormals) {
659         // FP mode switches.
660         Cost += 2 * getFullRateInstrCost();
661       }
662 
663       return LT.first * NElts * Cost;
664     }
665     break;
666   case ISD::FNEG:
    // Use the backend's estimation. If fneg is not free, each element will
    // cost one additional instruction.
669     return TLI->isFNegFree(SLT) ? 0 : NElts;
670   default:
671     break;
672   }
673 
674   return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
675                                        Args, CxtI);
676 }
677 
678 // Return true if there's a potential benefit from using v2f16/v2i16
679 // instructions for an intrinsic, even if it requires nontrivial legalization.
680 static bool intrinsicHasPackedVectorBenefit(Intrinsic::ID ID) {
681   switch (ID) {
682   case Intrinsic::fma: // TODO: fmuladd
683   // There's a small benefit to using vector ops in the legalized code.
684   case Intrinsic::round:
685   case Intrinsic::uadd_sat:
686   case Intrinsic::usub_sat:
687   case Intrinsic::sadd_sat:
688   case Intrinsic::ssub_sat:
689     return true;
690   default:
691     return false;
692   }
693 }
694 
695 InstructionCost
696 GCNTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
697                                   TTI::TargetCostKind CostKind) {
698   if (ICA.getID() == Intrinsic::fabs)
699     return 0;
700 
701   if (!intrinsicHasPackedVectorBenefit(ICA.getID()))
702     return BaseT::getIntrinsicInstrCost(ICA, CostKind);
703 
704   Type *RetTy = ICA.getReturnType();
705 
706   // Legalize the type.
707   std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(RetTy);
708 
709   unsigned NElts = LT.second.isVector() ?
710     LT.second.getVectorNumElements() : 1;
711 
712   MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
713 
714   if (SLT == MVT::f64)
715     return LT.first * NElts * get64BitInstrCost(CostKind);
716 
717   if ((ST->has16BitInsts() && SLT == MVT::f16) ||
718       (ST->hasPackedFP32Ops() && SLT == MVT::f32))
719     NElts = (NElts + 1) / 2;
720 
721   // TODO: Get more refined intrinsic costs?
722   unsigned InstRate = getQuarterRateInstrCost(CostKind);
723 
724   switch (ICA.getID()) {
725   case Intrinsic::fma:
726     InstRate = ST->hasFastFMAF32() ? getHalfRateInstrCost(CostKind)
727                                    : getQuarterRateInstrCost(CostKind);
728     break;
729   case Intrinsic::uadd_sat:
730   case Intrinsic::usub_sat:
731   case Intrinsic::sadd_sat:
732   case Intrinsic::ssub_sat:
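    // Packed saturating ops on these types are costed as a single
    // instruction.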
733     static const auto ValidSatTys = {MVT::v2i16, MVT::v4i16};
734     if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
735       NElts = 1;
736     break;
737   }
738 
739   return LT.first * NElts * InstRate;
740 }
741 
742 InstructionCost GCNTTIImpl::getCFInstrCost(unsigned Opcode,
743                                            TTI::TargetCostKind CostKind,
744                                            const Instruction *I) {
745   assert((I == nullptr || I->getOpcode() == Opcode) &&
746          "Opcode should reflect passed instruction.");
747   const bool SCost =
748       (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency);
749   const int CBrCost = SCost ? 5 : 7;
750   switch (Opcode) {
751   case Instruction::Br: {
752     // Branch instruction takes about 4 slots on gfx900.
753     auto BI = dyn_cast_or_null<BranchInst>(I);
754     if (BI && BI->isUnconditional())
755       return SCost ? 1 : 4;
    // Assume a conditional branch takes 3 additional exec manipulation
    // instructions on average.
758     return CBrCost;
759   }
760   case Instruction::Switch: {
761     auto SI = dyn_cast_or_null<SwitchInst>(I);
    // Each case (including default) takes 1 cmp + 1 cbr instruction on
    // average.
764     return (SI ? (SI->getNumCases() + 1) : 4) * (CBrCost + 1);
765   }
766   case Instruction::Ret:
767     return SCost ? 1 : 10;
768   }
769   return BaseT::getCFInstrCost(Opcode, CostKind, I);
770 }
771 
772 InstructionCost
773 GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
774                                        std::optional<FastMathFlags> FMF,
775                                        TTI::TargetCostKind CostKind) {
776   if (TTI::requiresOrderedReduction(FMF))
777     return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
778 
779   EVT OrigTy = TLI->getValueType(DL, Ty);
780 
  // Computes cost on targets that have packed math instructions (which
  // support 16-bit types only).
783   if (!ST->hasVOP3PInsts() || OrigTy.getScalarSizeInBits() != 16)
784     return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
785 
786   std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
787   return LT.first * getFullRateInstrCost();
788 }
789 
790 InstructionCost
791 GCNTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
792                                    FastMathFlags FMF,
793                                    TTI::TargetCostKind CostKind) {
794   EVT OrigTy = TLI->getValueType(DL, Ty);
795 
  // Computes cost on targets that have packed math instructions (which
  // support 16-bit types only).
798   if (!ST->hasVOP3PInsts() || OrigTy.getScalarSizeInBits() != 16)
799     return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);
800 
801   std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
802   return LT.first * getHalfRateInstrCost(CostKind);
803 }
804 
805 InstructionCost GCNTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
806                                                TTI::TargetCostKind CostKind,
807                                                unsigned Index, Value *Op0,
808                                                Value *Op1) {
809   switch (Opcode) {
810   case Instruction::ExtractElement:
811   case Instruction::InsertElement: {
812     unsigned EltSize
813       = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
814     if (EltSize < 32) {
815       if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
816         return 0;
817       return BaseT::getVectorInstrCost(Opcode, ValTy, CostKind, Index, Op0,
818                                        Op1);
819     }
820 
821     // Extracts are just reads of a subregister, so are free. Inserts are
822     // considered free because we don't want to have any cost for scalarizing
823     // operations, and we don't have to copy into a different register class.
824 
825     // Dynamic indexing isn't free and is best avoided.
826     return Index == ~0u ? 2 : 0;
827   }
828   default:
829     return BaseT::getVectorInstrCost(Opcode, ValTy, CostKind, Index, Op0, Op1);
830   }
831 }
832 
833 /// Analyze if the results of inline asm are divergent. If \p Indices is empty,
834 /// this is analyzing the collective result of all output registers. Otherwise,
835 /// this is only querying a specific result index if this returns multiple
836 /// registers in a struct.
837 bool GCNTTIImpl::isInlineAsmSourceOfDivergence(
838   const CallInst *CI, ArrayRef<unsigned> Indices) const {
839   // TODO: Handle complex extract indices
840   if (Indices.size() > 1)
841     return true;
842 
843   const DataLayout &DL = CI->getModule()->getDataLayout();
844   const SIRegisterInfo *TRI = ST->getRegisterInfo();
845   TargetLowering::AsmOperandInfoVector TargetConstraints =
846       TLI->ParseConstraints(DL, ST->getRegisterInfo(), *CI);
847 
848   const int TargetOutputIdx = Indices.empty() ? -1 : Indices[0];
849 
850   int OutputIdx = 0;
851   for (auto &TC : TargetConstraints) {
852     if (TC.Type != InlineAsm::isOutput)
853       continue;
854 
855     // Skip outputs we don't care about.
856     if (TargetOutputIdx != -1 && TargetOutputIdx != OutputIdx++)
857       continue;
858 
859     TLI->ComputeConstraintToUse(TC, SDValue());
860 
861     const TargetRegisterClass *RC = TLI->getRegForInlineAsmConstraint(
862         TRI, TC.ConstraintCode, TC.ConstraintVT).second;
863 
864     // For AGPR constraints null is returned on subtargets without AGPRs, so
865     // assume divergent for null.
866     if (!RC || !TRI->isSGPRClass(RC))
867       return true;
868   }
869 
870   return false;
871 }
872 
873 bool GCNTTIImpl::isReadRegisterSourceOfDivergence(
874     const IntrinsicInst *ReadReg) const {
875   Metadata *MD =
876       cast<MetadataAsValue>(ReadReg->getArgOperand(0))->getMetadata();
877   StringRef RegName =
878       cast<MDString>(cast<MDNode>(MD)->getOperand(0))->getString();
879 
880   // Special case registers that look like VCC.
881   MVT VT = MVT::getVT(ReadReg->getType());
882   if (VT == MVT::i1)
883     return true;
884 
885   // Special case scalar registers that start with 'v'.
886   if (RegName.startswith("vcc") || RegName.empty())
887     return false;
888 
889   // VGPR or AGPR is divergent. There aren't any specially named vector
890   // registers.
891   return RegName[0] == 'v' || RegName[0] == 'a';
892 }
893 
894 /// \returns true if the result of the value could potentially be
895 /// different across workitems in a wavefront.
896 bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
897   if (const Argument *A = dyn_cast<Argument>(V))
898     return !AMDGPU::isArgPassedInSGPR(A);
899 
900   // Loads from the private and flat address spaces are divergent, because
901   // threads can execute the load instruction with the same inputs and get
902   // different results.
903   //
904   // All other loads are not divergent, because if threads issue loads with the
905   // same arguments, they will always get the same result.
906   if (const LoadInst *Load = dyn_cast<LoadInst>(V))
907     return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
908            Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
909 
910   // Atomics are divergent because they are executed sequentially: when an
911   // atomic operation refers to the same address in each thread, then each
912   // thread after the first sees the value written by the previous thread as
  // the original value.
914   if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
915     return true;
916 
917   if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
918     if (Intrinsic->getIntrinsicID() == Intrinsic::read_register)
919       return isReadRegisterSourceOfDivergence(Intrinsic);
920 
921     return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());
922   }
923 
924   // Assume all function calls are a source of divergence.
925   if (const CallInst *CI = dyn_cast<CallInst>(V)) {
926     if (CI->isInlineAsm())
927       return isInlineAsmSourceOfDivergence(CI);
928     return true;
929   }
930 
931   // Assume all function calls are a source of divergence.
932   if (isa<InvokeInst>(V))
933     return true;
934 
935   return false;
936 }
937 
938 bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
939   if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
940     return AMDGPU::isIntrinsicAlwaysUniform(Intrinsic->getIntrinsicID());
941 
942   if (const CallInst *CI = dyn_cast<CallInst>(V)) {
943     if (CI->isInlineAsm())
944       return !isInlineAsmSourceOfDivergence(CI);
945     return false;
946   }
947 
948   // In most cases TID / wavefrontsize is uniform.
949   //
  // However, if a kernel has uneven dimensions, the value of workitem-id-x
  // divided by the wavefrontsize can be non-uniform. For example, dimensions
  // (65, 2) will have workitems with addresses (64, 0) and (0, 1) packed into
  // the same wave, which gives 1 and 0 after the division by 64, respectively.
  //
  // FIXME: limit it to 1D kernels only, although it should be possible to
  // perform this optimization if the size of the X dimension is a power of 2;
  // we just do not currently have the infrastructure to query it.
959   using namespace llvm::PatternMatch;
960   uint64_t C;
961   if (match(V, m_LShr(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
962                       m_ConstantInt(C))) ||
963       match(V, m_AShr(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
964                       m_ConstantInt(C)))) {
965     const Function *F = cast<Instruction>(V)->getFunction();
966     return C >= ST->getWavefrontSizeLog2() &&
967            ST->getMaxWorkitemID(*F, 1) == 0 && ST->getMaxWorkitemID(*F, 2) == 0;
968   }
969 
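  // Similarly, masking workitem-id-x with a mask that clears at least
  // log2(wavefront size) low bits yields a wave-uniform value, provided the
  // kernel is effectively one-dimensional.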
970   Value *Mask;
971   if (match(V, m_c_And(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
972                        m_Value(Mask)))) {
973     const Function *F = cast<Instruction>(V)->getFunction();
974     const DataLayout &DL = F->getParent()->getDataLayout();
975     return computeKnownBits(Mask, DL).countMinTrailingZeros() >=
976                ST->getWavefrontSizeLog2() &&
977            ST->getMaxWorkitemID(*F, 1) == 0 && ST->getMaxWorkitemID(*F, 2) == 0;
978   }
979 
980   const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V);
981   if (!ExtValue)
982     return false;
983 
984   const CallInst *CI = dyn_cast<CallInst>(ExtValue->getOperand(0));
985   if (!CI)
986     return false;
987 
988   if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(CI)) {
989     switch (Intrinsic->getIntrinsicID()) {
990     default:
991       return false;
992     case Intrinsic::amdgcn_if:
993     case Intrinsic::amdgcn_else: {
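      // The second result of these intrinsics is the saved exec mask, which
      // is a wave-uniform scalar value.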
994       ArrayRef<unsigned> Indices = ExtValue->getIndices();
995       return Indices.size() == 1 && Indices[0] == 1;
996     }
997     }
998   }
999 
  // If we have inline asm returning mixed SGPR and VGPR results, we infer
  // divergence for the overall struct return. We need to override that in the
  // case where we're extracting an SGPR component here.
1003   if (CI->isInlineAsm())
1004     return !isInlineAsmSourceOfDivergence(CI, ExtValue->getIndices());
1005 
1006   return false;
1007 }
1008 
1009 bool GCNTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
1010                                             Intrinsic::ID IID) const {
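  // Operand 0 of these intrinsics is a pointer that may be rewritten to a
  // more specific address space.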
1011   switch (IID) {
1012   case Intrinsic::amdgcn_ds_fadd:
1013   case Intrinsic::amdgcn_ds_fmin:
1014   case Intrinsic::amdgcn_ds_fmax:
1015   case Intrinsic::amdgcn_is_shared:
1016   case Intrinsic::amdgcn_is_private:
1017   case Intrinsic::amdgcn_flat_atomic_fadd:
1018   case Intrinsic::amdgcn_flat_atomic_fmax:
1019   case Intrinsic::amdgcn_flat_atomic_fmin:
1020     OpIndexes.push_back(0);
1021     return true;
1022   default:
1023     return false;
1024   }
1025 }
1026 
1027 Value *GCNTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
1028                                                     Value *OldV,
1029                                                     Value *NewV) const {
1030   auto IntrID = II->getIntrinsicID();
1031   switch (IntrID) {
1032   case Intrinsic::amdgcn_ds_fadd:
1033   case Intrinsic::amdgcn_ds_fmin:
1034   case Intrinsic::amdgcn_ds_fmax: {
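    // Only non-volatile operations can be rewritten; re-mangle the intrinsic
    // for the new pointer type.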
1035     const ConstantInt *IsVolatile = cast<ConstantInt>(II->getArgOperand(4));
1036     if (!IsVolatile->isZero())
1037       return nullptr;
1038     Module *M = II->getParent()->getParent()->getParent();
1039     Type *DestTy = II->getType();
1040     Type *SrcTy = NewV->getType();
1041     Function *NewDecl =
1042         Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
1043     II->setArgOperand(0, NewV);
1044     II->setCalledFunction(NewDecl);
1045     return II;
1046   }
1047   case Intrinsic::amdgcn_is_shared:
1048   case Intrinsic::amdgcn_is_private: {
1049     unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
1050       AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
1051     unsigned NewAS = NewV->getType()->getPointerAddressSpace();
1052     LLVMContext &Ctx = NewV->getType()->getContext();
1053     ConstantInt *NewVal = (TrueAS == NewAS) ?
1054       ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
1055     return NewVal;
1056   }
1057   case Intrinsic::ptrmask: {
1058     unsigned OldAS = OldV->getType()->getPointerAddressSpace();
1059     unsigned NewAS = NewV->getType()->getPointerAddressSpace();
1060     Value *MaskOp = II->getArgOperand(1);
1061     Type *MaskTy = MaskOp->getType();
1062 
1063     bool DoTruncate = false;
1064 
1065     const GCNTargetMachine &TM =
1066         static_cast<const GCNTargetMachine &>(getTLI()->getTargetMachine());
1067     if (!TM.isNoopAddrSpaceCast(OldAS, NewAS)) {
1068       // All valid 64-bit to 32-bit casts work by chopping off the high
1069       // bits. Any masking only clearing the low bits will also apply in the new
1070       // address space.
1071       if (DL.getPointerSizeInBits(OldAS) != 64 ||
1072           DL.getPointerSizeInBits(NewAS) != 32)
1073         return nullptr;
1074 
1075       // TODO: Do we need to thread more context in here?
1076       KnownBits Known = computeKnownBits(MaskOp, DL, 0, nullptr, II);
1077       if (Known.countMinLeadingOnes() < 32)
1078         return nullptr;
1079 
1080       DoTruncate = true;
1081     }
1082 
1083     IRBuilder<> B(II);
1084     if (DoTruncate) {
1085       MaskTy = B.getInt32Ty();
1086       MaskOp = B.CreateTrunc(MaskOp, MaskTy);
1087     }
1088 
1089     return B.CreateIntrinsic(Intrinsic::ptrmask, {NewV->getType(), MaskTy},
1090                              {NewV, MaskOp});
1091   }
1092   case Intrinsic::amdgcn_flat_atomic_fadd:
1093   case Intrinsic::amdgcn_flat_atomic_fmax:
1094   case Intrinsic::amdgcn_flat_atomic_fmin: {
1095     Type *DestTy = II->getType();
1096     Type *SrcTy = NewV->getType();
1097     unsigned NewAS = SrcTy->getPointerAddressSpace();
1098     if (!AMDGPU::isExtendedGlobalAddrSpace(NewAS))
1099       return nullptr;
1100     Module *M = II->getModule();
1101     Function *NewDecl = Intrinsic::getDeclaration(M, II->getIntrinsicID(),
1102                                                   {DestTy, SrcTy, DestTy});
1103     II->setArgOperand(0, NewV);
1104     II->setCalledFunction(NewDecl);
1105     return II;
1106   }
1107   default:
1108     return nullptr;
1109   }
1110 }
1111 
1112 InstructionCost GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1113                                            VectorType *VT, ArrayRef<int> Mask,
1114                                            TTI::TargetCostKind CostKind,
1115                                            int Index, VectorType *SubTp,
1116                                            ArrayRef<const Value *> Args) {
1117   Kind = improveShuffleKindFromMask(Kind, Mask);
1118   if (ST->hasVOP3PInsts()) {
1119     if (cast<FixedVectorType>(VT)->getNumElements() == 2 &&
1120         DL.getTypeSizeInBits(VT->getElementType()) == 16) {
1121       // With op_sel VOP3P instructions freely can access the low half or high
1122       // half of a register, so any swizzle is free.
1123 
1124       switch (Kind) {
1125       case TTI::SK_Broadcast:
1126       case TTI::SK_Reverse:
1127       case TTI::SK_PermuteSingleSrc:
1128         return 0;
1129       default:
1130         break;
1131       }
1132     }
1133   }
1134 
1135   return BaseT::getShuffleCost(Kind, VT, Mask, CostKind, Index, SubTp);
1136 }
1137 
1138 bool GCNTTIImpl::areInlineCompatible(const Function *Caller,
1139                                      const Function *Callee) const {
1140   const TargetMachine &TM = getTLI()->getTargetMachine();
1141   const GCNSubtarget *CallerST
1142     = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Caller));
1143   const GCNSubtarget *CalleeST
1144     = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Callee));
1145 
1146   const FeatureBitset &CallerBits = CallerST->getFeatureBits();
1147   const FeatureBitset &CalleeBits = CalleeST->getFeatureBits();
1148 
1149   FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
1150   FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
1151   if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
1152     return false;
1153 
1154   // FIXME: dx10_clamp can just take the caller setting, but there seems to be
1155   // no way to support merge for backend defined attributes.
1156   SIModeRegisterDefaults CallerMode(*Caller);
1157   SIModeRegisterDefaults CalleeMode(*Callee);
1158   if (!CallerMode.isInlineCompatible(CalleeMode))
1159     return false;
1160 
1161   if (Callee->hasFnAttribute(Attribute::AlwaysInline) ||
1162       Callee->hasFnAttribute(Attribute::InlineHint))
1163     return true;
1164 
1165   // Hack to make compile times reasonable.
1166   if (InlineMaxBB) {
1167     // Single BB does not increase total BB amount.
1168     if (Callee->size() == 1)
1169       return true;
1170     size_t BBSize = Caller->size() + Callee->size() - 1;
1171     return BBSize <= InlineMaxBB;
1172   }
1173 
1174   return true;
1175 }
1176 
1177 static unsigned adjustInliningThresholdUsingCallee(const CallBase *CB,
1178                                                    const SITargetLowering *TLI,
1179                                                    const GCNTTIImpl *TTIImpl) {
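  // Estimated number of argument registers available before arguments start
  // being passed on the stack.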
1180   const int NrOfSGPRUntilSpill = 26;
1181   const int NrOfVGPRUntilSpill = 32;
1182 
1183   const DataLayout &DL = TTIImpl->getDataLayout();
1184 
1185   unsigned adjustThreshold = 0;
1186   int SGPRsInUse = 0;
1187   int VGPRsInUse = 0;
1188   for (const Use &A : CB->args()) {
1189     SmallVector<EVT, 4> ValueVTs;
1190     ComputeValueVTs(*TLI, DL, A.get()->getType(), ValueVTs);
1191     for (auto ArgVT : ValueVTs) {
1192       unsigned CCRegNum = TLI->getNumRegistersForCallingConv(
1193           CB->getContext(), CB->getCallingConv(), ArgVT);
1194       if (AMDGPU::isArgPassedInSGPR(CB, CB->getArgOperandNo(&A)))
1195         SGPRsInUse += CCRegNum;
1196       else
1197         VGPRsInUse += CCRegNum;
1198     }
1199   }
1200 
1201   // The cost of passing function arguments through the stack:
1202   //  1 instruction to put a function argument on the stack in the caller.
  //  1 instruction to take a function argument from the stack in the callee.
  //  1 instruction to explicitly take care of data dependencies in the callee
  //  function.
1206   InstructionCost ArgStackCost(1);
1207   ArgStackCost += const_cast<GCNTTIImpl *>(TTIImpl)->getMemoryOpCost(
1208       Instruction::Store, Type::getInt32Ty(CB->getContext()), Align(4),
1209       AMDGPUAS::PRIVATE_ADDRESS, TTI::TCK_SizeAndLatency);
1210   ArgStackCost += const_cast<GCNTTIImpl *>(TTIImpl)->getMemoryOpCost(
1211       Instruction::Load, Type::getInt32Ty(CB->getContext()), Align(4),
1212       AMDGPUAS::PRIVATE_ADDRESS, TTI::TCK_SizeAndLatency);
1213 
1214   // The penalty cost is computed relative to the cost of instructions and does
1215   // not model any storage costs.
1216   adjustThreshold += std::max(0, SGPRsInUse - NrOfSGPRUntilSpill) *
1217                      *ArgStackCost.getValue() * InlineConstants::getInstrCost();
1218   adjustThreshold += std::max(0, VGPRsInUse - NrOfVGPRUntilSpill) *
1219                      *ArgStackCost.getValue() * InlineConstants::getInstrCost();
1220   return adjustThreshold;
1221 }
1222 
1223 static unsigned getCallArgsTotalAllocaSize(const CallBase *CB,
1224                                            const DataLayout &DL) {
  // If we have a pointer to a private array passed into a function, it will
  // not be optimized out, leaving scratch usage.
  // This function calculates the total size in bytes of the memory that would
  // end up in scratch if the call was not inlined.
1229   unsigned AllocaSize = 0;
1230   SmallPtrSet<const AllocaInst *, 8> AIVisited;
1231   for (Value *PtrArg : CB->args()) {
1232     PointerType *Ty = dyn_cast<PointerType>(PtrArg->getType());
1233     if (!Ty)
1234       continue;
1235 
1236     unsigned AddrSpace = Ty->getAddressSpace();
1237     if (AddrSpace != AMDGPUAS::FLAT_ADDRESS &&
1238         AddrSpace != AMDGPUAS::PRIVATE_ADDRESS)
1239       continue;
1240 
1241     const AllocaInst *AI = dyn_cast<AllocaInst>(getUnderlyingObject(PtrArg));
1242     if (!AI || !AI->isStaticAlloca() || !AIVisited.insert(AI).second)
1243       continue;
1244 
1245     AllocaSize += DL.getTypeAllocSize(AI->getAllocatedType());
1246   }
1247   return AllocaSize;
1248 }
1249 
1250 unsigned GCNTTIImpl::adjustInliningThreshold(const CallBase *CB) const {
1251   unsigned Threshold = adjustInliningThresholdUsingCallee(CB, TLI, this);
1252 
  // Private objects passed as arguments may end up in scratch usage if the
  // call is not inlined. Increase the inline threshold to promote inlining.
1255   unsigned AllocaSize = getCallArgsTotalAllocaSize(CB, DL);
1256   if (AllocaSize > 0)
1257     Threshold += ArgAllocaCost;
1258   return Threshold;
1259 }
1260 
1261 unsigned GCNTTIImpl::getCallerAllocaCost(const CallBase *CB,
1262                                          const AllocaInst *AI) const {
1263 
  // Below the cutoff, assume that the private memory objects would be
  // optimized away.
1266   auto AllocaSize = getCallArgsTotalAllocaSize(CB, DL);
1267   if (AllocaSize <= ArgAllocaCutoff)
1268     return 0;
1269 
  // Above the cutoff, we give a cost to each private memory object depending
  // on its size. If the array can be optimized by SROA this cost is not added
  // to the total-cost in the inliner cost analysis.
1273   //
1274   // We choose the total cost of the alloca such that their sum cancels the
1275   // bonus given in the threshold (ArgAllocaCost).
1276   //
1277   //   Cost_Alloca_0 + ... + Cost_Alloca_N == ArgAllocaCost
1278   //
1279   // Awkwardly, the ArgAllocaCost bonus is multiplied by threshold-multiplier,
1280   // the single-bb bonus and the vector-bonus.
1281   //
1282   // We compensate the first two multipliers, by repeating logic from the
1283   // inliner-cost in here. The vector-bonus is 0 on AMDGPU.
1284   static_assert(InlinerVectorBonusPercent == 0, "vector bonus assumed to be 0");
1285   unsigned Threshold = ArgAllocaCost * getInliningThresholdMultiplier();
1286 
1287   bool SingleBB = none_of(*CB->getCalledFunction(), [](const BasicBlock &BB) {
1288     return BB.getTerminator()->getNumSuccessors() > 1;
1289   });
1290   if (SingleBB) {
1291     Threshold += Threshold / 2;
1292   }
1293 
1294   auto ArgAllocaSize = DL.getTypeAllocSize(AI->getAllocatedType());
1295 
1296   // Attribute the bonus proportionally to the alloca size
1297   unsigned AllocaThresholdBonus = (Threshold * ArgAllocaSize) / AllocaSize;
1298 
1299   return AllocaThresholdBonus;
1300 }
1301 
1302 void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1303                                          TTI::UnrollingPreferences &UP,
1304                                          OptimizationRemarkEmitter *ORE) {
1305   CommonTTI.getUnrollingPreferences(L, SE, UP, ORE);
1306 }
1307 
1308 void GCNTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1309                                        TTI::PeelingPreferences &PP) {
1310   CommonTTI.getPeelingPreferences(L, SE, PP);
1311 }
1312 
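// Cost of a 64-bit ALU operation, depending on whether the subtarget executes
// 64-bit operations at full, half or quarter rate.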
1313 int GCNTTIImpl::get64BitInstrCost(TTI::TargetCostKind CostKind) const {
1314   return ST->hasFullRate64Ops()
1315              ? getFullRateInstrCost()
1316              : ST->hasHalfRate64Ops() ? getHalfRateInstrCost(CostKind)
1317                                       : getQuarterRateInstrCost(CostKind);
1318 }
1319 
1320 std::pair<InstructionCost, MVT>
1321 GCNTTIImpl::getTypeLegalizationCost(Type *Ty) const {
1322   std::pair<InstructionCost, MVT> Cost = BaseT::getTypeLegalizationCost(Ty);
1323   auto Size = DL.getTypeSizeInBits(Ty);
  // The maximum load or store can handle 8 dwords for the scalar ALU and 4 for
  // the vector ALU. Let's assume anything above 8 dwords is expensive even if
  // legal.
1327   if (Size <= 256)
1328     return Cost;
1329 
1330   Cost.first += (Size + 255) / 256;
1331   return Cost;
1332 }
1333