1 //===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // \file
10 // This file implements a TargetTransformInfo analysis pass specific to the
11 // AMDGPU target machine. It uses the target's detailed information to provide
12 // more precise answers to certain TTI queries, while letting the target
13 // independent and default TTI implementations handle the rest.
14 //
15 //===----------------------------------------------------------------------===//
16
17 #include "AMDGPUTargetTransformInfo.h"
18 #include "AMDGPUTargetMachine.h"
19 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
20 #include "llvm/Analysis/LoopInfo.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/IR/IRBuilder.h"
23 #include "llvm/IR/IntrinsicsAMDGPU.h"
24 #include "llvm/IR/PatternMatch.h"
25 #include "llvm/Support/KnownBits.h"
26
27 using namespace llvm;
28
29 #define DEBUG_TYPE "AMDGPUtti"
30
31 static cl::opt<unsigned> UnrollThresholdPrivate(
32 "amdgpu-unroll-threshold-private",
33 cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
34 cl::init(2700), cl::Hidden);
35
36 static cl::opt<unsigned> UnrollThresholdLocal(
37 "amdgpu-unroll-threshold-local",
38 cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
39 cl::init(1000), cl::Hidden);
40
41 static cl::opt<unsigned> UnrollThresholdIf(
42 "amdgpu-unroll-threshold-if",
43 cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
44 cl::init(200), cl::Hidden);
45
46 static cl::opt<bool> UnrollRuntimeLocal(
47 "amdgpu-unroll-runtime-local",
48 cl::desc("Allow runtime unroll for AMDGPU if local memory used in a loop"),
49 cl::init(true), cl::Hidden);
50
51 static cl::opt<bool> UseLegacyDA(
52 "amdgpu-use-legacy-divergence-analysis",
53 cl::desc("Enable legacy divergence analysis for AMDGPU"),
54 cl::init(false), cl::Hidden);
55
56 static cl::opt<unsigned> UnrollMaxBlockToAnalyze(
57 "amdgpu-unroll-max-block-to-analyze",
58 cl::desc("Inner loop block size threshold to analyze in unroll for AMDGPU"),
59 cl::init(32), cl::Hidden);
60
61 static cl::opt<unsigned> ArgAllocaCost("amdgpu-inline-arg-alloca-cost",
62 cl::Hidden, cl::init(4000),
63 cl::desc("Cost of alloca argument"));
64
65 // If the amount of scratch memory to eliminate exceeds our ability to allocate
66 // it into registers, we gain nothing by aggressively inlining functions for
67 // that heuristic.
68 static cl::opt<unsigned>
69 ArgAllocaCutoff("amdgpu-inline-arg-alloca-cutoff", cl::Hidden,
70 cl::init(256),
71 cl::desc("Maximum alloca size to use for inline cost"));
72
73 // Inliner constraint to achieve reasonable compilation time.
74 static cl::opt<size_t> InlineMaxBB(
75 "amdgpu-inline-max-bb", cl::Hidden, cl::init(1100),
76 cl::desc("Maximum number of BBs allowed in a function after inlining"
77 " (compile time constraint)"));
78
79 static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
80 unsigned Depth = 0) {
81 const Instruction *I = dyn_cast<Instruction>(Cond);
82 if (!I)
83 return false;
84
85 for (const Value *V : I->operand_values()) {
86 if (!L->contains(I))
87 continue;
88 if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
89 if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
90 return SubLoop->contains(PHI); }))
91 return true;
92 } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
93 return true;
94 }
95 return false;
96 }
97
98 AMDGPUTTIImpl::AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
99 : BaseT(TM, F.getParent()->getDataLayout()),
100 TargetTriple(TM->getTargetTriple()),
101 ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
102 TLI(ST->getTargetLowering()) {}
103
104 void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
105 TTI::UnrollingPreferences &UP,
106 OptimizationRemarkEmitter *ORE) {
107 const Function &F = *L->getHeader()->getParent();
108 UP.Threshold = AMDGPU::getIntegerAttribute(F, "amdgpu-unroll-threshold", 300);
109 UP.MaxCount = std::numeric_limits<unsigned>::max();
110 UP.Partial = true;
111
112 // A conditional branch in a loop back edge needs 3 additional exec
113 // manipulations on average.
114 UP.BEInsns += 3;
115
116 // TODO: Do we want runtime unrolling?
117
118 // Maximum alloca size that can fit in registers. Reserve 16 registers.
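// (A wave can address at most 256 VGPRs of 4 bytes each; 16 are kept in
// reserve, hence (256 - 16) * 4.)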
119 const unsigned MaxAlloca = (256 - 16) * 4;
120 unsigned ThresholdPrivate = UnrollThresholdPrivate;
121 unsigned ThresholdLocal = UnrollThresholdLocal;
122
123 // If this loop has the amdgpu.loop.unroll.threshold metadata, we will use the
124 // provided threshold value as the default for Threshold.
125 if (MDNode *LoopUnrollThreshold =
126 findOptionMDForLoop(L, "amdgpu.loop.unroll.threshold")) {
127 if (LoopUnrollThreshold->getNumOperands() == 2) {
128 ConstantInt *MetaThresholdValue = mdconst::extract_or_null<ConstantInt>(
129 LoopUnrollThreshold->getOperand(1));
130 if (MetaThresholdValue) {
131 // We will also use the supplied value for PartialThreshold for now.
132 // We may introduce additional metadata if it becomes necessary in the
133 // future.
134 UP.Threshold = MetaThresholdValue->getSExtValue();
135 UP.PartialThreshold = UP.Threshold;
136 ThresholdPrivate = std::min(ThresholdPrivate, UP.Threshold);
137 ThresholdLocal = std::min(ThresholdLocal, UP.Threshold);
138 }
139 }
140 }
141
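// MaxBoost caps how far the per-branch and per-GEP bonuses below may raise
// UP.Threshold; once it is reached we stop scanning.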
142 unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
143 for (const BasicBlock *BB : L->getBlocks()) {
144 const DataLayout &DL = BB->getModule()->getDataLayout();
145 unsigned LocalGEPsSeen = 0;
146
147 if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
148 return SubLoop->contains(BB); }))
149 continue; // Block belongs to an inner loop.
150
151 for (const Instruction &I : *BB) {
152 // Unroll a loop which contains an "if" statement whose condition is
153 // defined by a PHI belonging to the loop. This may help to eliminate
154 // the if region and potentially even the PHI itself, saving on both
155 // divergence and registers used for the PHI.
156 // Add a small bonus for each such "if" statement.
157 if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
158 if (UP.Threshold < MaxBoost && Br->isConditional()) {
159 BasicBlock *Succ0 = Br->getSuccessor(0);
160 BasicBlock *Succ1 = Br->getSuccessor(1);
161 if ((L->contains(Succ0) && L->isLoopExiting(Succ0)) ||
162 (L->contains(Succ1) && L->isLoopExiting(Succ1)))
163 continue;
164 if (dependsOnLocalPhi(L, Br->getCondition())) {
165 UP.Threshold += UnrollThresholdIf;
166 LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
167 << " for loop:\n"
168 << *L << " due to " << *Br << '\n');
169 if (UP.Threshold >= MaxBoost)
170 return;
171 }
172 }
173 continue;
174 }
175
176 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
177 if (!GEP)
178 continue;
179
180 unsigned AS = GEP->getAddressSpace();
181 unsigned Threshold = 0;
182 if (AS == AMDGPUAS::PRIVATE_ADDRESS)
183 Threshold = ThresholdPrivate;
184 else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS)
185 Threshold = ThresholdLocal;
186 else
187 continue;
188
189 if (UP.Threshold >= Threshold)
190 continue;
191
192 if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
193 const Value *Ptr = GEP->getPointerOperand();
194 const AllocaInst *Alloca =
195 dyn_cast<AllocaInst>(getUnderlyingObject(Ptr));
196 if (!Alloca || !Alloca->isStaticAlloca())
197 continue;
198 Type *Ty = Alloca->getAllocatedType();
199 unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
200 if (AllocaSize > MaxAlloca)
201 continue;
202 } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
203 AS == AMDGPUAS::REGION_ADDRESS) {
204 LocalGEPsSeen++;
205 // Inhibit unrolling for local memory if we have seen addressing not based
206 // on a variable; most likely we will be unable to combine it.
207 // Do not unroll too deeply nested inner loops for local memory, to give an
208 // outer loop a chance to be unrolled for a more important reason.
209 if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
210 (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
211 !isa<Argument>(GEP->getPointerOperand())))
212 continue;
213 LLVM_DEBUG(dbgs() << "Allow unroll runtime for loop:\n"
214 << *L << " due to LDS use.\n");
215 UP.Runtime = UnrollRuntimeLocal;
216 }
217
218 // Check if GEP depends on a value defined by this loop itself.
219 bool HasLoopDef = false;
220 for (const Value *Op : GEP->operands()) {
221 const Instruction *Inst = dyn_cast<Instruction>(Op);
222 if (!Inst || L->isLoopInvariant(Op))
223 continue;
224
225 if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
226 return SubLoop->contains(Inst); }))
227 continue;
228 HasLoopDef = true;
229 break;
230 }
231 if (!HasLoopDef)
232 continue;
233
234 // We want to do whatever we can to limit the number of alloca
235 // instructions that make it through to the code generator. Allocas
236 // require us to use indirect addressing, which is slow and prone to
237 // compiler bugs. If this loop does an address calculation on an
238 // alloca pointer, then we want to use a higher-than-normal loop unroll
239 // threshold. This will give SROA a better chance to eliminate these
240 // allocas.
241 //
242 // We also want to have more unrolling for local memory to let ds
243 // instructions with different offsets combine.
244 //
245 // Don't use the maximum allowed value here as it will make some
246 // programs way too big.
247 UP.Threshold = Threshold;
248 LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
249 << " for loop:\n"
250 << *L << " due to " << *GEP << '\n');
251 if (UP.Threshold >= MaxBoost)
252 return;
253 }
254
255 // If we got a GEP in a small BB from an inner loop, then increase the max
256 // trip count to analyze, for a better cost estimation during unrolling.
257 if (L->isInnermost() && BB->size() < UnrollMaxBlockToAnalyze)
258 UP.MaxIterationsCountToAnalyze = 32;
259 }
260 }
261
262 void AMDGPUTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
263 TTI::PeelingPreferences &PP) {
264 BaseT::getPeelingPreferences(L, SE, PP);
265 }
266
267 const FeatureBitset GCNTTIImpl::InlineFeatureIgnoreList = {
268 // Codegen control options which don't matter.
269 AMDGPU::FeatureEnableLoadStoreOpt, AMDGPU::FeatureEnableSIScheduler,
270 AMDGPU::FeatureEnableUnsafeDSOffsetFolding, AMDGPU::FeatureFlatForGlobal,
271 AMDGPU::FeaturePromoteAlloca, AMDGPU::FeatureUnalignedScratchAccess,
272 AMDGPU::FeatureUnalignedAccessMode,
273
274 AMDGPU::FeatureAutoWaitcntBeforeBarrier,
275
276 // Property of the kernel/environment which can't actually differ.
277 AMDGPU::FeatureSGPRInitBug, AMDGPU::FeatureXNACK,
278 AMDGPU::FeatureTrapHandler,
279
280 // The default assumption needs to be that ECC is enabled, but no directly
281 // exposed operations depend on it, so it can be safely inlined.
282 AMDGPU::FeatureSRAMECC,
283
284 // Perf-tuning features
285 AMDGPU::FeatureFastFMAF32, AMDGPU::HalfRate64Ops};
286
287 GCNTTIImpl::GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
288 : BaseT(TM, F.getParent()->getDataLayout()),
289 ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
290 TLI(ST->getTargetLowering()), CommonTTI(TM, F),
291 IsGraphics(AMDGPU::isGraphics(F.getCallingConv())),
292 MaxVGPRs(ST->getMaxNumVGPRs(
293 std::max(ST->getWavesPerEU(F).first,
294 ST->getWavesPerEUForWorkGroup(
295 ST->getFlatWorkGroupSizes(F).second)))) {
296 AMDGPU::SIModeRegisterDefaults Mode(F);
297 HasFP32Denormals = Mode.allFP32Denormals();
298 HasFP64FP16Denormals = Mode.allFP64FP16Denormals();
299 }
300
301 unsigned GCNTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
302 // The concept of vector registers doesn't really exist. Some packed vector
303 // operations operate on the normal 32-bit registers.
304 return MaxVGPRs;
305 }
306
307 unsigned GCNTTIImpl::getNumberOfRegisters(bool Vec) const {
308 // This is really the number of registers to fill when vectorizing /
309 // interleaving loops, so we lie to avoid trying to use all registers.
310 return getHardwareNumberOfRegisters(Vec) >> 3;
311 }
312
313 unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
314 const SIRegisterInfo *TRI = ST->getRegisterInfo();
315 const TargetRegisterClass *RC = TRI->getRegClass(RCID);
316 unsigned NumVGPRs = (TRI->getRegSizeInBits(*RC) + 31) / 32;
317 return getHardwareNumberOfRegisters(false) / NumVGPRs;
318 }
319
320 TypeSize
321 GCNTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
322 switch (K) {
323 case TargetTransformInfo::RGK_Scalar:
324 return TypeSize::getFixed(32);
325 case TargetTransformInfo::RGK_FixedWidthVector:
326 return TypeSize::getFixed(ST->hasPackedFP32Ops() ? 64 : 32);
327 case TargetTransformInfo::RGK_ScalableVector:
328 return TypeSize::getScalable(0);
329 }
330 llvm_unreachable("Unsupported register kind");
331 }
332
333 unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
334 return 32;
335 }
336
337 unsigned GCNTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
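// For loads and stores, aim for a 128-bit (4 x 32-bit) access, so the
// maximum VF is 128 / ElemWidth.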
338 if (Opcode == Instruction::Load || Opcode == Instruction::Store)
339 return 32 * 4 / ElemWidth;
340 return (ElemWidth == 16 && ST->has16BitInsts()) ? 2
341 : (ElemWidth == 32 && ST->hasPackedFP32Ops()) ? 2
342 : 1;
343 }
344
345 unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
346 unsigned ChainSizeInBytes,
347 VectorType *VecTy) const {
348 unsigned VecRegBitWidth = VF * LoadSize;
349 if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
350 // TODO: Support element-size less than 32bit?
351 return 128 / LoadSize;
352
353 return VF;
354 }
355
356 unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
357 unsigned ChainSizeInBytes,
358 VectorType *VecTy) const {
359 unsigned VecRegBitWidth = VF * StoreSize;
360 if (VecRegBitWidth > 128)
361 return 128 / StoreSize;
362
363 return VF;
364 }
365
366 unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
367 if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
368 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
369 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
370 AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER) {
371 return 512;
372 }
373
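// Private (scratch) accesses are limited by the subtarget's maximum private
// element size, which is reported in bytes (hence the factor of 8 to get bits).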
374 if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
375 return 8 * ST->getMaxPrivateElementSize();
376
377 // Common to flat, global, local and region. Assume for unknown addrspace.
378 return 128;
379 }
380
381 bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
382 Align Alignment,
383 unsigned AddrSpace) const {
384 // We allow vectorization of flat stores, even though we may need to decompose
385 // them later if they may access private memory. We don't have enough context
386 // here, and legalization can handle it.
387 if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
388 return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
389 ChainSizeInBytes <= ST->getMaxPrivateElementSize();
390 }
391 return true;
392 }
393
394 bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
395 Align Alignment,
396 unsigned AddrSpace) const {
397 return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
398 }
399
400 bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
401 Align Alignment,
402 unsigned AddrSpace) const {
403 return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
404 }
405
406 // FIXME: Really we would like to issue multiple 128-bit loads and stores per
407 // iteration. Should we report a larger size and let it legalize?
408 //
409 // FIXME: Should we use narrower types for local/region, or account for when
410 // unaligned access is legal?
411 //
412 // FIXME: This could use fine tuning and microbenchmarks.
413 Type *GCNTTIImpl::getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
414 unsigned SrcAddrSpace,
415 unsigned DestAddrSpace,
416 unsigned SrcAlign,
417 unsigned DestAlign) const {
418 unsigned MinAlign = std::min(SrcAlign, DestAlign);
419
420 // A (multi-)dword access at an address == 2 (mod 4) will be decomposed by the
421 // hardware into byte accesses. If you assume all alignments are equally
422 // probable, it's more efficient on average to use short accesses for this
423 // case.
424 if (MinAlign == 2)
425 return Type::getInt16Ty(Context);
426
427 // Not all subtargets have 128-bit DS instructions, and we currently don't
428 // form them by default.
429 if (SrcAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
430 SrcAddrSpace == AMDGPUAS::REGION_ADDRESS ||
431 DestAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
432 DestAddrSpace == AMDGPUAS::REGION_ADDRESS) {
433 return FixedVectorType::get(Type::getInt32Ty(Context), 2);
434 }
435
436 // Global memory works best with 16-byte accesses. Private memory will also
437 // hit this, although they'll be decomposed.
438 return FixedVectorType::get(Type::getInt32Ty(Context), 4);
439 }
440
441 void GCNTTIImpl::getMemcpyLoopResidualLoweringType(
442 SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
443 unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
444 unsigned SrcAlign, unsigned DestAlign) const {
445 assert(RemainingBytes < 16);
446
447 unsigned MinAlign = std::min(SrcAlign, DestAlign);
448
449 if (MinAlign != 2) {
450 Type *I64Ty = Type::getInt64Ty(Context);
451 while (RemainingBytes >= 8) {
452 OpsOut.push_back(I64Ty);
453 RemainingBytes -= 8;
454 }
455
456 Type *I32Ty = Type::getInt32Ty(Context);
457 while (RemainingBytes >= 4) {
458 OpsOut.push_back(I32Ty);
459 RemainingBytes -= 4;
460 }
461 }
462
463 Type *I16Ty = Type::getInt16Ty(Context);
464 while (RemainingBytes >= 2) {
465 OpsOut.push_back(I16Ty);
466 RemainingBytes -= 2;
467 }
468
469 Type *I8Ty = Type::getInt8Ty(Context);
470 while (RemainingBytes) {
471 OpsOut.push_back(I8Ty);
472 --RemainingBytes;
473 }
474 }
475
476 unsigned GCNTTIImpl::getMaxInterleaveFactor(unsigned VF) {
477 // Disable unrolling if the loop is not vectorized.
478 // TODO: Enable this again.
479 if (VF == 1)
480 return 1;
481
482 return 8;
483 }
484
485 bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
486 MemIntrinsicInfo &Info) const {
487 switch (Inst->getIntrinsicID()) {
488 case Intrinsic::amdgcn_atomic_inc:
489 case Intrinsic::amdgcn_atomic_dec:
490 case Intrinsic::amdgcn_ds_ordered_add:
491 case Intrinsic::amdgcn_ds_ordered_swap:
492 case Intrinsic::amdgcn_ds_fadd:
493 case Intrinsic::amdgcn_ds_fmin:
494 case Intrinsic::amdgcn_ds_fmax: {
495 auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
496 auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
497 if (!Ordering || !Volatile)
498 return false; // Invalid.
499
500 unsigned OrderingVal = Ordering->getZExtValue();
501 if (OrderingVal > static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
502 return false;
503
504 Info.PtrVal = Inst->getArgOperand(0);
505 Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
506 Info.ReadMem = true;
507 Info.WriteMem = true;
508 Info.IsVolatile = !Volatile->isZero();
509 return true;
510 }
511 default:
512 return false;
513 }
514 }
515
516 InstructionCost GCNTTIImpl::getArithmeticInstrCost(
517 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
518 TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
519 TTI::OperandValueProperties Opd1PropInfo,
520 TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
521 const Instruction *CxtI) {
522 EVT OrigTy = TLI->getValueType(DL, Ty);
523 if (!OrigTy.isSimple()) {
524 // FIXME: We're having to query the throughput cost so that the basic
525 // implementation tries to generate legalize and scalarization costs. Maybe
526 // we could hoist the scalarization code here?
527 if (CostKind != TTI::TCK_CodeSize)
528 return BaseT::getArithmeticInstrCost(Opcode, Ty, TTI::TCK_RecipThroughput,
529 Opd1Info, Opd2Info, Opd1PropInfo,
530 Opd2PropInfo, Args, CxtI);
531 // Scalarization
532
533 // Check if any of the operands are vector operands.
534 int ISD = TLI->InstructionOpcodeToISD(Opcode);
535 assert(ISD && "Invalid opcode");
536
537 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
538
539 bool IsFloat = Ty->isFPOrFPVectorTy();
540 // Assume that floating point arithmetic operations cost twice as much as
541 // integer operations.
542 unsigned OpCost = (IsFloat ? 2 : 1);
543
544 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
545 // The operation is legal. Assume it costs 1.
546 // TODO: Once we have extract/insert subvector cost we need to use them.
547 return LT.first * OpCost;
548 }
549
550 if (!TLI->isOperationExpand(ISD, LT.second)) {
551 // If the operation is custom lowered, then assume that the code is twice
552 // as expensive.
553 return LT.first * 2 * OpCost;
554 }
555
556 // Else, assume that we need to scalarize this op.
557 // TODO: If one of the types get legalized by splitting, handle this
558 // similarly to what getCastInstrCost() does.
559 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
560 unsigned Num = cast<FixedVectorType>(VTy)->getNumElements();
561 InstructionCost Cost = getArithmeticInstrCost(
562 Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
563 Opd1PropInfo, Opd2PropInfo, Args, CxtI);
564 // Return the cost of multiple scalar invocations plus the cost of
565 // inserting and extracting the values.
566 SmallVector<Type *> Tys(Args.size(), Ty);
567 return getScalarizationOverhead(VTy, Args, Tys) + Num * Cost;
568 }
569
570 // We don't know anything about this scalar instruction.
571 return OpCost;
572 }
573
574 // Legalize the type.
575 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
576 int ISD = TLI->InstructionOpcodeToISD(Opcode);
577
578 // Because we don't have any legal vector operations, only legal types, we
579 // need to account for split vectors.
580 unsigned NElts = LT.second.isVector() ?
581 LT.second.getVectorNumElements() : 1;
582
583 MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
584
585 switch (ISD) {
586 case ISD::SHL:
587 case ISD::SRL:
588 case ISD::SRA:
589 if (SLT == MVT::i64)
590 return get64BitInstrCost(CostKind) * LT.first * NElts;
591
592 if (ST->has16BitInsts() && SLT == MVT::i16)
593 NElts = (NElts + 1) / 2;
594
595 // i32
596 return getFullRateInstrCost() * LT.first * NElts;
597 case ISD::ADD:
598 case ISD::SUB:
599 case ISD::AND:
600 case ISD::OR:
601 case ISD::XOR:
602 if (SLT == MVT::i64) {
603 // and, or and xor are typically split into 2 VALU instructions.
604 return 2 * getFullRateInstrCost() * LT.first * NElts;
605 }
606
607 if (ST->has16BitInsts() && SLT == MVT::i16)
608 NElts = (NElts + 1) / 2;
609
610 return LT.first * NElts * getFullRateInstrCost();
611 case ISD::MUL: {
612 const int QuarterRateCost = getQuarterRateInstrCost(CostKind);
613 if (SLT == MVT::i64) {
614 const int FullRateCost = getFullRateInstrCost();
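// Rough estimate: a full 64-bit multiply is expanded into several 32-bit
// multiplies (quarter rate) plus add/add-with-carry steps (full rate).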
615 return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
616 }
617
618 if (ST->has16BitInsts() && SLT == MVT::i16)
619 NElts = (NElts + 1) / 2;
620
621 // i32
622 return QuarterRateCost * NElts * LT.first;
623 }
624 case ISD::FMUL:
625 // Check for a possible fusion of {fadd|fsub}(a, fmul(b,c)) and return zero
626 // cost for the fmul(b,c), assuming the fadd|fsub will get the estimated cost
627 // of the whole fused operation.
628 if (CxtI && CxtI->hasOneUse())
629 if (const auto *FAdd = dyn_cast<BinaryOperator>(*CxtI->user_begin())) {
630 const int OPC = TLI->InstructionOpcodeToISD(FAdd->getOpcode());
631 if (OPC == ISD::FADD || OPC == ISD::FSUB) {
632 if (ST->hasMadMacF32Insts() && SLT == MVT::f32 && !HasFP32Denormals)
633 return TargetTransformInfo::TCC_Free;
634 if (ST->has16BitInsts() && SLT == MVT::f16 && !HasFP64FP16Denormals)
635 return TargetTransformInfo::TCC_Free;
636
637 // Assume all types may be fused when contract/unsafe flags are present.
638 const TargetOptions &Options = TLI->getTargetMachine().Options;
639 if (Options.AllowFPOpFusion == FPOpFusion::Fast ||
640 Options.UnsafeFPMath ||
641 (FAdd->hasAllowContract() && CxtI->hasAllowContract()))
642 return TargetTransformInfo::TCC_Free;
643 }
644 }
645 LLVM_FALLTHROUGH;
646 case ISD::FADD:
647 case ISD::FSUB:
648 if (ST->hasPackedFP32Ops() && SLT == MVT::f32)
649 NElts = (NElts + 1) / 2;
650 if (SLT == MVT::f64)
651 return LT.first * NElts * get64BitInstrCost(CostKind);
652
653 if (ST->has16BitInsts() && SLT == MVT::f16)
654 NElts = (NElts + 1) / 2;
655
656 if (SLT == MVT::f32 || SLT == MVT::f16)
657 return LT.first * NElts * getFullRateInstrCost();
658 break;
659 case ISD::FDIV:
660 case ISD::FREM:
661 // FIXME: frem should be handled separately. The fdiv in it is most of it,
662 // but the current lowering is also not entirely correct.
663 if (SLT == MVT::f64) {
664 int Cost = 7 * get64BitInstrCost(CostKind) +
665 getQuarterRateInstrCost(CostKind) +
666 3 * getHalfRateInstrCost(CostKind);
667 // Add cost of workaround.
668 if (!ST->hasUsableDivScaleConditionOutput())
669 Cost += 3 * getFullRateInstrCost();
670
671 return LT.first * Cost * NElts;
672 }
673
674 if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
675 // TODO: This is more complicated, unsafe flags etc.
676 if ((SLT == MVT::f32 && !HasFP32Denormals) ||
677 (SLT == MVT::f16 && ST->has16BitInsts())) {
678 return LT.first * getQuarterRateInstrCost(CostKind) * NElts;
679 }
680 }
681
682 if (SLT == MVT::f16 && ST->has16BitInsts()) {
683 // 2 x v_cvt_f32_f16
684 // f32 rcp
685 // f32 fmul
686 // v_cvt_f16_f32
687 // f16 div_fixup
688 int Cost =
689 4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost(CostKind);
690 return LT.first * Cost * NElts;
691 }
692
693 if (SLT == MVT::f32 || SLT == MVT::f16) {
694 // 4 more v_cvt_* insts without f16 insts support
695 int Cost = (SLT == MVT::f16 ? 14 : 10) * getFullRateInstrCost() +
696 1 * getQuarterRateInstrCost(CostKind);
697
698 if (!HasFP32Denormals) {
699 // FP mode switches.
700 Cost += 2 * getFullRateInstrCost();
701 }
702
703 return LT.first * NElts * Cost;
704 }
705 break;
706 case ISD::FNEG:
707 // Use the backend's estimation. If fneg is not free, each element will cost
708 // one additional instruction.
709 return TLI->isFNegFree(SLT) ? 0 : NElts;
710 default:
711 break;
712 }
713
714 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
715 Opd1PropInfo, Opd2PropInfo, Args, CxtI);
716 }
717
718 // Return true if there's a potential benefit from using v2f16/v2i16
719 // instructions for an intrinsic, even if it requires nontrivial legalization.
720 static bool intrinsicHasPackedVectorBenefit(Intrinsic::ID ID) {
721 switch (ID) {
722 case Intrinsic::fma: // TODO: fmuladd
723 // There's a small benefit to using vector ops in the legalized code.
724 case Intrinsic::round:
725 case Intrinsic::uadd_sat:
726 case Intrinsic::usub_sat:
727 case Intrinsic::sadd_sat:
728 case Intrinsic::ssub_sat:
729 return true;
730 default:
731 return false;
732 }
733 }
734
735 InstructionCost
736 GCNTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
737 TTI::TargetCostKind CostKind) {
738 if (ICA.getID() == Intrinsic::fabs)
739 return 0;
740
741 if (!intrinsicHasPackedVectorBenefit(ICA.getID()))
742 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
743
744 Type *RetTy = ICA.getReturnType();
745 EVT OrigTy = TLI->getValueType(DL, RetTy);
746 if (!OrigTy.isSimple()) {
747 if (CostKind != TTI::TCK_CodeSize)
748 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
749
750 // TODO: Combine these two logic paths.
751 if (ICA.isTypeBasedOnly())
752 return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
753
754 unsigned RetVF =
755 (RetTy->isVectorTy() ? cast<FixedVectorType>(RetTy)->getNumElements()
756 : 1);
757 const IntrinsicInst *I = ICA.getInst();
758 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
759 FastMathFlags FMF = ICA.getFlags();
760 // Assume that we need to scalarize this intrinsic.
761
762 // Compute the scalarization overhead based on Args for a vector
763 // intrinsic. A vectorizer will pass a scalar RetTy and VF > 1, while
764 // CostModel will pass a vector RetTy and VF is 1.
765 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
766 if (RetVF > 1) {
767 ScalarizationCost = 0;
768 if (!RetTy->isVoidTy())
769 ScalarizationCost +=
770 getScalarizationOverhead(cast<VectorType>(RetTy), true, false);
771 ScalarizationCost +=
772 getOperandsScalarizationOverhead(Args, ICA.getArgTypes());
773 }
774
775 IntrinsicCostAttributes Attrs(ICA.getID(), RetTy, ICA.getArgTypes(), FMF, I,
776 ScalarizationCost);
777 return getIntrinsicInstrCost(Attrs, CostKind);
778 }
779
780 // Legalize the type.
781 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
782
783 unsigned NElts = LT.second.isVector() ?
784 LT.second.getVectorNumElements() : 1;
785
786 MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
787
788 if (SLT == MVT::f64)
789 return LT.first * NElts * get64BitInstrCost(CostKind);
790
791 if ((ST->has16BitInsts() && SLT == MVT::f16) ||
792 (ST->hasPackedFP32Ops() && SLT == MVT::f32))
793 NElts = (NElts + 1) / 2;
794
795 // TODO: Get more refined intrinsic costs?
796 unsigned InstRate = getQuarterRateInstrCost(CostKind);
797
798 switch (ICA.getID()) {
799 case Intrinsic::fma:
800 InstRate = ST->hasFastFMAF32() ? getHalfRateInstrCost(CostKind)
801 : getQuarterRateInstrCost(CostKind);
802 break;
803 case Intrinsic::uadd_sat:
804 case Intrinsic::usub_sat:
805 case Intrinsic::sadd_sat:
806 case Intrinsic::ssub_sat:
807 static const auto ValidSatTys = {MVT::v2i16, MVT::v4i16};
808 if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
809 NElts = 1;
810 break;
811 }
812
813 return LT.first * NElts * InstRate;
814 }
815
816 InstructionCost GCNTTIImpl::getCFInstrCost(unsigned Opcode,
817 TTI::TargetCostKind CostKind,
818 const Instruction *I) {
819 assert((I == nullptr || I->getOpcode() == Opcode) &&
820 "Opcode should reflect passed instruction.");
821 const bool SCost =
822 (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency);
823 const int CBrCost = SCost ? 5 : 7;
824 switch (Opcode) {
825 case Instruction::Br: {
826 // Branch instruction takes about 4 slots on gfx900.
827 auto BI = dyn_cast_or_null<BranchInst>(I);
828 if (BI && BI->isUnconditional())
829 return SCost ? 1 : 4;
830 // Assume a conditional branch takes 3 additional exec manipulation
831 // instructions on average.
832 return CBrCost;
833 }
834 case Instruction::Switch: {
835 auto SI = dyn_cast_or_null<SwitchInst>(I);
836 // Each case (including the default) takes 1 cmp + 1 cbr instruction on
837 // average.
838 return (SI ? (SI->getNumCases() + 1) : 4) * (CBrCost + 1);
839 }
840 case Instruction::Ret:
841 return SCost ? 1 : 10;
842 }
843 return BaseT::getCFInstrCost(Opcode, CostKind, I);
844 }
845
846 InstructionCost
847 GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
848 Optional<FastMathFlags> FMF,
849 TTI::TargetCostKind CostKind) {
850 if (TTI::requiresOrderedReduction(FMF))
851 return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
852
853 EVT OrigTy = TLI->getValueType(DL, Ty);
854
855 // Compute the cost on targets that have packed math instructions (which
856 // support 16-bit types only).
857 if (!ST->hasVOP3PInsts() || OrigTy.getScalarSizeInBits() != 16)
858 return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
859
860 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
861 return LT.first * getFullRateInstrCost();
862 }
863
864 InstructionCost
865 GCNTTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
866 bool IsUnsigned,
867 TTI::TargetCostKind CostKind) {
868 EVT OrigTy = TLI->getValueType(DL, Ty);
869
870 // Compute the cost on targets that have packed math instructions (which
871 // support 16-bit types only).
872 if (!ST->hasVOP3PInsts() || OrigTy.getScalarSizeInBits() != 16)
873 return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
874
875 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
876 return LT.first * getHalfRateInstrCost(CostKind);
877 }
878
879 InstructionCost GCNTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
880 unsigned Index) {
881 switch (Opcode) {
882 case Instruction::ExtractElement:
883 case Instruction::InsertElement: {
884 unsigned EltSize
885 = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
886 if (EltSize < 32) {
887 if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
888 return 0;
889 return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
890 }
891
892 // Extracts are just reads of a subregister, so are free. Inserts are
893 // considered free because we don't want to have any cost for scalarizing
894 // operations, and we don't have to copy into a different register class.
895
896 // Dynamic indexing isn't free and is best avoided.
897 return Index == ~0u ? 2 : 0;
898 }
899 default:
900 return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
901 }
902 }
903
904 /// Analyze if the results of inline asm are divergent. If \p Indices is empty,
905 /// this is analyzing the collective result of all output registers. Otherwise,
906 /// this is only querying a specific result index if this returns multiple
907 /// registers in a struct.
908 bool GCNTTIImpl::isInlineAsmSourceOfDivergence(
909 const CallInst *CI, ArrayRef<unsigned> Indices) const {
910 // TODO: Handle complex extract indices
911 if (Indices.size() > 1)
912 return true;
913
914 const DataLayout &DL = CI->getModule()->getDataLayout();
915 const SIRegisterInfo *TRI = ST->getRegisterInfo();
916 TargetLowering::AsmOperandInfoVector TargetConstraints =
917 TLI->ParseConstraints(DL, ST->getRegisterInfo(), *CI);
918
919 const int TargetOutputIdx = Indices.empty() ? -1 : Indices[0];
920
921 int OutputIdx = 0;
922 for (auto &TC : TargetConstraints) {
923 if (TC.Type != InlineAsm::isOutput)
924 continue;
925
926 // Skip outputs we don't care about.
927 if (TargetOutputIdx != -1 && TargetOutputIdx != OutputIdx++)
928 continue;
929
930 TLI->ComputeConstraintToUse(TC, SDValue());
931
932 Register AssignedReg;
933 const TargetRegisterClass *RC;
934 std::tie(AssignedReg, RC) = TLI->getRegForInlineAsmConstraint(
935 TRI, TC.ConstraintCode, TC.ConstraintVT);
936 if (AssignedReg) {
937 // FIXME: This is a workaround for getRegForInlineAsmConstraint
938 // returning VS_32
939 RC = TRI->getPhysRegClass(AssignedReg);
940 }
941
942 // For AGPR constraints null is returned on subtargets without AGPRs, so
943 // assume divergent for null.
944 if (!RC || !TRI->isSGPRClass(RC))
945 return true;
946 }
947
948 return false;
949 }
950
951 /// \returns true if the new GPU divergence analysis is enabled.
952 bool GCNTTIImpl::useGPUDivergenceAnalysis() const {
953 return !UseLegacyDA;
954 }
955
956 /// \returns true if the result of the value could potentially be
957 /// different across workitems in a wavefront.
958 bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
959 if (const Argument *A = dyn_cast<Argument>(V))
960 return !AMDGPU::isArgPassedInSGPR(A);
961
962 // Loads from the private and flat address spaces are divergent, because
963 // threads can execute the load instruction with the same inputs and get
964 // different results.
965 //
966 // All other loads are not divergent, because if threads issue loads with the
967 // same arguments, they will always get the same result.
968 if (const LoadInst *Load = dyn_cast<LoadInst>(V))
969 return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
970 Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
971
972 // Atomics are divergent because they are executed sequentially: when an
973 // atomic operation refers to the same address in each thread, then each
974 // thread after the first sees the value written by the previous thread as
975 // its original value.
976 if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
977 return true;
978
979 if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
980 return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());
981
982 // Assume all function calls are a source of divergence.
983 if (const CallInst *CI = dyn_cast<CallInst>(V)) {
984 if (CI->isInlineAsm())
985 return isInlineAsmSourceOfDivergence(CI);
986 return true;
987 }
988
989 // Assume all function calls are a source of divergence.
990 if (isa<InvokeInst>(V))
991 return true;
992
993 return false;
994 }
995
996 bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
997 if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
998 switch (Intrinsic->getIntrinsicID()) {
999 default:
1000 return false;
1001 case Intrinsic::amdgcn_readfirstlane:
1002 case Intrinsic::amdgcn_readlane:
1003 case Intrinsic::amdgcn_icmp:
1004 case Intrinsic::amdgcn_fcmp:
1005 case Intrinsic::amdgcn_ballot:
1006 case Intrinsic::amdgcn_if_break:
1007 return true;
1008 }
1009 }
1010
1011 if (const CallInst *CI = dyn_cast<CallInst>(V)) {
1012 if (CI->isInlineAsm())
1013 return !isInlineAsmSourceOfDivergence(CI);
1014 return false;
1015 }
1016
1017 const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V);
1018 if (!ExtValue)
1019 return false;
1020
1021 const CallInst *CI = dyn_cast<CallInst>(ExtValue->getOperand(0));
1022 if (!CI)
1023 return false;
1024
1025 if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(CI)) {
1026 switch (Intrinsic->getIntrinsicID()) {
1027 default:
1028 return false;
1029 case Intrinsic::amdgcn_if:
1030 case Intrinsic::amdgcn_else: {
1031 ArrayRef<unsigned> Indices = ExtValue->getIndices();
1032 return Indices.size() == 1 && Indices[0] == 1;
1033 }
1034 }
1035 }
1036
1037 // If we have inline asm returning mixed SGPR and VGPR results, we inferred
1038 // divergence for the overall struct return. We need to override that here
1039 // in the case where we are extracting an SGPR component.
1040 if (CI->isInlineAsm())
1041 return !isInlineAsmSourceOfDivergence(CI, ExtValue->getIndices());
1042
1043 return false;
1044 }
1045
1046 bool GCNTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
1047 Intrinsic::ID IID) const {
1048 switch (IID) {
1049 case Intrinsic::amdgcn_atomic_inc:
1050 case Intrinsic::amdgcn_atomic_dec:
1051 case Intrinsic::amdgcn_ds_fadd:
1052 case Intrinsic::amdgcn_ds_fmin:
1053 case Intrinsic::amdgcn_ds_fmax:
1054 case Intrinsic::amdgcn_is_shared:
1055 case Intrinsic::amdgcn_is_private:
1056 OpIndexes.push_back(0);
1057 return true;
1058 default:
1059 return false;
1060 }
1061 }
1062
1063 Value *GCNTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
1064 Value *OldV,
1065 Value *NewV) const {
1066 auto IntrID = II->getIntrinsicID();
1067 switch (IntrID) {
1068 case Intrinsic::amdgcn_atomic_inc:
1069 case Intrinsic::amdgcn_atomic_dec:
1070 case Intrinsic::amdgcn_ds_fadd:
1071 case Intrinsic::amdgcn_ds_fmin:
1072 case Intrinsic::amdgcn_ds_fmax: {
1073 const ConstantInt *IsVolatile = cast<ConstantInt>(II->getArgOperand(4));
1074 if (!IsVolatile->isZero())
1075 return nullptr;
1076 Module *M = II->getParent()->getParent()->getParent();
1077 Type *DestTy = II->getType();
1078 Type *SrcTy = NewV->getType();
1079 Function *NewDecl =
1080 Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
1081 II->setArgOperand(0, NewV);
1082 II->setCalledFunction(NewDecl);
1083 return II;
1084 }
1085 case Intrinsic::amdgcn_is_shared:
1086 case Intrinsic::amdgcn_is_private: {
1087 unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
1088 AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
1089 unsigned NewAS = NewV->getType()->getPointerAddressSpace();
1090 LLVMContext &Ctx = NewV->getType()->getContext();
1091 ConstantInt *NewVal = (TrueAS == NewAS) ?
1092 ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
1093 return NewVal;
1094 }
1095 case Intrinsic::ptrmask: {
1096 unsigned OldAS = OldV->getType()->getPointerAddressSpace();
1097 unsigned NewAS = NewV->getType()->getPointerAddressSpace();
1098 Value *MaskOp = II->getArgOperand(1);
1099 Type *MaskTy = MaskOp->getType();
1100
1101 bool DoTruncate = false;
1102
1103 const GCNTargetMachine &TM =
1104 static_cast<const GCNTargetMachine &>(getTLI()->getTargetMachine());
1105 if (!TM.isNoopAddrSpaceCast(OldAS, NewAS)) {
1106 // All valid 64-bit to 32-bit casts work by chopping off the high
1107 // bits. Any masking only clearing the low bits will also apply in the new
1108 // address space.
1109 if (DL.getPointerSizeInBits(OldAS) != 64 ||
1110 DL.getPointerSizeInBits(NewAS) != 32)
1111 return nullptr;
1112
1113 // TODO: Do we need to thread more context in here?
1114 KnownBits Known = computeKnownBits(MaskOp, DL, 0, nullptr, II);
1115 if (Known.countMinLeadingOnes() < 32)
1116 return nullptr;
1117
1118 DoTruncate = true;
1119 }
1120
1121 IRBuilder<> B(II);
1122 if (DoTruncate) {
1123 MaskTy = B.getInt32Ty();
1124 MaskOp = B.CreateTrunc(MaskOp, MaskTy);
1125 }
1126
1127 return B.CreateIntrinsic(Intrinsic::ptrmask, {NewV->getType(), MaskTy},
1128 {NewV, MaskOp});
1129 }
1130 default:
1131 return nullptr;
1132 }
1133 }
1134
1135 InstructionCost GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1136 VectorType *VT, ArrayRef<int> Mask,
1137 int Index, VectorType *SubTp) {
1138 Kind = improveShuffleKindFromMask(Kind, Mask);
1139 if (ST->hasVOP3PInsts()) {
1140 if (cast<FixedVectorType>(VT)->getNumElements() == 2 &&
1141 DL.getTypeSizeInBits(VT->getElementType()) == 16) {
1142 // With op_sel, VOP3P instructions can freely access the low half or high
1143 // half of a register, so any swizzle is free.
1144
1145 switch (Kind) {
1146 case TTI::SK_Broadcast:
1147 case TTI::SK_Reverse:
1148 case TTI::SK_PermuteSingleSrc:
1149 return 0;
1150 default:
1151 break;
1152 }
1153 }
1154 }
1155
1156 return BaseT::getShuffleCost(Kind, VT, Mask, Index, SubTp);
1157 }
1158
1159 bool GCNTTIImpl::areInlineCompatible(const Function *Caller,
1160 const Function *Callee) const {
1161 const TargetMachine &TM = getTLI()->getTargetMachine();
1162 const GCNSubtarget *CallerST
1163 = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Caller));
1164 const GCNSubtarget *CalleeST
1165 = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Callee));
1166
1167 const FeatureBitset &CallerBits = CallerST->getFeatureBits();
1168 const FeatureBitset &CalleeBits = CalleeST->getFeatureBits();
1169
1170 FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
1171 FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
1172 if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
1173 return false;
1174
1175 // FIXME: dx10_clamp can just take the caller setting, but there seems to be
1176 // no way to support merge for backend defined attributes.
1177 AMDGPU::SIModeRegisterDefaults CallerMode(*Caller);
1178 AMDGPU::SIModeRegisterDefaults CalleeMode(*Callee);
1179 if (!CallerMode.isInlineCompatible(CalleeMode))
1180 return false;
1181
1182 if (Callee->hasFnAttribute(Attribute::AlwaysInline) ||
1183 Callee->hasFnAttribute(Attribute::InlineHint))
1184 return true;
1185
1186 // Hack to make compile times reasonable.
1187 if (InlineMaxBB) {
1188 // Single BB does not increase total BB amount.
1189 if (Callee->size() == 1)
1190 return true;
1191 size_t BBSize = Caller->size() + Callee->size() - 1;
1192 return BBSize <= InlineMaxBB;
1193 }
1194
1195 return true;
1196 }
1197
1198 unsigned GCNTTIImpl::adjustInliningThreshold(const CallBase *CB) const {
1199 // If we have a pointer to a private array passed into a function,
1200 // it will not be optimized out, leaving scratch usage.
1201 // Increase the inline threshold to allow inlining in this case.
1202 uint64_t AllocaSize = 0;
1203 SmallPtrSet<const AllocaInst *, 8> AIVisited;
1204 for (Value *PtrArg : CB->args()) {
1205 PointerType *Ty = dyn_cast<PointerType>(PtrArg->getType());
1206 if (!Ty || (Ty->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS &&
1207 Ty->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS))
1208 continue;
1209
1210 PtrArg = getUnderlyingObject(PtrArg);
1211 if (const AllocaInst *AI = dyn_cast<AllocaInst>(PtrArg)) {
1212 if (!AI->isStaticAlloca() || !AIVisited.insert(AI).second)
1213 continue;
1214 AllocaSize += DL.getTypeAllocSize(AI->getAllocatedType());
1215 // If the amount of stack memory is excessive, we will not be able
1216 // to get rid of the scratch anyway; bail out.
1217 if (AllocaSize > ArgAllocaCutoff) {
1218 AllocaSize = 0;
1219 break;
1220 }
1221 }
1222 }
1223 if (AllocaSize)
1224 return ArgAllocaCost;
1225 return 0;
1226 }
1227
1228 void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1229 TTI::UnrollingPreferences &UP,
1230 OptimizationRemarkEmitter *ORE) {
1231 CommonTTI.getUnrollingPreferences(L, SE, UP, ORE);
1232 }
1233
1234 void GCNTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1235 TTI::PeelingPreferences &PP) {
1236 CommonTTI.getPeelingPreferences(L, SE, PP);
1237 }
1238
1239 int GCNTTIImpl::get64BitInstrCost(TTI::TargetCostKind CostKind) const {
1240 return ST->hasFullRate64Ops()
1241 ? getFullRateInstrCost()
1242 : ST->hasHalfRate64Ops() ? getHalfRateInstrCost(CostKind)
1243 : getQuarterRateInstrCost(CostKind);
1244 }
1245