//===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

static cl::opt<unsigned> UnrollThresholdPrivate(
    "amdgpu-unroll-threshold-private",
    cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
    cl::init(2700), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdLocal(
    "amdgpu-unroll-threshold-local",
    cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
    cl::init(1000), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdIf(
    "amdgpu-unroll-threshold-if",
    cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
    cl::init(200), cl::Hidden);

static cl::opt<bool> UnrollRuntimeLocal(
    "amdgpu-unroll-runtime-local",
    cl::desc("Allow runtime unroll for AMDGPU if local memory used in a loop"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> UseLegacyDA(
    "amdgpu-use-legacy-divergence-analysis",
    cl::desc("Enable legacy divergence analysis for AMDGPU"),
    cl::init(false), cl::Hidden);

static cl::opt<unsigned> UnrollMaxBlockToAnalyze(
    "amdgpu-unroll-max-block-to-analyze",
    cl::desc("Inner loop block size threshold to analyze in unroll for AMDGPU"),
    cl::init(32), cl::Hidden);

static cl::opt<unsigned> ArgAllocaCost("amdgpu-inline-arg-alloca-cost",
                                       cl::Hidden, cl::init(4000),
                                       cl::desc("Cost of alloca argument"));

// If the amount of scratch memory to eliminate exceeds our ability to allocate
// it into registers, we gain nothing by aggressively inlining functions for
// that heuristic.
static cl::opt<unsigned>
    ArgAllocaCutoff("amdgpu-inline-arg-alloca-cutoff", cl::Hidden,
                    cl::init(256),
                    cl::desc("Maximum alloca size to use for inline cost"));

// Inliner constraint to achieve reasonable compilation time.
static cl::opt<size_t> InlineMaxBB(
    "amdgpu-inline-max-bb", cl::Hidden, cl::init(1100),
    cl::desc("Maximum number of BBs allowed in a function after inlining"
             " (compile time constraint)"));

static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I)
    return false;

  for (const Value *V : I->operand_values()) {
    if (!L->contains(I))
      continue;
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
             return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
      return true;
  }
  return false;
}

AMDGPUTTIImpl::AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getParent()->getDataLayout()),
      TargetTriple(TM->getTargetTriple()),
      ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
      TLI(ST->getTargetLowering()) {}

void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                            TTI::UnrollingPreferences &UP,
                                            OptimizationRemarkEmitter *ORE) {
  const Function &F = *L->getHeader()->getParent();
  UP.Threshold =
      F.getFnAttributeAsParsedInteger("amdgpu-unroll-threshold", 300);
  UP.MaxCount = std::numeric_limits<unsigned>::max();
  UP.Partial = true;

  // A conditional branch in a loop back edge needs 3 additional exec
  // manipulations on average.
  UP.BEInsns += 3;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit in registers. Reserve 16 registers.
  const unsigned MaxAlloca = (256 - 16) * 4;
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;

  // If this loop has the amdgpu.loop.unroll.threshold metadata, we will use
  // the provided threshold value as the default for Threshold.
  if (MDNode *LoopUnrollThreshold =
          findOptionMDForLoop(L, "amdgpu.loop.unroll.threshold")) {
    if (LoopUnrollThreshold->getNumOperands() == 2) {
      ConstantInt *MetaThresholdValue = mdconst::extract_or_null<ConstantInt>(
          LoopUnrollThreshold->getOperand(1));
      if (MetaThresholdValue) {
        // We will also use the supplied value for PartialThreshold for now.
        // We may introduce additional metadata if it becomes necessary in the
        // future.
        UP.Threshold = MetaThresholdValue->getSExtValue();
        UP.PartialThreshold = UP.Threshold;
        ThresholdPrivate = std::min(ThresholdPrivate, UP.Threshold);
        ThresholdLocal = std::min(ThresholdLocal, UP.Threshold);
      }
    }
  }

  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
           return SubLoop->contains(BB); }))
      continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate
      // the if region and potentially even the PHI itself, saving on both
      // divergence and registers used for the PHI.
      // Add a small bonus for each such "if" statement.
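      //
      // A hypothetical source-level example of the targeted pattern:
      //   for (i = 0; i < 16; ++i)  // i becomes a PHI of this loop
      //     if (i & 1) { ... }      // condition depends on that PHI
      // After full unrolling every i is a constant, so each branch can fold
      // away.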
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
          BasicBlock *Succ0 = Br->getSuccessor(0);
          BasicBlock *Succ1 = Br->getSuccessor(1);
          if ((L->contains(Succ0) && L->isLoopExiting(Succ0)) ||
              (L->contains(Succ1) && L->isLoopExiting(Succ1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                              << " for loop:\n"
                              << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == AMDGPUAS::PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(getUnderlyingObject(Ptr));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
                 AS == AMDGPUAS::REGION_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unrolling for local memory if we have seen addressing that
        // is not based on a variable; most likely we will be unable to
        // combine it.
        // Also do not unroll too deeply nested inner loops for local memory,
        // to give an outer loop a chance to be unrolled for a more important
        // reason.
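        // Illustrative note: only a GEP whose base is an LDS GlobalVariable
        // or a kernel Argument passes the check below; e.g. a base pointer
        // that was itself loaded from memory inhibits the bonus.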
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
          continue;
        LLVM_DEBUG(dbgs() << "Allow unroll runtime for loop:\n"
                          << *L << " due to LDS use.\n");
        UP.Runtime = UnrollRuntimeLocal;
      }

      // Check if GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
             return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. Allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
      // alloca ptr, then we want to use a higher than normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here as it will make some
      // programs way too big.
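      //
      // Illustrative numbers with the default settings above: the base
      // threshold of 300 is raised to 2700 for private (alloca) accesses or
      // to 1000 for LDS accesses.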
      UP.Threshold = Threshold;
      LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
                        << " for loop:\n"
                        << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }

    // If we got a GEP in a small BB from an inner loop, then increase the max
    // trip count to analyze for a better cost estimation in unrolling.
    if (L->isInnermost() && BB->size() < UnrollMaxBlockToAnalyze)
      UP.MaxIterationsCountToAnalyze = 32;
  }
}

void AMDGPUTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

const FeatureBitset GCNTTIImpl::InlineFeatureIgnoreList = {
    // Codegen control options which don't matter.
    AMDGPU::FeatureEnableLoadStoreOpt, AMDGPU::FeatureEnableSIScheduler,
    AMDGPU::FeatureEnableUnsafeDSOffsetFolding, AMDGPU::FeatureFlatForGlobal,
    AMDGPU::FeaturePromoteAlloca, AMDGPU::FeatureUnalignedScratchAccess,
    AMDGPU::FeatureUnalignedAccessMode,

    AMDGPU::FeatureAutoWaitcntBeforeBarrier,

    // Property of the kernel/environment which can't actually differ.
    AMDGPU::FeatureSGPRInitBug, AMDGPU::FeatureXNACK,
    AMDGPU::FeatureTrapHandler,

    // The default assumption needs to be that ECC is enabled, but no directly
    // exposed operations depend on it, so it can be safely inlined.
    AMDGPU::FeatureSRAMECC,

    // Perf-tuning features
    AMDGPU::FeatureFastFMAF32, AMDGPU::HalfRate64Ops};

GCNTTIImpl::GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getParent()->getDataLayout()),
      ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
      TLI(ST->getTargetLowering()), CommonTTI(TM, F),
      IsGraphics(AMDGPU::isGraphics(F.getCallingConv())) {
  AMDGPU::SIModeRegisterDefaults Mode(F);
  HasFP32Denormals = Mode.allFP32Denormals();
  HasFP64FP16Denormals = Mode.allFP64FP16Denormals();
}

unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
  // NB: RCID is not a real register class ID. It is 0 for scalar and 1 for
  // vector registers; see getRegisterClassForType for the implementation.
  // In this case "vector" registers are not vector in terms of VGPRs, but
  // registers which can hold multiple values.

  // This is really the number of registers to fill when vectorizing /
  // interleaving loops, so we lie to avoid trying to use all registers.
  return 4;
}

TypeSize
GCNTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(ST->hasPackedFP32Ops() ? 64 : 32);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }
  llvm_unreachable("Unsupported register kind");
}

unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned GCNTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
  if (Opcode == Instruction::Load || Opcode == Instruction::Store)
    return 32 * 4 / ElemWidth;
  return (ElemWidth == 16 && ST->has16BitInsts()) ? 2
         : (ElemWidth == 32 && ST->hasPackedFP32Ops()) ? 2
                                                       : 1;
}

unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                         unsigned ChainSizeInBytes,
                                         VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * LoadSize;
  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
    // TODO: Support element-size less than 32bit?
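    // Illustrative: VF = 16 loads of i16 (LoadSize = 16) give VecRegBitWidth
    // = 256, so the factor is clamped to 128 / 16 = 8 elements.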
    return 128 / LoadSize;

  return VF;
}

unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                          unsigned ChainSizeInBytes,
                                          VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize;

  return VF;
}

unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
      AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER) {
    return 512;
  }

  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  // Common to flat, global, local and region. Assume the same for an unknown
  // address space.
  return 128;
}

bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                            Align Alignment,
                                            unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
           ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                             Align Alignment,
                                             unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                              Align Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

// FIXME: Really we would like to issue multiple 128-bit loads and stores per
// iteration. Should we report a larger size and let it legalize?
//
// FIXME: Should we use narrower types for local/region, or account for when
// unaligned access is legal?
//
// FIXME: This could use fine tuning and microbenchmarks.
Type *GCNTTIImpl::getMemcpyLoopLoweringType(
    LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
    unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign,
    std::optional<uint32_t> AtomicElementSize) const {

  if (AtomicElementSize)
    return Type::getIntNTy(Context, *AtomicElementSize * 8);

  unsigned MinAlign = std::min(SrcAlign, DestAlign);

  // A (multi-)dword access at an address == 2 (mod 4) will be decomposed by the
  // hardware into byte accesses. If you assume all alignments are equally
  // probable, it's more efficient on average to use short accesses for this
  // case.
  if (MinAlign == 2)
    return Type::getInt16Ty(Context);

  // Not all subtargets have 128-bit DS instructions, and we currently don't
  // form them by default.
  if (SrcAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      SrcAddrSpace == AMDGPUAS::REGION_ADDRESS ||
      DestAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      DestAddrSpace == AMDGPUAS::REGION_ADDRESS) {
    return FixedVectorType::get(Type::getInt32Ty(Context), 2);
  }

  // Global memory works best with 16-byte accesses. Private memory will also
  // hit this, although those accesses will be decomposed.
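  //
  // Illustrative: returning <4 x i32> makes each copy-loop iteration one
  // 16-byte (dwordx4) load/store pair.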
  return FixedVectorType::get(Type::getInt32Ty(Context), 4);
}

void GCNTTIImpl::getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    unsigned SrcAlign, unsigned DestAlign,
    std::optional<uint32_t> AtomicCpySize) const {
  assert(RemainingBytes < 16);

  if (AtomicCpySize)
    BaseT::getMemcpyLoopResidualLoweringType(
        OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
        DestAlign, AtomicCpySize);

  unsigned MinAlign = std::min(SrcAlign, DestAlign);

  if (MinAlign != 2) {
    Type *I64Ty = Type::getInt64Ty(Context);
    while (RemainingBytes >= 8) {
      OpsOut.push_back(I64Ty);
      RemainingBytes -= 8;
    }

    Type *I32Ty = Type::getInt32Ty(Context);
    while (RemainingBytes >= 4) {
      OpsOut.push_back(I32Ty);
      RemainingBytes -= 4;
    }
  }

  Type *I16Ty = Type::getInt16Ty(Context);
  while (RemainingBytes >= 2) {
    OpsOut.push_back(I16Ty);
    RemainingBytes -= 2;
  }

  Type *I8Ty = Type::getInt8Ty(Context);
  while (RemainingBytes) {
    OpsOut.push_back(I8Ty);
    --RemainingBytes;
  }
}

unsigned GCNTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                    MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
    auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
    if (!Ordering || !Volatile)
      return false; // Invalid.

    unsigned OrderingVal = Ordering->getZExtValue();
    if (OrderingVal > static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
      return false;

    Info.PtrVal = Inst->getArgOperand(0);
    Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
    Info.ReadMem = true;
    Info.WriteMem = true;
    Info.IsVolatile = !Volatile->isZero();
    return true;
  }
  default:
    return false;
  }
}

InstructionCost GCNTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args,
    const Instruction *CxtI) {

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, only the legal vector
  // types, we need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    if (SLT == MVT::i64)
      return get64BitInstrCost(CostKind) * LT.first * NElts;

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (SLT == MVT::i64) {
      // and, or and xor are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    return LT.first * NElts * getFullRateInstrCost();
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost(CostKind);
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FMUL:
    // Check for a possible fusion of {fadd|fsub}(a, fmul(b, c)) and return a
    // zero cost for the fmul(b, c), assuming the fadd|fsub gets estimated at
    // the cost of the whole fused operation.
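    //
    // Illustrative IR for the free case (flags and types are just examples):
    //   %m = fmul contract float %b, %c   ; single use
    //   %r = fadd contract float %m, %a   ; both fold into one FMA/MAD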
    if (CxtI && CxtI->hasOneUse())
      if (const auto *FAdd = dyn_cast<BinaryOperator>(*CxtI->user_begin())) {
        const int OPC = TLI->InstructionOpcodeToISD(FAdd->getOpcode());
        if (OPC == ISD::FADD || OPC == ISD::FSUB) {
          if (ST->hasMadMacF32Insts() && SLT == MVT::f32 && !HasFP32Denormals)
            return TargetTransformInfo::TCC_Free;
          if (ST->has16BitInsts() && SLT == MVT::f16 && !HasFP64FP16Denormals)
            return TargetTransformInfo::TCC_Free;

          // Estimate that all types may be fused with contract/unsafe flags.
          const TargetOptions &Options = TLI->getTargetMachine().Options;
          if (Options.AllowFPOpFusion == FPOpFusion::Fast ||
              Options.UnsafeFPMath ||
              (FAdd->hasAllowContract() && CxtI->hasAllowContract()))
            return TargetTransformInfo::TCC_Free;
        }
      }
    [[fallthrough]];
  case ISD::FADD:
  case ISD::FSUB:
    if (ST->hasPackedFP32Ops() && SLT == MVT::f32)
      NElts = (NElts + 1) / 2;
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost(CostKind);

    if (ST->has16BitInsts() && SLT == MVT::f16)
      NElts = (NElts + 1) / 2;

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;
  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 7 * get64BitInstrCost(CostKind) +
                 getQuarterRateInstrCost(CostKind) +
                 3 * getHalfRateInstrCost(CostKind);
      // Add cost of workaround.
      if (!ST->hasUsableDivScaleConditionOutput())
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
      // TODO: This is more complicated, unsafe flags etc.
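      // Illustrative: 'fdiv float 1.0, %x' can lower to a single v_rcp_f32
      // (a quarter-rate instruction) when f32 denormals are disabled.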
      if ((SLT == MVT::f32 && !HasFP32Denormals) ||
          (SLT == MVT::f16 && ST->has16BitInsts())) {
        return LT.first * getQuarterRateInstrCost(CostKind) * NElts;
      }
    }

    if (SLT == MVT::f16 && ST->has16BitInsts()) {
      // 2 x v_cvt_f32_f16
      // f32 rcp
      // f32 fmul
      // v_cvt_f16_f32
      // f16 div_fixup
      int Cost =
          4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost(CostKind);
      return LT.first * Cost * NElts;
    }

    if (SLT == MVT::f32 || SLT == MVT::f16) {
      // 4 more v_cvt_* instructions are needed without f16 instruction
      // support.
      int Cost = (SLT == MVT::f16 ? 14 : 10) * getFullRateInstrCost() +
                 1 * getQuarterRateInstrCost(CostKind);

      if (!HasFP32Denormals) {
        // FP mode switches.
        Cost += 2 * getFullRateInstrCost();
      }

      return LT.first * NElts * Cost;
    }
    break;
  case ISD::FNEG:
    // Use the backend's estimation. If fneg is not free, each element will
    // cost one additional instruction.
    return TLI->isFNegFree(SLT) ? 0 : NElts;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
                                       Args, CxtI);
}

// Return true if there's a potential benefit from using v2f16/v2i16
// instructions for an intrinsic, even if it requires nontrivial legalization.
static bool intrinsicHasPackedVectorBenefit(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::fma: // TODO: fmuladd
  // There's a small benefit to using vector ops in the legalized code.
  case Intrinsic::round:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
    return true;
  default:
    return false;
  }
}

InstructionCost
GCNTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                  TTI::TargetCostKind CostKind) {
  if (ICA.getID() == Intrinsic::fabs)
    return 0;

  if (!intrinsicHasPackedVectorBenefit(ICA.getID()))
    return BaseT::getIntrinsicInstrCost(ICA, CostKind);

  Type *RetTy = ICA.getReturnType();

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(RetTy);

  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  if (SLT == MVT::f64)
    return LT.first * NElts * get64BitInstrCost(CostKind);

  if ((ST->has16BitInsts() && SLT == MVT::f16) ||
      (ST->hasPackedFP32Ops() && SLT == MVT::f32))
    NElts = (NElts + 1) / 2;

  // TODO: Get more refined intrinsic costs?
  unsigned InstRate = getQuarterRateInstrCost(CostKind);

  switch (ICA.getID()) {
  case Intrinsic::fma:
    InstRate = ST->hasFastFMAF32() ? getHalfRateInstrCost(CostKind)
                                   : getQuarterRateInstrCost(CostKind);
    break;
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
    static const auto ValidSatTys = {MVT::v2i16, MVT::v4i16};
    if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
      NElts = 1;
    break;
  }

  return LT.first * NElts * InstRate;
}

InstructionCost GCNTTIImpl::getCFInstrCost(unsigned Opcode,
                                           TTI::TargetCostKind CostKind,
                                           const Instruction *I) {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  const bool SCost =
      (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency);
  const int CBrCost = SCost ? 5 : 7;
  switch (Opcode) {
  case Instruction::Br: {
    // A branch instruction takes about 4 slots on gfx900.
    auto BI = dyn_cast_or_null<BranchInst>(I);
    if (BI && BI->isUnconditional())
      return SCost ? 1 : 4;
    // Assume a conditional branch takes an additional 3 exec-manipulation
    // instructions on average.
    return CBrCost;
  }
  case Instruction::Switch: {
    auto SI = dyn_cast_or_null<SwitchInst>(I);
    // Each case (including default) takes 1 cmp + 1 cbr instructions on
    // average.
    return (SI ? (SI->getNumCases() + 1) : 4) * (CBrCost + 1);
  }
  case Instruction::Ret:
    return SCost ? 1 : 10;
  }
  return BaseT::getCFInstrCost(Opcode, CostKind, I);
}

InstructionCost
GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                       std::optional<FastMathFlags> FMF,
                                       TTI::TargetCostKind CostKind) {
  if (TTI::requiresOrderedReduction(FMF))
    return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);

  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Computes the cost on targets that have packed math instructions (which
  // support 16-bit types only).
  if (!ST->hasVOP3PInsts() || OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);

  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  return LT.first * getFullRateInstrCost();
}

InstructionCost
GCNTTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                   bool IsUnsigned,
                                   TTI::TargetCostKind CostKind) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Computes the cost on targets that have packed math instructions (which
  // support 16-bit types only).
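  //
  // Illustrative: a <2 x i16> min/max reduction step can map to a single
  // packed instruction (e.g. v_pk_min_i16) on such subtargets.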
  if (!ST->hasVOP3PInsts() || OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);

  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  return LT.first * getHalfRateInstrCost(CostKind);
}

InstructionCost GCNTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                               TTI::TargetCostKind CostKind,
                                               unsigned Index, Value *Op0,
                                               Value *Op1) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
        return 0;
      return BaseT::getVectorInstrCost(Opcode, ValTy, CostKind, Index, Op0,
                                       Op1);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, CostKind, Index, Op0, Op1);
  }
}

/// Analyze if the results of inline asm are divergent. If \p Indices is empty,
/// this is analyzing the collective result of all output registers. Otherwise,
/// this is only querying a specific result index if this returns multiple
/// registers in a struct.
bool GCNTTIImpl::isInlineAsmSourceOfDivergence(
    const CallInst *CI, ArrayRef<unsigned> Indices) const {
  // TODO: Handle complex extract indices
  if (Indices.size() > 1)
    return true;

  const DataLayout &DL = CI->getModule()->getDataLayout();
  const SIRegisterInfo *TRI = ST->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, ST->getRegisterInfo(), *CI);

  const int TargetOutputIdx = Indices.empty() ? -1 : Indices[0];

  int OutputIdx = 0;
  for (auto &TC : TargetConstraints) {
    if (TC.Type != InlineAsm::isOutput)
      continue;

    // Skip outputs we don't care about.
    if (TargetOutputIdx != -1 && TargetOutputIdx != OutputIdx++)
      continue;

    TLI->ComputeConstraintToUse(TC, SDValue());

    const TargetRegisterClass *RC = TLI->getRegForInlineAsmConstraint(
        TRI, TC.ConstraintCode, TC.ConstraintVT).second;

    // For AGPR constraints null is returned on subtargets without AGPRs, so
    // assume divergent for null.
    if (!RC || !TRI->isSGPRClass(RC))
      return true;
  }

  return false;
}

/// \returns true if the new GPU divergence analysis is enabled.
bool GCNTTIImpl::useGPUDivergenceAnalysis() const {
  return !UseLegacyDA;
}

bool GCNTTIImpl::isReadRegisterSourceOfDivergence(
    const IntrinsicInst *ReadReg) const {
  Metadata *MD =
      cast<MetadataAsValue>(ReadReg->getArgOperand(0))->getMetadata();
  StringRef RegName =
      cast<MDString>(cast<MDNode>(MD)->getOperand(0))->getString();

  // Special case registers that look like VCC.
  MVT VT = MVT::getVT(ReadReg->getType());
  if (VT == MVT::i1)
    return true;

  // Special case scalar registers that start with 'v'.
  if (RegName.startswith("vcc") || RegName.empty())
    return false;

  // VGPR or AGPR is divergent. There aren't any specially named vector
  // registers.
  return RegName[0] == 'v' || RegName[0] == 'a';
}

/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !AMDGPU::isArgPassedInSGPR(A);

  // Loads from the private and flat address spaces are divergent, because
  // threads can execute the load instruction with the same inputs and get
  // different results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
           Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // the original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    if (Intrinsic->getIntrinsicID() == Intrinsic::read_register)
      return isReadRegisterSourceOfDivergence(Intrinsic);

    return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());
  }

  // Assume all function calls are a source of divergence.
  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (CI->isInlineAsm())
      return isInlineAsmSourceOfDivergence(CI);
    return true;
  }

  // Assume all invoke instructions are a source of divergence as well.
  if (isa<InvokeInst>(V))
    return true;

  return false;
}

bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
    case Intrinsic::amdgcn_icmp:
    case Intrinsic::amdgcn_fcmp:
    case Intrinsic::amdgcn_ballot:
    case Intrinsic::amdgcn_if_break:
      return true;
    }
  }

  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (CI->isInlineAsm())
      return !isInlineAsmSourceOfDivergence(CI);
    return false;
  }

  // In most cases TID / wavefrontsize is uniform.
  //
  // However, if a kernel has uneven dimensions we can have a value of
  // workitem-id-x divided by the wavefront size being non-uniform. For example,
  // dimensions (65, 2) will have workitems with addresses (64, 0) and (0, 1)
  // packed into the same wave, which gives 1 and 0 after the division by 64,
  // respectively.
  //
  // FIXME: Limit it to 1D kernels only, although it should be possible
  // to perform this optimization if the size of the X dimension is a power
  // of 2; we just do not currently have the infrastructure to query it.
  using namespace llvm::PatternMatch;
  uint64_t C;
  if (match(V, m_LShr(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
                      m_ConstantInt(C))) ||
      match(V, m_AShr(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
                      m_ConstantInt(C)))) {
    const Function *F = cast<Instruction>(V)->getFunction();
    return C >= ST->getWavefrontSizeLog2() &&
           ST->getMaxWorkitemID(*F, 1) == 0 && ST->getMaxWorkitemID(*F, 2) == 0;
  }

  Value *Mask;
  if (match(V, m_c_And(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
                       m_Value(Mask)))) {
    const Function *F = cast<Instruction>(V)->getFunction();
    const DataLayout &DL = F->getParent()->getDataLayout();
    return computeKnownBits(Mask, DL).countMinTrailingZeros() >=
               ST->getWavefrontSizeLog2() &&
           ST->getMaxWorkitemID(*F, 1) == 0 && ST->getMaxWorkitemID(*F, 2) == 0;
  }

  const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V);
  if (!ExtValue)
    return false;

  const CallInst *CI = dyn_cast<CallInst>(ExtValue->getOperand(0));
  if (!CI)
    return false;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(CI)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_if:
    case Intrinsic::amdgcn_else: {
      ArrayRef<unsigned> Indices = ExtValue->getIndices();
      return Indices.size() == 1 && Indices[0] == 1;
    }
    }
  }

  // If we have inline asm returning mixed SGPR and VGPR results, we inferred
  // divergent for the overall struct return. We need to override it in the
  // case we're extracting an SGPR component here.
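  //
  // Illustrative: for inline asm with outputs ("=s"(a), "=v"(b)), extracting
  // index 0 (the SGPR result) is uniform even though the whole struct return
  // was inferred divergent.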
10128bcb0991SDimitry Andric bool GCNTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
10138bcb0991SDimitry Andric Intrinsic::ID IID) const {
10148bcb0991SDimitry Andric switch (IID) {
10158bcb0991SDimitry Andric case Intrinsic::amdgcn_atomic_inc:
10168bcb0991SDimitry Andric case Intrinsic::amdgcn_atomic_dec:
10178bcb0991SDimitry Andric case Intrinsic::amdgcn_ds_fadd:
10188bcb0991SDimitry Andric case Intrinsic::amdgcn_ds_fmin:
10198bcb0991SDimitry Andric case Intrinsic::amdgcn_ds_fmax:
10208bcb0991SDimitry Andric case Intrinsic::amdgcn_is_shared:
10218bcb0991SDimitry Andric case Intrinsic::amdgcn_is_private:
1022bdd1243dSDimitry Andric case Intrinsic::amdgcn_flat_atomic_fadd:
1023bdd1243dSDimitry Andric case Intrinsic::amdgcn_flat_atomic_fmax:
1024bdd1243dSDimitry Andric case Intrinsic::amdgcn_flat_atomic_fmin:
10258bcb0991SDimitry Andric OpIndexes.push_back(0);
10268bcb0991SDimitry Andric return true;
10278bcb0991SDimitry Andric default:
10288bcb0991SDimitry Andric return false;
10298bcb0991SDimitry Andric }
10308bcb0991SDimitry Andric }
10318bcb0991SDimitry Andric
10325ffd83dbSDimitry Andric Value *GCNTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
10335ffd83dbSDimitry Andric Value *OldV,
10345ffd83dbSDimitry Andric Value *NewV) const {
10358bcb0991SDimitry Andric auto IntrID = II->getIntrinsicID();
10368bcb0991SDimitry Andric switch (IntrID) {
10378bcb0991SDimitry Andric case Intrinsic::amdgcn_atomic_inc:
10388bcb0991SDimitry Andric case Intrinsic::amdgcn_atomic_dec:
10398bcb0991SDimitry Andric case Intrinsic::amdgcn_ds_fadd:
10408bcb0991SDimitry Andric case Intrinsic::amdgcn_ds_fmin:
10418bcb0991SDimitry Andric case Intrinsic::amdgcn_ds_fmax: {
10428bcb0991SDimitry Andric const ConstantInt *IsVolatile = cast<ConstantInt>(II->getArgOperand(4));
10438bcb0991SDimitry Andric if (!IsVolatile->isZero())
10445ffd83dbSDimitry Andric return nullptr;
10458bcb0991SDimitry Andric Module *M = II->getParent()->getParent()->getParent();
10468bcb0991SDimitry Andric Type *DestTy = II->getType();
10478bcb0991SDimitry Andric Type *SrcTy = NewV->getType();
10488bcb0991SDimitry Andric Function *NewDecl =
10498bcb0991SDimitry Andric Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
10508bcb0991SDimitry Andric II->setArgOperand(0, NewV);
10518bcb0991SDimitry Andric II->setCalledFunction(NewDecl);
10525ffd83dbSDimitry Andric return II;
10538bcb0991SDimitry Andric }
10548bcb0991SDimitry Andric case Intrinsic::amdgcn_is_shared:
10558bcb0991SDimitry Andric case Intrinsic::amdgcn_is_private: {
10568bcb0991SDimitry Andric unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
10578bcb0991SDimitry Andric AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
10588bcb0991SDimitry Andric unsigned NewAS = NewV->getType()->getPointerAddressSpace();
10598bcb0991SDimitry Andric LLVMContext &Ctx = NewV->getType()->getContext();
10608bcb0991SDimitry Andric ConstantInt *NewVal = (TrueAS == NewAS) ?
10618bcb0991SDimitry Andric ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
10625ffd83dbSDimitry Andric return NewVal;
10635ffd83dbSDimitry Andric }
10645ffd83dbSDimitry Andric case Intrinsic::ptrmask: {
10655ffd83dbSDimitry Andric unsigned OldAS = OldV->getType()->getPointerAddressSpace();
10665ffd83dbSDimitry Andric unsigned NewAS = NewV->getType()->getPointerAddressSpace();
10675ffd83dbSDimitry Andric Value *MaskOp = II->getArgOperand(1);
10685ffd83dbSDimitry Andric Type *MaskTy = MaskOp->getType();
10695ffd83dbSDimitry Andric
10705ffd83dbSDimitry Andric bool DoTruncate = false;
1071e8d8bef9SDimitry Andric
1072e8d8bef9SDimitry Andric const GCNTargetMachine &TM =
1073e8d8bef9SDimitry Andric static_cast<const GCNTargetMachine &>(getTLI()->getTargetMachine());
1074e8d8bef9SDimitry Andric if (!TM.isNoopAddrSpaceCast(OldAS, NewAS)) {
10755ffd83dbSDimitry Andric // All valid 64-bit to 32-bit casts work by chopping off the high
10765ffd83dbSDimitry Andric // bits. Any mask that only clears the low bits will also apply in the new
10775ffd83dbSDimitry Andric // address space.
10785ffd83dbSDimitry Andric if (DL.getPointerSizeInBits(OldAS) != 64 ||
10795ffd83dbSDimitry Andric DL.getPointerSizeInBits(NewAS) != 32)
10805ffd83dbSDimitry Andric return nullptr;
10815ffd83dbSDimitry Andric
10825ffd83dbSDimitry Andric // TODO: Do we need to thread more context in here?
10835ffd83dbSDimitry Andric KnownBits Known = computeKnownBits(MaskOp, DL, 0, nullptr, II);
10845ffd83dbSDimitry Andric if (Known.countMinLeadingOnes() < 32)
10855ffd83dbSDimitry Andric return nullptr;
10865ffd83dbSDimitry Andric
10875ffd83dbSDimitry Andric DoTruncate = true;
10885ffd83dbSDimitry Andric }
10895ffd83dbSDimitry Andric
10905ffd83dbSDimitry Andric IRBuilder<> B(II);
10915ffd83dbSDimitry Andric if (DoTruncate) {
10925ffd83dbSDimitry Andric MaskTy = B.getInt32Ty();
10935ffd83dbSDimitry Andric MaskOp = B.CreateTrunc(MaskOp, MaskTy);
10945ffd83dbSDimitry Andric }
10955ffd83dbSDimitry Andric
10965ffd83dbSDimitry Andric return B.CreateIntrinsic(Intrinsic::ptrmask, {NewV->getType(), MaskTy},
10975ffd83dbSDimitry Andric {NewV, MaskOp});
10988bcb0991SDimitry Andric }
1099bdd1243dSDimitry Andric case Intrinsic::amdgcn_flat_atomic_fadd:
1100bdd1243dSDimitry Andric case Intrinsic::amdgcn_flat_atomic_fmax:
1101bdd1243dSDimitry Andric case Intrinsic::amdgcn_flat_atomic_fmin: {
1102bdd1243dSDimitry Andric Module *M = II->getParent()->getParent()->getParent();
1103bdd1243dSDimitry Andric Type *DestTy = II->getType();
1104bdd1243dSDimitry Andric Type *SrcTy = NewV->getType();
1105bdd1243dSDimitry Andric Function *NewDecl = Intrinsic::getDeclaration(M, II->getIntrinsicID(),
1106bdd1243dSDimitry Andric {DestTy, SrcTy, DestTy});
1107bdd1243dSDimitry Andric II->setArgOperand(0, NewV);
1108bdd1243dSDimitry Andric II->setCalledFunction(NewDecl);
1109bdd1243dSDimitry Andric return II;
1110bdd1243dSDimitry Andric }
11118bcb0991SDimitry Andric default:
11125ffd83dbSDimitry Andric return nullptr;
11138bcb0991SDimitry Andric }
11148bcb0991SDimitry Andric }
11158bcb0991SDimitry Andric
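// Editor's note: a minimal sketch, not from this file, of how an
// InferAddressSpaces-style caller drives the two hooks above; the helper name
// tryRewriteFlatIntrinsic and the origin of NewPtr are assumptions.
#if 0
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/TargetTransformInfo.h"
using namespace llvm;

static Value *tryRewriteFlatIntrinsic(IntrinsicInst *II, Value *NewPtr,
                                      const TargetTransformInfo &TTI) {
  SmallVector<int, 2> OpIndexes;
  // Ask which pointer operands of this intrinsic may be rewritten.
  if (!TTI.collectFlatAddressOperands(OpIndexes, II->getIntrinsicID()))
    return nullptr;
  Value *OldPtr = II->getArgOperand(OpIndexes[0]);
  // Returns the replacement value, or nullptr when rewriting is refused
  // (e.g. the volatile ds_fadd case above).
  return TTI.rewriteIntrinsicWithAddressSpace(II, OldPtr, NewPtr);
}
#endif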
1116fe6060f1SDimitry Andric InstructionCost GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1117fe6060f1SDimitry Andric VectorType *VT, ArrayRef<int> Mask,
1118bdd1243dSDimitry Andric TTI::TargetCostKind CostKind,
111981ad6265SDimitry Andric int Index, VectorType *SubTp,
112081ad6265SDimitry Andric ArrayRef<const Value *> Args) {
1121fe6060f1SDimitry Andric Kind = improveShuffleKindFromMask(Kind, Mask);
11220b57cec5SDimitry Andric if (ST->hasVOP3PInsts()) {
11235ffd83dbSDimitry Andric if (cast<FixedVectorType>(VT)->getNumElements() == 2 &&
11240b57cec5SDimitry Andric DL.getTypeSizeInBits(VT->getElementType()) == 16) {
11250b57cec5SDimitry Andric // With op_sel, VOP3P instructions can freely access the low half or high
11260b57cec5SDimitry Andric // half of a register, so any swizzle is free.
11270b57cec5SDimitry Andric
11280b57cec5SDimitry Andric switch (Kind) {
11290b57cec5SDimitry Andric case TTI::SK_Broadcast:
11300b57cec5SDimitry Andric case TTI::SK_Reverse:
11310b57cec5SDimitry Andric case TTI::SK_PermuteSingleSrc:
11320b57cec5SDimitry Andric return 0;
11330b57cec5SDimitry Andric default:
11340b57cec5SDimitry Andric break;
11350b57cec5SDimitry Andric }
11360b57cec5SDimitry Andric }
11370b57cec5SDimitry Andric }
11380b57cec5SDimitry Andric
1139bdd1243dSDimitry Andric return BaseT::getShuffleCost(Kind, VT, Mask, CostKind, Index, SubTp);
11400b57cec5SDimitry Andric }
11410b57cec5SDimitry Andric
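// Editor's note: an illustrative query, not from this file, showing the
// effect of the VOP3P special case above: reversing a <2 x half> vector is
// reported as free. The helper name reverseV2F16Cost is hypothetical.
#if 0
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

static InstructionCost reverseV2F16Cost(const TargetTransformInfo &TTI,
                                        LLVMContext &Ctx) {
  auto *V2F16 = FixedVectorType::get(Type::getHalfTy(Ctx), 2);
  int Mask[] = {1, 0}; // an element swap, i.e. TTI::SK_Reverse
  return TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, V2F16, Mask,
                            TargetTransformInfo::TCK_RecipThroughput);
}
#endif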
11420b57cec5SDimitry Andric bool GCNTTIImpl::areInlineCompatible(const Function *Caller,
11430b57cec5SDimitry Andric const Function *Callee) const {
11440b57cec5SDimitry Andric const TargetMachine &TM = getTLI()->getTargetMachine();
1145480093f4SDimitry Andric const GCNSubtarget *CallerST
1146480093f4SDimitry Andric = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Caller));
1147480093f4SDimitry Andric const GCNSubtarget *CalleeST
1148480093f4SDimitry Andric = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Callee));
1149480093f4SDimitry Andric
1150480093f4SDimitry Andric const FeatureBitset &CallerBits = CallerST->getFeatureBits();
1151480093f4SDimitry Andric const FeatureBitset &CalleeBits = CalleeST->getFeatureBits();
11520b57cec5SDimitry Andric
11530b57cec5SDimitry Andric FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
11540b57cec5SDimitry Andric FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
11550b57cec5SDimitry Andric if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
11560b57cec5SDimitry Andric return false;
11570b57cec5SDimitry Andric
11580b57cec5SDimitry Andric // FIXME: dx10_clamp can just take the caller setting, but there seems to be
11590b57cec5SDimitry Andric // no way to support merge for backend defined attributes.
11605ffd83dbSDimitry Andric AMDGPU::SIModeRegisterDefaults CallerMode(*Caller);
11615ffd83dbSDimitry Andric AMDGPU::SIModeRegisterDefaults CalleeMode(*Callee);
1162e8d8bef9SDimitry Andric if (!CallerMode.isInlineCompatible(CalleeMode))
1163e8d8bef9SDimitry Andric return false;
1164e8d8bef9SDimitry Andric
1165fe6060f1SDimitry Andric if (Callee->hasFnAttribute(Attribute::AlwaysInline) ||
1166fe6060f1SDimitry Andric Callee->hasFnAttribute(Attribute::InlineHint))
1167fe6060f1SDimitry Andric return true;
1168fe6060f1SDimitry Andric
1169e8d8bef9SDimitry Andric // Hack to make compile times reasonable.
1170fe6060f1SDimitry Andric if (InlineMaxBB) {
1171fe6060f1SDimitry Andric // A single-BB callee does not increase the total BB count.
1172fe6060f1SDimitry Andric if (Callee->size() == 1)
1173fe6060f1SDimitry Andric return true;
1174e8d8bef9SDimitry Andric size_t BBSize = Caller->size() + Callee->size() - 1;
1175e8d8bef9SDimitry Andric return BBSize <= InlineMaxBB;
1176e8d8bef9SDimitry Andric }
1177e8d8bef9SDimitry Andric
1178e8d8bef9SDimitry Andric return true;
1179e8d8bef9SDimitry Andric }
1180e8d8bef9SDimitry Andric
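// Editor's note: a minimal standalone restatement, not from this file, of the
// feature-subset rule above: inlining is allowed only when every feature the
// callee requires (outside InlineFeatureIgnoreList) is also enabled in the
// caller. The helper name is hypothetical.
#if 0
#include "llvm/MC/SubtargetFeature.h"
using namespace llvm;

static bool calleeFeaturesSubsetOfCaller(const FeatureBitset &CallerBits,
                                         const FeatureBitset &CalleeBits,
                                         const FeatureBitset &Ignored) {
  FeatureBitset RealCaller = CallerBits & ~Ignored;
  FeatureBitset RealCallee = CalleeBits & ~Ignored;
  // Subset test: intersecting with the caller must not drop any callee bit.
  return (RealCaller & RealCallee) == RealCallee;
}
#endif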
1181e8d8bef9SDimitry Andric unsigned GCNTTIImpl::adjustInliningThreshold(const CallBase *CB) const {
1182e8d8bef9SDimitry Andric // If a pointer to a private array is passed into a function, the alloca
1183e8d8bef9SDimitry Andric // will not be optimized out, leaving scratch usage.
1184e8d8bef9SDimitry Andric // Increase the inline threshold to allow inlining in this case.
1185e8d8bef9SDimitry Andric uint64_t AllocaSize = 0;
1186e8d8bef9SDimitry Andric SmallPtrSet<const AllocaInst *, 8> AIVisited;
1187e8d8bef9SDimitry Andric for (Value *PtrArg : CB->args()) {
1188e8d8bef9SDimitry Andric PointerType *Ty = dyn_cast<PointerType>(PtrArg->getType());
1189e8d8bef9SDimitry Andric if (!Ty || (Ty->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS &&
1190e8d8bef9SDimitry Andric Ty->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS))
1191e8d8bef9SDimitry Andric continue;
1192e8d8bef9SDimitry Andric
1193e8d8bef9SDimitry Andric PtrArg = getUnderlyingObject(PtrArg);
1194e8d8bef9SDimitry Andric if (const AllocaInst *AI = dyn_cast<AllocaInst>(PtrArg)) {
1195e8d8bef9SDimitry Andric if (!AI->isStaticAlloca() || !AIVisited.insert(AI).second)
1196e8d8bef9SDimitry Andric continue;
1197e8d8bef9SDimitry Andric AllocaSize += DL.getTypeAllocSize(AI->getAllocatedType());
1198e8d8bef9SDimitry Andric // If the amount of stack memory is excessive, we will not be able
1199e8d8bef9SDimitry Andric // to get rid of the scratch anyway; bail out.
1200e8d8bef9SDimitry Andric if (AllocaSize > ArgAllocaCutoff) {
1201e8d8bef9SDimitry Andric AllocaSize = 0;
1202e8d8bef9SDimitry Andric break;
1203e8d8bef9SDimitry Andric }
1204e8d8bef9SDimitry Andric }
1205e8d8bef9SDimitry Andric }
1206e8d8bef9SDimitry Andric if (AllocaSize)
1207e8d8bef9SDimitry Andric return ArgAllocaCost;
1208e8d8bef9SDimitry Andric return 0;
12090b57cec5SDimitry Andric }
12100b57cec5SDimitry Andric
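// Editor's note: an illustrative (hypothetical) source pattern that the
// threshold bump above targets: a pointer to a private array escapes into a
// callee, so only inlining lets SROA promote the array out of scratch.
#if 0
static float sum4(const float *P) {
  return P[0] + P[1] + P[2] + P[3];
}

float caller() {
  float Tmp[4] = {0, 1, 2, 3}; // a private array: scratch memory on AMDGPU
  return sum4(Tmp);            // inlining enables promotion of Tmp to registers
}
#endif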
12110b57cec5SDimitry Andric void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1212349cc55cSDimitry Andric TTI::UnrollingPreferences &UP,
1213349cc55cSDimitry Andric OptimizationRemarkEmitter *ORE) {
1214349cc55cSDimitry Andric CommonTTI.getUnrollingPreferences(L, SE, UP, ORE);
12150b57cec5SDimitry Andric }
12160b57cec5SDimitry Andric
12175ffd83dbSDimitry Andric void GCNTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
12185ffd83dbSDimitry Andric TTI::PeelingPreferences &PP) {
12195ffd83dbSDimitry Andric CommonTTI.getPeelingPreferences(L, SE, PP);
12208bcb0991SDimitry Andric }
12218bcb0991SDimitry Andric
1222e8d8bef9SDimitry Andric int GCNTTIImpl::get64BitInstrCost(TTI::TargetCostKind CostKind) const {
1223fe6060f1SDimitry Andric return ST->hasFullRate64Ops()
1224fe6060f1SDimitry Andric ? getFullRateInstrCost()
1225fe6060f1SDimitry Andric : ST->hasHalfRate64Ops() ? getHalfRateInstrCost(CostKind)
1226e8d8bef9SDimitry Andric : getQuarterRateInstrCost(CostKind);
1227e8d8bef9SDimitry Andric }
1228bdd1243dSDimitry Andric
1229bdd1243dSDimitry Andric std::pair<InstructionCost, MVT>
1230bdd1243dSDimitry Andric GCNTTIImpl::getTypeLegalizationCost(Type *Ty) const {
1231bdd1243dSDimitry Andric std::pair<InstructionCost, MVT> Cost = BaseT::getTypeLegalizationCost(Ty);
1232bdd1243dSDimitry Andric auto Size = DL.getTypeSizeInBits(Ty);
1233bdd1243dSDimitry Andric // The maximum load or store can handle 8 dwords for scalar and 4 for
1234bdd1243dSDimitry Andric // vector ALU. Let's assume anything above 8 dwords is expensive
1235bdd1243dSDimitry Andric // even if legal.
1236bdd1243dSDimitry Andric if (Size <= 256)
1237bdd1243dSDimitry Andric return Cost;
1238bdd1243dSDimitry Andric
1239bdd1243dSDimitry Andric Cost.first += (Size + 255) / 256;
1240bdd1243dSDimitry Andric return Cost;
1241bdd1243dSDimitry Andric }
1242
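// Editor's note: a worked instance, not from this file, of the adjustment
// just above. For a 1024-bit type such as <32 x i32>, the base legalization
// cost is increased by (1024 + 255) / 256 == 4 units, modeling the split into
// multiple at-most-256-bit (8-dword) operations.
#if 0
unsigned Size = 1024;                // bits in the type, e.g. <32 x i32>
unsigned Extra = (Size + 255) / 256; // == 4 extra cost units
#endif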