1 //===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// This file provides a helper that implements much of the TTI interface in 11 /// terms of the target-independent code generator and TargetLowering 12 /// interfaces. 13 // 14 //===----------------------------------------------------------------------===// 15 16 #ifndef LLVM_CODEGEN_BASICTTIIMPL_H 17 #define LLVM_CODEGEN_BASICTTIIMPL_H 18 19 #include "llvm/ADT/APInt.h" 20 #include "llvm/ADT/ArrayRef.h" 21 #include "llvm/ADT/BitVector.h" 22 #include "llvm/ADT/SmallPtrSet.h" 23 #include "llvm/ADT/SmallVector.h" 24 #include "llvm/Analysis/LoopInfo.h" 25 #include "llvm/Analysis/TargetTransformInfo.h" 26 #include "llvm/Analysis/TargetTransformInfoImpl.h" 27 #include "llvm/CodeGen/ISDOpcodes.h" 28 #include "llvm/CodeGen/TargetLowering.h" 29 #include "llvm/CodeGen/TargetSubtargetInfo.h" 30 #include "llvm/CodeGen/ValueTypes.h" 31 #include "llvm/IR/BasicBlock.h" 32 #include "llvm/IR/Constant.h" 33 #include "llvm/IR/Constants.h" 34 #include "llvm/IR/DataLayout.h" 35 #include "llvm/IR/DerivedTypes.h" 36 #include "llvm/IR/InstrTypes.h" 37 #include "llvm/IR/Instruction.h" 38 #include "llvm/IR/Instructions.h" 39 #include "llvm/IR/Intrinsics.h" 40 #include "llvm/IR/Operator.h" 41 #include "llvm/IR/Type.h" 42 #include "llvm/IR/Value.h" 43 #include "llvm/MC/MCSchedule.h" 44 #include "llvm/Support/Casting.h" 45 #include "llvm/Support/CommandLine.h" 46 #include "llvm/Support/ErrorHandling.h" 47 #include "llvm/Support/MachineValueType.h" 48 #include "llvm/Support/MathExtras.h" 49 #include <algorithm> 50 #include <cassert> 51 #include <cstdint> 52 #include <limits> 53 #include <utility> 54 55 namespace llvm { 56 57 class Function; 58 class GlobalValue; 59 class LLVMContext; 60 class ScalarEvolution; 61 class SCEV; 62 class TargetMachine; 63 64 extern cl::opt<unsigned> PartialUnrollingThreshold; 65 66 /// Base class which can be used to help build a TTI implementation. 67 /// 68 /// This class provides as much implementation of the TTI interface as is 69 /// possible using the target independent parts of the code generator. 70 /// 71 /// In order to subclass it, your class must implement a getST() method to 72 /// return the subtarget, and a getTLI() method to return the target lowering. 73 /// We need these methods implemented in the derived class so that this class 74 /// doesn't have to duplicate storage for them. 75 template <typename T> 76 class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> { 77 private: 78 using BaseT = TargetTransformInfoImplCRTPBase<T>; 79 using TTI = TargetTransformInfo; 80 81 /// Helper function to access this as a T. thisT()82 T *thisT() { return static_cast<T *>(this); } 83 84 /// Estimate a cost of Broadcast as an extract and sequence of insert 85 /// operations. getBroadcastShuffleOverhead(FixedVectorType * VTy)86 unsigned getBroadcastShuffleOverhead(FixedVectorType *VTy) { 87 unsigned Cost = 0; 88 // Broadcast cost is equal to the cost of extracting the zero'th element 89 // plus the cost of inserting it into every element of the result vector. 90 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, 0); 91 92 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) { 93 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i); 94 } 95 return Cost; 96 } 97 98 /// Estimate a cost of shuffle as a sequence of extract and insert 99 /// operations. getPermuteShuffleOverhead(FixedVectorType * VTy)100 unsigned getPermuteShuffleOverhead(FixedVectorType *VTy) { 101 unsigned Cost = 0; 102 // Shuffle cost is equal to the cost of extracting element from its argument 103 // plus the cost of inserting them onto the result vector. 104 105 // e.g. <4 x float> has a mask of <0,5,2,7> i.e we need to extract from 106 // index 0 of first vector, index 1 of second vector,index 2 of first 107 // vector and finally index 3 of second vector and insert them at index 108 // <0,1,2,3> of result vector. 109 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) { 110 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i); 111 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, i); 112 } 113 return Cost; 114 } 115 116 /// Estimate a cost of subvector extraction as a sequence of extract and 117 /// insert operations. getExtractSubvectorOverhead(FixedVectorType * VTy,int Index,FixedVectorType * SubVTy)118 unsigned getExtractSubvectorOverhead(FixedVectorType *VTy, int Index, 119 FixedVectorType *SubVTy) { 120 assert(VTy && SubVTy && 121 "Can only extract subvectors from vectors"); 122 int NumSubElts = SubVTy->getNumElements(); 123 assert((Index + NumSubElts) <= (int)VTy->getNumElements() && 124 "SK_ExtractSubvector index out of range"); 125 126 unsigned Cost = 0; 127 // Subvector extraction cost is equal to the cost of extracting element from 128 // the source type plus the cost of inserting them into the result vector 129 // type. 130 for (int i = 0; i != NumSubElts; ++i) { 131 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, 132 i + Index); 133 Cost += 134 thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy, i); 135 } 136 return Cost; 137 } 138 139 /// Estimate a cost of subvector insertion as a sequence of extract and 140 /// insert operations. getInsertSubvectorOverhead(FixedVectorType * VTy,int Index,FixedVectorType * SubVTy)141 unsigned getInsertSubvectorOverhead(FixedVectorType *VTy, int Index, 142 FixedVectorType *SubVTy) { 143 assert(VTy && SubVTy && 144 "Can only insert subvectors into vectors"); 145 int NumSubElts = SubVTy->getNumElements(); 146 assert((Index + NumSubElts) <= (int)VTy->getNumElements() && 147 "SK_InsertSubvector index out of range"); 148 149 unsigned Cost = 0; 150 // Subvector insertion cost is equal to the cost of extracting element from 151 // the source type plus the cost of inserting them into the result vector 152 // type. 153 for (int i = 0; i != NumSubElts; ++i) { 154 Cost += 155 thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy, i); 156 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, 157 i + Index); 158 } 159 return Cost; 160 } 161 162 /// Local query method delegates up to T which *must* implement this! getST()163 const TargetSubtargetInfo *getST() const { 164 return static_cast<const T *>(this)->getST(); 165 } 166 167 /// Local query method delegates up to T which *must* implement this! getTLI()168 const TargetLoweringBase *getTLI() const { 169 return static_cast<const T *>(this)->getTLI(); 170 } 171 getISDIndexedMode(TTI::MemIndexedMode M)172 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) { 173 switch (M) { 174 case TTI::MIM_Unindexed: 175 return ISD::UNINDEXED; 176 case TTI::MIM_PreInc: 177 return ISD::PRE_INC; 178 case TTI::MIM_PreDec: 179 return ISD::PRE_DEC; 180 case TTI::MIM_PostInc: 181 return ISD::POST_INC; 182 case TTI::MIM_PostDec: 183 return ISD::POST_DEC; 184 } 185 llvm_unreachable("Unexpected MemIndexedMode"); 186 } 187 188 protected: BasicTTIImplBase(const TargetMachine * TM,const DataLayout & DL)189 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL) 190 : BaseT(DL) {} 191 virtual ~BasicTTIImplBase() = default; 192 193 using TargetTransformInfoImplBase::DL; 194 195 public: 196 /// \name Scalar TTI Implementations 197 /// @{ allowsMisalignedMemoryAccesses(LLVMContext & Context,unsigned BitWidth,unsigned AddressSpace,unsigned Alignment,bool * Fast)198 bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, 199 unsigned AddressSpace, unsigned Alignment, 200 bool *Fast) const { 201 EVT E = EVT::getIntegerVT(Context, BitWidth); 202 return getTLI()->allowsMisalignedMemoryAccesses( 203 E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast); 204 } 205 hasBranchDivergence()206 bool hasBranchDivergence() { return false; } 207 useGPUDivergenceAnalysis()208 bool useGPUDivergenceAnalysis() { return false; } 209 isSourceOfDivergence(const Value * V)210 bool isSourceOfDivergence(const Value *V) { return false; } 211 isAlwaysUniform(const Value * V)212 bool isAlwaysUniform(const Value *V) { return false; } 213 getFlatAddressSpace()214 unsigned getFlatAddressSpace() { 215 // Return an invalid address space. 216 return -1; 217 } 218 collectFlatAddressOperands(SmallVectorImpl<int> & OpIndexes,Intrinsic::ID IID)219 bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes, 220 Intrinsic::ID IID) const { 221 return false; 222 } 223 isNoopAddrSpaceCast(unsigned FromAS,unsigned ToAS)224 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const { 225 return getTLI()->isNoopAddrSpaceCast(FromAS, ToAS); 226 } 227 rewriteIntrinsicWithAddressSpace(IntrinsicInst * II,Value * OldV,Value * NewV)228 Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, 229 Value *NewV) const { 230 return nullptr; 231 } 232 isLegalAddImmediate(int64_t imm)233 bool isLegalAddImmediate(int64_t imm) { 234 return getTLI()->isLegalAddImmediate(imm); 235 } 236 isLegalICmpImmediate(int64_t imm)237 bool isLegalICmpImmediate(int64_t imm) { 238 return getTLI()->isLegalICmpImmediate(imm); 239 } 240 241 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, 242 bool HasBaseReg, int64_t Scale, 243 unsigned AddrSpace, Instruction *I = nullptr) { 244 TargetLoweringBase::AddrMode AM; 245 AM.BaseGV = BaseGV; 246 AM.BaseOffs = BaseOffset; 247 AM.HasBaseReg = HasBaseReg; 248 AM.Scale = Scale; 249 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I); 250 } 251 isIndexedLoadLegal(TTI::MemIndexedMode M,Type * Ty,const DataLayout & DL)252 bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty, 253 const DataLayout &DL) const { 254 EVT VT = getTLI()->getValueType(DL, Ty); 255 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT); 256 } 257 isIndexedStoreLegal(TTI::MemIndexedMode M,Type * Ty,const DataLayout & DL)258 bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty, 259 const DataLayout &DL) const { 260 EVT VT = getTLI()->getValueType(DL, Ty); 261 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT); 262 } 263 isLSRCostLess(TTI::LSRCost C1,TTI::LSRCost C2)264 bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) { 265 return TargetTransformInfoImplBase::isLSRCostLess(C1, C2); 266 } 267 isProfitableLSRChainElement(Instruction * I)268 bool isProfitableLSRChainElement(Instruction *I) { 269 return TargetTransformInfoImplBase::isProfitableLSRChainElement(I); 270 } 271 getScalingFactorCost(Type * Ty,GlobalValue * BaseGV,int64_t BaseOffset,bool HasBaseReg,int64_t Scale,unsigned AddrSpace)272 int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, 273 bool HasBaseReg, int64_t Scale, unsigned AddrSpace) { 274 TargetLoweringBase::AddrMode AM; 275 AM.BaseGV = BaseGV; 276 AM.BaseOffs = BaseOffset; 277 AM.HasBaseReg = HasBaseReg; 278 AM.Scale = Scale; 279 return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace); 280 } 281 isTruncateFree(Type * Ty1,Type * Ty2)282 bool isTruncateFree(Type *Ty1, Type *Ty2) { 283 return getTLI()->isTruncateFree(Ty1, Ty2); 284 } 285 isProfitableToHoist(Instruction * I)286 bool isProfitableToHoist(Instruction *I) { 287 return getTLI()->isProfitableToHoist(I); 288 } 289 useAA()290 bool useAA() const { return getST()->useAA(); } 291 isTypeLegal(Type * Ty)292 bool isTypeLegal(Type *Ty) { 293 EVT VT = getTLI()->getValueType(DL, Ty); 294 return getTLI()->isTypeLegal(VT); 295 } 296 getGEPCost(Type * PointeeType,const Value * Ptr,ArrayRef<const Value * > Operands)297 int getGEPCost(Type *PointeeType, const Value *Ptr, 298 ArrayRef<const Value *> Operands) { 299 return BaseT::getGEPCost(PointeeType, Ptr, Operands); 300 } 301 getEstimatedNumberOfCaseClusters(const SwitchInst & SI,unsigned & JumpTableSize,ProfileSummaryInfo * PSI,BlockFrequencyInfo * BFI)302 unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, 303 unsigned &JumpTableSize, 304 ProfileSummaryInfo *PSI, 305 BlockFrequencyInfo *BFI) { 306 /// Try to find the estimated number of clusters. Note that the number of 307 /// clusters identified in this function could be different from the actual 308 /// numbers found in lowering. This function ignore switches that are 309 /// lowered with a mix of jump table / bit test / BTree. This function was 310 /// initially intended to be used when estimating the cost of switch in 311 /// inline cost heuristic, but it's a generic cost model to be used in other 312 /// places (e.g., in loop unrolling). 313 unsigned N = SI.getNumCases(); 314 const TargetLoweringBase *TLI = getTLI(); 315 const DataLayout &DL = this->getDataLayout(); 316 317 JumpTableSize = 0; 318 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent()); 319 320 // Early exit if both a jump table and bit test are not allowed. 321 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N)) 322 return N; 323 324 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue(); 325 APInt MinCaseVal = MaxCaseVal; 326 for (auto CI : SI.cases()) { 327 const APInt &CaseVal = CI.getCaseValue()->getValue(); 328 if (CaseVal.sgt(MaxCaseVal)) 329 MaxCaseVal = CaseVal; 330 if (CaseVal.slt(MinCaseVal)) 331 MinCaseVal = CaseVal; 332 } 333 334 // Check if suitable for a bit test 335 if (N <= DL.getIndexSizeInBits(0u)) { 336 SmallPtrSet<const BasicBlock *, 4> Dests; 337 for (auto I : SI.cases()) 338 Dests.insert(I.getCaseSuccessor()); 339 340 if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal, 341 DL)) 342 return 1; 343 } 344 345 // Check if suitable for a jump table. 346 if (IsJTAllowed) { 347 if (N < 2 || N < TLI->getMinimumJumpTableEntries()) 348 return N; 349 uint64_t Range = 350 (MaxCaseVal - MinCaseVal) 351 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1; 352 // Check whether a range of clusters is dense enough for a jump table 353 if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) { 354 JumpTableSize = Range; 355 return 1; 356 } 357 } 358 return N; 359 } 360 shouldBuildLookupTables()361 bool shouldBuildLookupTables() { 362 const TargetLoweringBase *TLI = getTLI(); 363 return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) || 364 TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other); 365 } 366 haveFastSqrt(Type * Ty)367 bool haveFastSqrt(Type *Ty) { 368 const TargetLoweringBase *TLI = getTLI(); 369 EVT VT = TLI->getValueType(DL, Ty); 370 return TLI->isTypeLegal(VT) && 371 TLI->isOperationLegalOrCustom(ISD::FSQRT, VT); 372 } 373 isFCmpOrdCheaperThanFCmpZero(Type * Ty)374 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) { 375 return true; 376 } 377 getFPOpCost(Type * Ty)378 unsigned getFPOpCost(Type *Ty) { 379 // Check whether FADD is available, as a proxy for floating-point in 380 // general. 381 const TargetLoweringBase *TLI = getTLI(); 382 EVT VT = TLI->getValueType(DL, Ty); 383 if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT)) 384 return TargetTransformInfo::TCC_Basic; 385 return TargetTransformInfo::TCC_Expensive; 386 } 387 getInliningThresholdMultiplier()388 unsigned getInliningThresholdMultiplier() { return 1; } 389 getInlinerVectorBonusPercent()390 int getInlinerVectorBonusPercent() { return 150; } 391 getUnrollingPreferences(Loop * L,ScalarEvolution & SE,TTI::UnrollingPreferences & UP)392 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, 393 TTI::UnrollingPreferences &UP) { 394 // This unrolling functionality is target independent, but to provide some 395 // motivation for its intended use, for x86: 396 397 // According to the Intel 64 and IA-32 Architectures Optimization Reference 398 // Manual, Intel Core models and later have a loop stream detector (and 399 // associated uop queue) that can benefit from partial unrolling. 400 // The relevant requirements are: 401 // - The loop must have no more than 4 (8 for Nehalem and later) branches 402 // taken, and none of them may be calls. 403 // - The loop can have no more than 18 (28 for Nehalem and later) uops. 404 405 // According to the Software Optimization Guide for AMD Family 15h 406 // Processors, models 30h-4fh (Steamroller and later) have a loop predictor 407 // and loop buffer which can benefit from partial unrolling. 408 // The relevant requirements are: 409 // - The loop must have fewer than 16 branches 410 // - The loop must have less than 40 uops in all executed loop branches 411 412 // The number of taken branches in a loop is hard to estimate here, and 413 // benchmarking has revealed that it is better not to be conservative when 414 // estimating the branch count. As a result, we'll ignore the branch limits 415 // until someone finds a case where it matters in practice. 416 417 unsigned MaxOps; 418 const TargetSubtargetInfo *ST = getST(); 419 if (PartialUnrollingThreshold.getNumOccurrences() > 0) 420 MaxOps = PartialUnrollingThreshold; 421 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0) 422 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize; 423 else 424 return; 425 426 // Scan the loop: don't unroll loops with calls. 427 for (BasicBlock *BB : L->blocks()) { 428 for (Instruction &I : *BB) { 429 if (isa<CallInst>(I) || isa<InvokeInst>(I)) { 430 if (const Function *F = cast<CallBase>(I).getCalledFunction()) { 431 if (!thisT()->isLoweredToCall(F)) 432 continue; 433 } 434 435 return; 436 } 437 } 438 } 439 440 // Enable runtime and partial unrolling up to the specified size. 441 // Enable using trip count upper bound to unroll loops. 442 UP.Partial = UP.Runtime = UP.UpperBound = true; 443 UP.PartialThreshold = MaxOps; 444 445 // Avoid unrolling when optimizing for size. 446 UP.OptSizeThreshold = 0; 447 UP.PartialOptSizeThreshold = 0; 448 449 // Set number of instructions optimized when "back edge" 450 // becomes "fall through" to default value of 2. 451 UP.BEInsns = 2; 452 } 453 getPeelingPreferences(Loop * L,ScalarEvolution & SE,TTI::PeelingPreferences & PP)454 void getPeelingPreferences(Loop *L, ScalarEvolution &SE, 455 TTI::PeelingPreferences &PP) { 456 PP.PeelCount = 0; 457 PP.AllowPeeling = true; 458 PP.AllowLoopNestsPeeling = false; 459 PP.PeelProfiledIterations = true; 460 } 461 isHardwareLoopProfitable(Loop * L,ScalarEvolution & SE,AssumptionCache & AC,TargetLibraryInfo * LibInfo,HardwareLoopInfo & HWLoopInfo)462 bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, 463 AssumptionCache &AC, 464 TargetLibraryInfo *LibInfo, 465 HardwareLoopInfo &HWLoopInfo) { 466 return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo); 467 } 468 preferPredicateOverEpilogue(Loop * L,LoopInfo * LI,ScalarEvolution & SE,AssumptionCache & AC,TargetLibraryInfo * TLI,DominatorTree * DT,const LoopAccessInfo * LAI)469 bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE, 470 AssumptionCache &AC, TargetLibraryInfo *TLI, 471 DominatorTree *DT, 472 const LoopAccessInfo *LAI) { 473 return BaseT::preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI); 474 } 475 emitGetActiveLaneMask()476 bool emitGetActiveLaneMask() { 477 return BaseT::emitGetActiveLaneMask(); 478 } 479 getInstructionLatency(const Instruction * I)480 int getInstructionLatency(const Instruction *I) { 481 if (isa<LoadInst>(I)) 482 return getST()->getSchedModel().DefaultLoadLatency; 483 484 return BaseT::getInstructionLatency(I); 485 } 486 487 virtual Optional<unsigned> getCacheSize(TargetTransformInfo::CacheLevel Level)488 getCacheSize(TargetTransformInfo::CacheLevel Level) const { 489 return Optional<unsigned>( 490 getST()->getCacheSize(static_cast<unsigned>(Level))); 491 } 492 493 virtual Optional<unsigned> getCacheAssociativity(TargetTransformInfo::CacheLevel Level)494 getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const { 495 Optional<unsigned> TargetResult = 496 getST()->getCacheAssociativity(static_cast<unsigned>(Level)); 497 498 if (TargetResult) 499 return TargetResult; 500 501 return BaseT::getCacheAssociativity(Level); 502 } 503 getCacheLineSize()504 virtual unsigned getCacheLineSize() const { 505 return getST()->getCacheLineSize(); 506 } 507 getPrefetchDistance()508 virtual unsigned getPrefetchDistance() const { 509 return getST()->getPrefetchDistance(); 510 } 511 getMinPrefetchStride(unsigned NumMemAccesses,unsigned NumStridedMemAccesses,unsigned NumPrefetches,bool HasCall)512 virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, 513 unsigned NumStridedMemAccesses, 514 unsigned NumPrefetches, 515 bool HasCall) const { 516 return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses, 517 NumPrefetches, HasCall); 518 } 519 getMaxPrefetchIterationsAhead()520 virtual unsigned getMaxPrefetchIterationsAhead() const { 521 return getST()->getMaxPrefetchIterationsAhead(); 522 } 523 enableWritePrefetching()524 virtual bool enableWritePrefetching() const { 525 return getST()->enableWritePrefetching(); 526 } 527 528 /// @} 529 530 /// \name Vector TTI Implementations 531 /// @{ 532 getRegisterBitWidth(bool Vector)533 unsigned getRegisterBitWidth(bool Vector) const { return 32; } 534 535 /// Estimate the overhead of scalarizing an instruction. Insert and Extract 536 /// are set if the demanded result elements need to be inserted and/or 537 /// extracted from vectors. getScalarizationOverhead(VectorType * InTy,const APInt & DemandedElts,bool Insert,bool Extract)538 unsigned getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, 539 bool Insert, bool Extract) { 540 /// FIXME: a bitfield is not a reasonable abstraction for talking about 541 /// which elements are needed from a scalable vector 542 auto *Ty = cast<FixedVectorType>(InTy); 543 544 assert(DemandedElts.getBitWidth() == Ty->getNumElements() && 545 "Vector size mismatch"); 546 547 unsigned Cost = 0; 548 549 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) { 550 if (!DemandedElts[i]) 551 continue; 552 if (Insert) 553 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty, i); 554 if (Extract) 555 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, i); 556 } 557 558 return Cost; 559 } 560 561 /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead. getScalarizationOverhead(VectorType * InTy,bool Insert,bool Extract)562 unsigned getScalarizationOverhead(VectorType *InTy, bool Insert, 563 bool Extract) { 564 auto *Ty = cast<FixedVectorType>(InTy); 565 566 APInt DemandedElts = APInt::getAllOnesValue(Ty->getNumElements()); 567 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract); 568 } 569 570 /// Estimate the overhead of scalarizing an instructions unique 571 /// non-constant operands. The types of the arguments are ordinarily 572 /// scalar, in which case the costs are multiplied with VF. getOperandsScalarizationOverhead(ArrayRef<const Value * > Args,unsigned VF)573 unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args, 574 unsigned VF) { 575 unsigned Cost = 0; 576 SmallPtrSet<const Value*, 4> UniqueOperands; 577 for (const Value *A : Args) { 578 if (!isa<Constant>(A) && UniqueOperands.insert(A).second) { 579 auto *VecTy = dyn_cast<VectorType>(A->getType()); 580 if (VecTy) { 581 // If A is a vector operand, VF should be 1 or correspond to A. 582 assert((VF == 1 || 583 VF == cast<FixedVectorType>(VecTy)->getNumElements()) && 584 "Vector argument does not match VF"); 585 } 586 else 587 VecTy = FixedVectorType::get(A->getType(), VF); 588 589 Cost += getScalarizationOverhead(VecTy, false, true); 590 } 591 } 592 593 return Cost; 594 } 595 getScalarizationOverhead(VectorType * InTy,ArrayRef<const Value * > Args)596 unsigned getScalarizationOverhead(VectorType *InTy, 597 ArrayRef<const Value *> Args) { 598 auto *Ty = cast<FixedVectorType>(InTy); 599 600 unsigned Cost = 0; 601 602 Cost += getScalarizationOverhead(Ty, true, false); 603 if (!Args.empty()) 604 Cost += getOperandsScalarizationOverhead(Args, Ty->getNumElements()); 605 else 606 // When no information on arguments is provided, we add the cost 607 // associated with one argument as a heuristic. 608 Cost += getScalarizationOverhead(Ty, false, true); 609 610 return Cost; 611 } 612 getMaxInterleaveFactor(unsigned VF)613 unsigned getMaxInterleaveFactor(unsigned VF) { return 1; } 614 615 unsigned getArithmeticInstrCost( 616 unsigned Opcode, Type *Ty, 617 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, 618 TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue, 619 TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue, 620 TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None, 621 TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None, 622 ArrayRef<const Value *> Args = ArrayRef<const Value *>(), 623 const Instruction *CxtI = nullptr) { 624 // Check if any of the operands are vector operands. 625 const TargetLoweringBase *TLI = getTLI(); 626 int ISD = TLI->InstructionOpcodeToISD(Opcode); 627 assert(ISD && "Invalid opcode"); 628 629 // TODO: Handle more cost kinds. 630 if (CostKind != TTI::TCK_RecipThroughput) 631 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, 632 Opd1Info, Opd2Info, 633 Opd1PropInfo, Opd2PropInfo, 634 Args, CxtI); 635 636 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); 637 638 bool IsFloat = Ty->isFPOrFPVectorTy(); 639 // Assume that floating point arithmetic operations cost twice as much as 640 // integer operations. 641 unsigned OpCost = (IsFloat ? 2 : 1); 642 643 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) { 644 // The operation is legal. Assume it costs 1. 645 // TODO: Once we have extract/insert subvector cost we need to use them. 646 return LT.first * OpCost; 647 } 648 649 if (!TLI->isOperationExpand(ISD, LT.second)) { 650 // If the operation is custom lowered, then assume that the code is twice 651 // as expensive. 652 return LT.first * 2 * OpCost; 653 } 654 655 // Else, assume that we need to scalarize this op. 656 // TODO: If one of the types get legalized by splitting, handle this 657 // similarly to what getCastInstrCost() does. 658 if (auto *VTy = dyn_cast<VectorType>(Ty)) { 659 unsigned Num = cast<FixedVectorType>(VTy)->getNumElements(); 660 unsigned Cost = thisT()->getArithmeticInstrCost( 661 Opcode, VTy->getScalarType(), CostKind); 662 // Return the cost of multiple scalar invocation plus the cost of 663 // inserting and extracting the values. 664 return getScalarizationOverhead(VTy, Args) + Num * Cost; 665 } 666 667 // We don't know anything about this scalar instruction. 668 return OpCost; 669 } 670 getShuffleCost(TTI::ShuffleKind Kind,VectorType * Tp,int Index,VectorType * SubTp)671 unsigned getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, int Index, 672 VectorType *SubTp) { 673 674 switch (Kind) { 675 case TTI::SK_Broadcast: 676 return getBroadcastShuffleOverhead(cast<FixedVectorType>(Tp)); 677 case TTI::SK_Select: 678 case TTI::SK_Reverse: 679 case TTI::SK_Transpose: 680 case TTI::SK_PermuteSingleSrc: 681 case TTI::SK_PermuteTwoSrc: 682 return getPermuteShuffleOverhead(cast<FixedVectorType>(Tp)); 683 case TTI::SK_ExtractSubvector: 684 return getExtractSubvectorOverhead(cast<FixedVectorType>(Tp), Index, 685 cast<FixedVectorType>(SubTp)); 686 case TTI::SK_InsertSubvector: 687 return getInsertSubvectorOverhead(cast<FixedVectorType>(Tp), Index, 688 cast<FixedVectorType>(SubTp)); 689 } 690 llvm_unreachable("Unknown TTI::ShuffleKind"); 691 } 692 693 unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, 694 TTI::TargetCostKind CostKind, 695 const Instruction *I = nullptr) { 696 if (BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind, I) == 0) 697 return 0; 698 699 const TargetLoweringBase *TLI = getTLI(); 700 int ISD = TLI->InstructionOpcodeToISD(Opcode); 701 assert(ISD && "Invalid opcode"); 702 std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src); 703 std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst); 704 705 TypeSize SrcSize = SrcLT.second.getSizeInBits(); 706 TypeSize DstSize = DstLT.second.getSizeInBits(); 707 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy(); 708 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy(); 709 710 switch (Opcode) { 711 default: 712 break; 713 case Instruction::Trunc: 714 // Check for NOOP conversions. 715 if (TLI->isTruncateFree(SrcLT.second, DstLT.second)) 716 return 0; 717 LLVM_FALLTHROUGH; 718 case Instruction::BitCast: 719 // Bitcast between types that are legalized to the same type are free and 720 // assume int to/from ptr of the same size is also free. 721 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst && 722 SrcSize == DstSize) 723 return 0; 724 break; 725 case Instruction::FPExt: 726 if (I && getTLI()->isExtFree(I)) 727 return 0; 728 break; 729 case Instruction::ZExt: 730 if (TLI->isZExtFree(SrcLT.second, DstLT.second)) 731 return 0; 732 LLVM_FALLTHROUGH; 733 case Instruction::SExt: 734 if (!I) 735 break; 736 737 if (getTLI()->isExtFree(I)) 738 return 0; 739 740 // If this is a zext/sext of a load, return 0 if the corresponding 741 // extending load exists on target. 742 if (I && isa<LoadInst>(I->getOperand(0))) { 743 EVT ExtVT = EVT::getEVT(Dst); 744 EVT LoadVT = EVT::getEVT(Src); 745 unsigned LType = 746 ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD); 747 if (TLI->isLoadExtLegal(LType, ExtVT, LoadVT)) 748 return 0; 749 } 750 break; 751 case Instruction::AddrSpaceCast: 752 if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(), 753 Dst->getPointerAddressSpace())) 754 return 0; 755 break; 756 } 757 758 auto *SrcVTy = dyn_cast<VectorType>(Src); 759 auto *DstVTy = dyn_cast<VectorType>(Dst); 760 761 // If the cast is marked as legal (or promote) then assume low cost. 762 if (SrcLT.first == DstLT.first && 763 TLI->isOperationLegalOrPromote(ISD, DstLT.second)) 764 return SrcLT.first; 765 766 // Handle scalar conversions. 767 if (!SrcVTy && !DstVTy) { 768 // Just check the op cost. If the operation is legal then assume it costs 769 // 1. 770 if (!TLI->isOperationExpand(ISD, DstLT.second)) 771 return 1; 772 773 // Assume that illegal scalar instruction are expensive. 774 return 4; 775 } 776 777 // Check vector-to-vector casts. 778 if (DstVTy && SrcVTy) { 779 // If the cast is between same-sized registers, then the check is simple. 780 if (SrcLT.first == DstLT.first && SrcSize == DstSize) { 781 782 // Assume that Zext is done using AND. 783 if (Opcode == Instruction::ZExt) 784 return SrcLT.first; 785 786 // Assume that sext is done using SHL and SRA. 787 if (Opcode == Instruction::SExt) 788 return SrcLT.first * 2; 789 790 // Just check the op cost. If the operation is legal then assume it 791 // costs 792 // 1 and multiply by the type-legalization overhead. 793 if (!TLI->isOperationExpand(ISD, DstLT.second)) 794 return SrcLT.first * 1; 795 } 796 797 // If we are legalizing by splitting, query the concrete TTI for the cost 798 // of casting the original vector twice. We also need to factor in the 799 // cost of the split itself. Count that as 1, to be consistent with 800 // TLI->getTypeLegalizationCost(). 801 bool SplitSrc = 802 TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) == 803 TargetLowering::TypeSplitVector; 804 bool SplitDst = 805 TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) == 806 TargetLowering::TypeSplitVector; 807 if ((SplitSrc || SplitDst) && 808 cast<FixedVectorType>(SrcVTy)->getNumElements() > 1 && 809 cast<FixedVectorType>(DstVTy)->getNumElements() > 1) { 810 Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy); 811 Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy); 812 T *TTI = static_cast<T *>(this); 813 // If both types need to be split then the split is free. 814 unsigned SplitCost = 815 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0; 816 return SplitCost + 817 (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, 818 CostKind, I)); 819 } 820 821 // In other cases where the source or destination are illegal, assume 822 // the operation will get scalarized. 823 unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements(); 824 unsigned Cost = thisT()->getCastInstrCost( 825 Opcode, Dst->getScalarType(), Src->getScalarType(), CostKind, I); 826 827 // Return the cost of multiple scalar invocation plus the cost of 828 // inserting and extracting the values. 829 return getScalarizationOverhead(DstVTy, true, true) + Num * Cost; 830 } 831 832 // We already handled vector-to-vector and scalar-to-scalar conversions. 833 // This 834 // is where we handle bitcast between vectors and scalars. We need to assume 835 // that the conversion is scalarized in one way or another. 836 if (Opcode == Instruction::BitCast) { 837 // Illegal bitcasts are done by storing and loading from a stack slot. 838 return (SrcVTy ? getScalarizationOverhead(SrcVTy, false, true) : 0) + 839 (DstVTy ? getScalarizationOverhead(DstVTy, true, false) : 0); 840 } 841 842 llvm_unreachable("Unhandled cast"); 843 } 844 getExtractWithExtendCost(unsigned Opcode,Type * Dst,VectorType * VecTy,unsigned Index)845 unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst, 846 VectorType *VecTy, unsigned Index) { 847 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy, 848 Index) + 849 thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(), 850 TTI::TCK_RecipThroughput); 851 } 852 getCFInstrCost(unsigned Opcode,TTI::TargetCostKind CostKind)853 unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) { 854 return BaseT::getCFInstrCost(Opcode, CostKind); 855 } 856 857 unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, 858 TTI::TargetCostKind CostKind, 859 const Instruction *I = nullptr) { 860 const TargetLoweringBase *TLI = getTLI(); 861 int ISD = TLI->InstructionOpcodeToISD(Opcode); 862 assert(ISD && "Invalid opcode"); 863 864 // TODO: Handle other cost kinds. 865 if (CostKind != TTI::TCK_RecipThroughput) 866 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I); 867 868 // Selects on vectors are actually vector selects. 869 if (ISD == ISD::SELECT) { 870 assert(CondTy && "CondTy must exist"); 871 if (CondTy->isVectorTy()) 872 ISD = ISD::VSELECT; 873 } 874 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 875 876 if (!(ValTy->isVectorTy() && !LT.second.isVector()) && 877 !TLI->isOperationExpand(ISD, LT.second)) { 878 // The operation is legal. Assume it costs 1. Multiply 879 // by the type-legalization overhead. 880 return LT.first * 1; 881 } 882 883 // Otherwise, assume that the cast is scalarized. 884 // TODO: If one of the types get legalized by splitting, handle this 885 // similarly to what getCastInstrCost() does. 886 if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) { 887 unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements(); 888 if (CondTy) 889 CondTy = CondTy->getScalarType(); 890 unsigned Cost = thisT()->getCmpSelInstrCost( 891 Opcode, ValVTy->getScalarType(), CondTy, CostKind, I); 892 893 // Return the cost of multiple scalar invocation plus the cost of 894 // inserting and extracting the values. 895 return getScalarizationOverhead(ValVTy, true, false) + Num * Cost; 896 } 897 898 // Unknown scalar opcode. 899 return 1; 900 } 901 getVectorInstrCost(unsigned Opcode,Type * Val,unsigned Index)902 unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) { 903 std::pair<unsigned, MVT> LT = 904 getTLI()->getTypeLegalizationCost(DL, Val->getScalarType()); 905 906 return LT.first; 907 } 908 909 unsigned getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, 910 unsigned AddressSpace, 911 TTI::TargetCostKind CostKind, 912 const Instruction *I = nullptr) { 913 assert(!Src->isVoidTy() && "Invalid type"); 914 // Assume types, such as structs, are expensive. 915 if (getTLI()->getValueType(DL, Src, true) == MVT::Other) 916 return 4; 917 std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src); 918 919 // Assuming that all loads of legal types cost 1. 920 unsigned Cost = LT.first; 921 if (CostKind != TTI::TCK_RecipThroughput) 922 return Cost; 923 924 if (Src->isVectorTy() && 925 Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) { 926 // This is a vector load that legalizes to a larger type than the vector 927 // itself. Unless the corresponding extending load or truncating store is 928 // legal, then this will scalarize. 929 TargetLowering::LegalizeAction LA = TargetLowering::Expand; 930 EVT MemVT = getTLI()->getValueType(DL, Src); 931 if (Opcode == Instruction::Store) 932 LA = getTLI()->getTruncStoreAction(LT.second, MemVT); 933 else 934 LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT); 935 936 if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) { 937 // This is a vector load/store for some illegal type that is scalarized. 938 // We must account for the cost of building or decomposing the vector. 939 Cost += getScalarizationOverhead(cast<VectorType>(Src), 940 Opcode != Instruction::Store, 941 Opcode == Instruction::Store); 942 } 943 } 944 945 return Cost; 946 } 947 948 unsigned getInterleavedMemoryOpCost( 949 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, 950 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, 951 bool UseMaskForCond = false, bool UseMaskForGaps = false) { 952 auto *VT = cast<FixedVectorType>(VecTy); 953 954 unsigned NumElts = VT->getNumElements(); 955 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor"); 956 957 unsigned NumSubElts = NumElts / Factor; 958 auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts); 959 960 // Firstly, the cost of load/store operation. 961 unsigned Cost; 962 if (UseMaskForCond || UseMaskForGaps) 963 Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment, 964 AddressSpace, CostKind); 965 else 966 Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, 967 CostKind); 968 969 // Legalize the vector type, and get the legalized and unlegalized type 970 // sizes. 971 MVT VecTyLT = getTLI()->getTypeLegalizationCost(DL, VecTy).second; 972 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy); 973 unsigned VecTyLTSize = VecTyLT.getStoreSize(); 974 975 // Return the ceiling of dividing A by B. 976 auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; }; 977 978 // Scale the cost of the memory operation by the fraction of legalized 979 // instructions that will actually be used. We shouldn't account for the 980 // cost of dead instructions since they will be removed. 981 // 982 // E.g., An interleaved load of factor 8: 983 // %vec = load <16 x i64>, <16 x i64>* %ptr 984 // %v0 = shufflevector %vec, undef, <0, 8> 985 // 986 // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be 987 // used (those corresponding to elements [0:1] and [8:9] of the unlegalized 988 // type). The other loads are unused. 989 // 990 // We only scale the cost of loads since interleaved store groups aren't 991 // allowed to have gaps. 992 if (Opcode == Instruction::Load && VecTySize > VecTyLTSize) { 993 // The number of loads of a legal type it will take to represent a load 994 // of the unlegalized vector type. 995 unsigned NumLegalInsts = ceil(VecTySize, VecTyLTSize); 996 997 // The number of elements of the unlegalized type that correspond to a 998 // single legal instruction. 999 unsigned NumEltsPerLegalInst = ceil(NumElts, NumLegalInsts); 1000 1001 // Determine which legal instructions will be used. 1002 BitVector UsedInsts(NumLegalInsts, false); 1003 for (unsigned Index : Indices) 1004 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt) 1005 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst); 1006 1007 // Scale the cost of the load by the fraction of legal instructions that 1008 // will be used. 1009 Cost *= UsedInsts.count() / NumLegalInsts; 1010 } 1011 1012 // Then plus the cost of interleave operation. 1013 if (Opcode == Instruction::Load) { 1014 // The interleave cost is similar to extract sub vectors' elements 1015 // from the wide vector, and insert them into sub vectors. 1016 // 1017 // E.g. An interleaved load of factor 2 (with one member of index 0): 1018 // %vec = load <8 x i32>, <8 x i32>* %ptr 1019 // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0 1020 // The cost is estimated as extract elements at 0, 2, 4, 6 from the 1021 // <8 x i32> vector and insert them into a <4 x i32> vector. 1022 1023 assert(Indices.size() <= Factor && 1024 "Interleaved memory op has too many members"); 1025 1026 for (unsigned Index : Indices) { 1027 assert(Index < Factor && "Invalid index for interleaved memory op"); 1028 1029 // Extract elements from loaded vector for each sub vector. 1030 for (unsigned i = 0; i < NumSubElts; i++) 1031 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VT, 1032 Index + i * Factor); 1033 } 1034 1035 unsigned InsSubCost = 0; 1036 for (unsigned i = 0; i < NumSubElts; i++) 1037 InsSubCost += 1038 thisT()->getVectorInstrCost(Instruction::InsertElement, SubVT, i); 1039 1040 Cost += Indices.size() * InsSubCost; 1041 } else { 1042 // The interleave cost is extract all elements from sub vectors, and 1043 // insert them into the wide vector. 1044 // 1045 // E.g. An interleaved store of factor 2: 1046 // %v0_v1 = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7> 1047 // store <8 x i32> %interleaved.vec, <8 x i32>* %ptr 1048 // The cost is estimated as extract all elements from both <4 x i32> 1049 // vectors and insert into the <8 x i32> vector. 1050 1051 unsigned ExtSubCost = 0; 1052 for (unsigned i = 0; i < NumSubElts; i++) 1053 ExtSubCost += 1054 thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVT, i); 1055 Cost += ExtSubCost * Factor; 1056 1057 for (unsigned i = 0; i < NumElts; i++) 1058 Cost += static_cast<T *>(this) 1059 ->getVectorInstrCost(Instruction::InsertElement, VT, i); 1060 } 1061 1062 if (!UseMaskForCond) 1063 return Cost; 1064 1065 Type *I8Type = Type::getInt8Ty(VT->getContext()); 1066 auto *MaskVT = FixedVectorType::get(I8Type, NumElts); 1067 SubVT = FixedVectorType::get(I8Type, NumSubElts); 1068 1069 // The Mask shuffling cost is extract all the elements of the Mask 1070 // and insert each of them Factor times into the wide vector: 1071 // 1072 // E.g. an interleaved group with factor 3: 1073 // %mask = icmp ult <8 x i32> %vec1, %vec2 1074 // %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef, 1075 // <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7> 1076 // The cost is estimated as extract all mask elements from the <8xi1> mask 1077 // vector and insert them factor times into the <24xi1> shuffled mask 1078 // vector. 1079 for (unsigned i = 0; i < NumSubElts; i++) 1080 Cost += 1081 thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVT, i); 1082 1083 for (unsigned i = 0; i < NumElts; i++) 1084 Cost += 1085 thisT()->getVectorInstrCost(Instruction::InsertElement, MaskVT, i); 1086 1087 // The Gaps mask is invariant and created outside the loop, therefore the 1088 // cost of creating it is not accounted for here. However if we have both 1089 // a MaskForGaps and some other mask that guards the execution of the 1090 // memory access, we need to account for the cost of And-ing the two masks 1091 // inside the loop. 1092 if (UseMaskForGaps) 1093 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT, 1094 CostKind); 1095 1096 return Cost; 1097 } 1098 1099 /// Get intrinsic cost based on arguments. getIntrinsicInstrCost(const IntrinsicCostAttributes & ICA,TTI::TargetCostKind CostKind)1100 unsigned getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 1101 TTI::TargetCostKind CostKind) { 1102 Intrinsic::ID IID = ICA.getID(); 1103 1104 // Special case some scalar intrinsics. 1105 if (CostKind != TTI::TCK_RecipThroughput) { 1106 switch (IID) { 1107 default: 1108 break; 1109 case Intrinsic::cttz: 1110 if (getTLI()->isCheapToSpeculateCttz()) 1111 return TargetTransformInfo::TCC_Basic; 1112 break; 1113 case Intrinsic::ctlz: 1114 if (getTLI()->isCheapToSpeculateCtlz()) 1115 return TargetTransformInfo::TCC_Basic; 1116 break; 1117 case Intrinsic::memcpy: 1118 return thisT()->getMemcpyCost(ICA.getInst()); 1119 // TODO: other libc intrinsics. 1120 } 1121 return BaseT::getIntrinsicInstrCost(ICA, CostKind); 1122 } 1123 1124 if (BaseT::getIntrinsicInstrCost(ICA, CostKind) == 0) 1125 return 0; 1126 1127 // TODO: Combine these two logic paths. 1128 if (ICA.isTypeBasedOnly()) 1129 return getTypeBasedIntrinsicInstrCost(ICA, CostKind); 1130 1131 Type *RetTy = ICA.getReturnType(); 1132 unsigned VF = ICA.getVectorFactor(); 1133 unsigned RetVF = 1134 (RetTy->isVectorTy() ? cast<FixedVectorType>(RetTy)->getNumElements() 1135 : 1); 1136 assert((RetVF == 1 || VF == 1) && "VF > 1 and RetVF is a vector type"); 1137 const IntrinsicInst *I = ICA.getInst(); 1138 const SmallVectorImpl<const Value *> &Args = ICA.getArgs(); 1139 FastMathFlags FMF = ICA.getFlags(); 1140 1141 switch (IID) { 1142 default: { 1143 // Assume that we need to scalarize this intrinsic. 1144 SmallVector<Type *, 4> Types; 1145 for (const Value *Op : Args) { 1146 Type *OpTy = Op->getType(); 1147 assert(VF == 1 || !OpTy->isVectorTy()); 1148 Types.push_back(VF == 1 ? OpTy : FixedVectorType::get(OpTy, VF)); 1149 } 1150 1151 if (VF > 1 && !RetTy->isVoidTy()) 1152 RetTy = FixedVectorType::get(RetTy, VF); 1153 1154 // Compute the scalarization overhead based on Args for a vector 1155 // intrinsic. A vectorizer will pass a scalar RetTy and VF > 1, while 1156 // CostModel will pass a vector RetTy and VF is 1. 1157 unsigned ScalarizationCost = std::numeric_limits<unsigned>::max(); 1158 if (RetVF > 1 || VF > 1) { 1159 ScalarizationCost = 0; 1160 if (!RetTy->isVoidTy()) 1161 ScalarizationCost += 1162 getScalarizationOverhead(cast<VectorType>(RetTy), true, false); 1163 ScalarizationCost += getOperandsScalarizationOverhead(Args, VF); 1164 } 1165 1166 IntrinsicCostAttributes Attrs(IID, RetTy, Types, FMF, 1167 ScalarizationCost, I); 1168 return thisT()->getIntrinsicInstrCost(Attrs, CostKind); 1169 } 1170 case Intrinsic::masked_scatter: { 1171 assert(VF == 1 && "Can't vectorize types here."); 1172 const Value *Mask = Args[3]; 1173 bool VarMask = !isa<Constant>(Mask); 1174 Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue(); 1175 return thisT()->getGatherScatterOpCost(Instruction::Store, 1176 Args[0]->getType(), Args[1], 1177 VarMask, Alignment, CostKind, I); 1178 } 1179 case Intrinsic::masked_gather: { 1180 assert(VF == 1 && "Can't vectorize types here."); 1181 const Value *Mask = Args[2]; 1182 bool VarMask = !isa<Constant>(Mask); 1183 Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue(); 1184 return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0], 1185 VarMask, Alignment, CostKind, I); 1186 } 1187 case Intrinsic::experimental_vector_reduce_add: 1188 case Intrinsic::experimental_vector_reduce_mul: 1189 case Intrinsic::experimental_vector_reduce_and: 1190 case Intrinsic::experimental_vector_reduce_or: 1191 case Intrinsic::experimental_vector_reduce_xor: 1192 case Intrinsic::experimental_vector_reduce_v2_fadd: 1193 case Intrinsic::experimental_vector_reduce_v2_fmul: 1194 case Intrinsic::experimental_vector_reduce_smax: 1195 case Intrinsic::experimental_vector_reduce_smin: 1196 case Intrinsic::experimental_vector_reduce_fmax: 1197 case Intrinsic::experimental_vector_reduce_fmin: 1198 case Intrinsic::experimental_vector_reduce_umax: 1199 case Intrinsic::experimental_vector_reduce_umin: { 1200 IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, 1, I); 1201 return getIntrinsicInstrCost(Attrs, CostKind); 1202 } 1203 case Intrinsic::fshl: 1204 case Intrinsic::fshr: { 1205 const Value *X = Args[0]; 1206 const Value *Y = Args[1]; 1207 const Value *Z = Args[2]; 1208 TTI::OperandValueProperties OpPropsX, OpPropsY, OpPropsZ, OpPropsBW; 1209 TTI::OperandValueKind OpKindX = TTI::getOperandInfo(X, OpPropsX); 1210 TTI::OperandValueKind OpKindY = TTI::getOperandInfo(Y, OpPropsY); 1211 TTI::OperandValueKind OpKindZ = TTI::getOperandInfo(Z, OpPropsZ); 1212 TTI::OperandValueKind OpKindBW = TTI::OK_UniformConstantValue; 1213 OpPropsBW = isPowerOf2_32(RetTy->getScalarSizeInBits()) ? TTI::OP_PowerOf2 1214 : TTI::OP_None; 1215 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 1216 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 1217 unsigned Cost = 0; 1218 Cost += 1219 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind); 1220 Cost += 1221 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind); 1222 Cost += thisT()->getArithmeticInstrCost( 1223 BinaryOperator::Shl, RetTy, CostKind, OpKindX, OpKindZ, OpPropsX); 1224 Cost += thisT()->getArithmeticInstrCost( 1225 BinaryOperator::LShr, RetTy, CostKind, OpKindY, OpKindZ, OpPropsY); 1226 // Non-constant shift amounts requires a modulo. 1227 if (OpKindZ != TTI::OK_UniformConstantValue && 1228 OpKindZ != TTI::OK_NonUniformConstantValue) 1229 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::URem, RetTy, 1230 CostKind, OpKindZ, OpKindBW, 1231 OpPropsZ, OpPropsBW); 1232 // For non-rotates (X != Y) we must add shift-by-zero handling costs. 1233 if (X != Y) { 1234 Type *CondTy = RetTy->getWithNewBitWidth(1); 1235 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy, 1236 CostKind); 1237 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, 1238 CondTy, CostKind); 1239 } 1240 return Cost; 1241 } 1242 } 1243 } 1244 1245 /// Get intrinsic cost based on argument types. 1246 /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the 1247 /// cost of scalarizing the arguments and the return value will be computed 1248 /// based on types. getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes & ICA,TTI::TargetCostKind CostKind)1249 unsigned getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 1250 TTI::TargetCostKind CostKind) { 1251 Intrinsic::ID IID = ICA.getID(); 1252 Type *RetTy = ICA.getReturnType(); 1253 const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes(); 1254 FastMathFlags FMF = ICA.getFlags(); 1255 unsigned ScalarizationCostPassed = ICA.getScalarizationCost(); 1256 bool SkipScalarizationCost = ICA.skipScalarizationCost(); 1257 1258 auto *VecOpTy = Tys.empty() ? nullptr : dyn_cast<VectorType>(Tys[0]); 1259 1260 SmallVector<unsigned, 2> ISDs; 1261 unsigned SingleCallCost = 10; // Library call cost. Make it expensive. 1262 switch (IID) { 1263 default: { 1264 // Assume that we need to scalarize this intrinsic. 1265 unsigned ScalarizationCost = ScalarizationCostPassed; 1266 unsigned ScalarCalls = 1; 1267 Type *ScalarRetTy = RetTy; 1268 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) { 1269 if (!SkipScalarizationCost) 1270 ScalarizationCost = getScalarizationOverhead(RetVTy, true, false); 1271 ScalarCalls = std::max(ScalarCalls, 1272 cast<FixedVectorType>(RetVTy)->getNumElements()); 1273 ScalarRetTy = RetTy->getScalarType(); 1274 } 1275 SmallVector<Type *, 4> ScalarTys; 1276 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) { 1277 Type *Ty = Tys[i]; 1278 if (auto *VTy = dyn_cast<VectorType>(Ty)) { 1279 if (!SkipScalarizationCost) 1280 ScalarizationCost += getScalarizationOverhead(VTy, false, true); 1281 ScalarCalls = std::max(ScalarCalls, 1282 cast<FixedVectorType>(VTy)->getNumElements()); 1283 Ty = Ty->getScalarType(); 1284 } 1285 ScalarTys.push_back(Ty); 1286 } 1287 if (ScalarCalls == 1) 1288 return 1; // Return cost of a scalar intrinsic. Assume it to be cheap. 1289 1290 IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF); 1291 unsigned ScalarCost = 1292 thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind); 1293 1294 return ScalarCalls * ScalarCost + ScalarizationCost; 1295 } 1296 // Look for intrinsics that can be lowered directly or turned into a scalar 1297 // intrinsic call. 1298 case Intrinsic::sqrt: 1299 ISDs.push_back(ISD::FSQRT); 1300 break; 1301 case Intrinsic::sin: 1302 ISDs.push_back(ISD::FSIN); 1303 break; 1304 case Intrinsic::cos: 1305 ISDs.push_back(ISD::FCOS); 1306 break; 1307 case Intrinsic::exp: 1308 ISDs.push_back(ISD::FEXP); 1309 break; 1310 case Intrinsic::exp2: 1311 ISDs.push_back(ISD::FEXP2); 1312 break; 1313 case Intrinsic::log: 1314 ISDs.push_back(ISD::FLOG); 1315 break; 1316 case Intrinsic::log10: 1317 ISDs.push_back(ISD::FLOG10); 1318 break; 1319 case Intrinsic::log2: 1320 ISDs.push_back(ISD::FLOG2); 1321 break; 1322 case Intrinsic::fabs: 1323 ISDs.push_back(ISD::FABS); 1324 break; 1325 case Intrinsic::canonicalize: 1326 ISDs.push_back(ISD::FCANONICALIZE); 1327 break; 1328 case Intrinsic::minnum: 1329 ISDs.push_back(ISD::FMINNUM); 1330 if (FMF.noNaNs()) 1331 ISDs.push_back(ISD::FMINIMUM); 1332 break; 1333 case Intrinsic::maxnum: 1334 ISDs.push_back(ISD::FMAXNUM); 1335 if (FMF.noNaNs()) 1336 ISDs.push_back(ISD::FMAXIMUM); 1337 break; 1338 case Intrinsic::copysign: 1339 ISDs.push_back(ISD::FCOPYSIGN); 1340 break; 1341 case Intrinsic::floor: 1342 ISDs.push_back(ISD::FFLOOR); 1343 break; 1344 case Intrinsic::ceil: 1345 ISDs.push_back(ISD::FCEIL); 1346 break; 1347 case Intrinsic::trunc: 1348 ISDs.push_back(ISD::FTRUNC); 1349 break; 1350 case Intrinsic::nearbyint: 1351 ISDs.push_back(ISD::FNEARBYINT); 1352 break; 1353 case Intrinsic::rint: 1354 ISDs.push_back(ISD::FRINT); 1355 break; 1356 case Intrinsic::round: 1357 ISDs.push_back(ISD::FROUND); 1358 break; 1359 case Intrinsic::roundeven: 1360 ISDs.push_back(ISD::FROUNDEVEN); 1361 break; 1362 case Intrinsic::pow: 1363 ISDs.push_back(ISD::FPOW); 1364 break; 1365 case Intrinsic::fma: 1366 ISDs.push_back(ISD::FMA); 1367 break; 1368 case Intrinsic::fmuladd: 1369 ISDs.push_back(ISD::FMA); 1370 break; 1371 case Intrinsic::experimental_constrained_fmuladd: 1372 ISDs.push_back(ISD::STRICT_FMA); 1373 break; 1374 // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free. 1375 case Intrinsic::lifetime_start: 1376 case Intrinsic::lifetime_end: 1377 case Intrinsic::sideeffect: 1378 return 0; 1379 case Intrinsic::masked_store: { 1380 Type *Ty = Tys[0]; 1381 Align TyAlign = thisT()->DL.getABITypeAlign(Ty); 1382 return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0, 1383 CostKind); 1384 } 1385 case Intrinsic::masked_load: { 1386 Type *Ty = RetTy; 1387 Align TyAlign = thisT()->DL.getABITypeAlign(Ty); 1388 return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0, 1389 CostKind); 1390 } 1391 case Intrinsic::experimental_vector_reduce_add: 1392 return thisT()->getArithmeticReductionCost(Instruction::Add, VecOpTy, 1393 /*IsPairwiseForm=*/false, 1394 CostKind); 1395 case Intrinsic::experimental_vector_reduce_mul: 1396 return thisT()->getArithmeticReductionCost(Instruction::Mul, VecOpTy, 1397 /*IsPairwiseForm=*/false, 1398 CostKind); 1399 case Intrinsic::experimental_vector_reduce_and: 1400 return thisT()->getArithmeticReductionCost(Instruction::And, VecOpTy, 1401 /*IsPairwiseForm=*/false, 1402 CostKind); 1403 case Intrinsic::experimental_vector_reduce_or: 1404 return thisT()->getArithmeticReductionCost(Instruction::Or, VecOpTy, 1405 /*IsPairwiseForm=*/false, 1406 CostKind); 1407 case Intrinsic::experimental_vector_reduce_xor: 1408 return thisT()->getArithmeticReductionCost(Instruction::Xor, VecOpTy, 1409 /*IsPairwiseForm=*/false, 1410 CostKind); 1411 case Intrinsic::experimental_vector_reduce_v2_fadd: 1412 // FIXME: Add new flag for cost of strict reductions. 1413 return thisT()->getArithmeticReductionCost(Instruction::FAdd, VecOpTy, 1414 /*IsPairwiseForm=*/false, 1415 CostKind); 1416 case Intrinsic::experimental_vector_reduce_v2_fmul: 1417 // FIXME: Add new flag for cost of strict reductions. 1418 return thisT()->getArithmeticReductionCost(Instruction::FMul, VecOpTy, 1419 /*IsPairwiseForm=*/false, 1420 CostKind); 1421 case Intrinsic::experimental_vector_reduce_smax: 1422 case Intrinsic::experimental_vector_reduce_smin: 1423 case Intrinsic::experimental_vector_reduce_fmax: 1424 case Intrinsic::experimental_vector_reduce_fmin: 1425 return thisT()->getMinMaxReductionCost( 1426 VecOpTy, cast<VectorType>(CmpInst::makeCmpResultType(VecOpTy)), 1427 /*IsPairwiseForm=*/false, 1428 /*IsUnsigned=*/false, CostKind); 1429 case Intrinsic::experimental_vector_reduce_umax: 1430 case Intrinsic::experimental_vector_reduce_umin: 1431 return thisT()->getMinMaxReductionCost( 1432 VecOpTy, cast<VectorType>(CmpInst::makeCmpResultType(VecOpTy)), 1433 /*IsPairwiseForm=*/false, 1434 /*IsUnsigned=*/true, CostKind); 1435 case Intrinsic::sadd_sat: 1436 case Intrinsic::ssub_sat: { 1437 Type *CondTy = RetTy->getWithNewBitWidth(1); 1438 1439 Type *OpTy = StructType::create({RetTy, CondTy}); 1440 Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat 1441 ? Intrinsic::sadd_with_overflow 1442 : Intrinsic::ssub_with_overflow; 1443 1444 // SatMax -> Overflow && SumDiff < 0 1445 // SatMin -> Overflow && SumDiff >= 0 1446 unsigned Cost = 0; 1447 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF, 1448 ScalarizationCostPassed); 1449 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind); 1450 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy, 1451 CostKind); 1452 Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, 1453 CondTy, CostKind); 1454 return Cost; 1455 } 1456 case Intrinsic::uadd_sat: 1457 case Intrinsic::usub_sat: { 1458 Type *CondTy = RetTy->getWithNewBitWidth(1); 1459 1460 Type *OpTy = StructType::create({RetTy, CondTy}); 1461 Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat 1462 ? Intrinsic::uadd_with_overflow 1463 : Intrinsic::usub_with_overflow; 1464 1465 unsigned Cost = 0; 1466 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF, 1467 ScalarizationCostPassed); 1468 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind); 1469 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy, 1470 CostKind); 1471 return Cost; 1472 } 1473 case Intrinsic::smul_fix: 1474 case Intrinsic::umul_fix: { 1475 unsigned ExtSize = RetTy->getScalarSizeInBits() * 2; 1476 Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize); 1477 1478 unsigned ExtOp = 1479 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt; 1480 1481 unsigned Cost = 0; 1482 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CostKind); 1483 Cost += 1484 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind); 1485 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy, 1486 CostKind); 1487 Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, RetTy, 1488 CostKind, TTI::OK_AnyValue, 1489 TTI::OK_UniformConstantValue); 1490 Cost += thisT()->getArithmeticInstrCost(Instruction::Shl, RetTy, CostKind, 1491 TTI::OK_AnyValue, 1492 TTI::OK_UniformConstantValue); 1493 Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind); 1494 return Cost; 1495 } 1496 case Intrinsic::sadd_with_overflow: 1497 case Intrinsic::ssub_with_overflow: { 1498 Type *SumTy = RetTy->getContainedType(0); 1499 Type *OverflowTy = RetTy->getContainedType(1); 1500 unsigned Opcode = IID == Intrinsic::sadd_with_overflow 1501 ? BinaryOperator::Add 1502 : BinaryOperator::Sub; 1503 1504 // LHSSign -> LHS >= 0 1505 // RHSSign -> RHS >= 0 1506 // SumSign -> Sum >= 0 1507 // 1508 // Add: 1509 // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign) 1510 // Sub: 1511 // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign) 1512 unsigned Cost = 0; 1513 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind); 1514 Cost += 3 * thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, 1515 OverflowTy, CostKind); 1516 Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, OverflowTy, 1517 OverflowTy, CostKind); 1518 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, OverflowTy, 1519 CostKind); 1520 return Cost; 1521 } 1522 case Intrinsic::uadd_with_overflow: 1523 case Intrinsic::usub_with_overflow: { 1524 Type *SumTy = RetTy->getContainedType(0); 1525 Type *OverflowTy = RetTy->getContainedType(1); 1526 unsigned Opcode = IID == Intrinsic::uadd_with_overflow 1527 ? BinaryOperator::Add 1528 : BinaryOperator::Sub; 1529 1530 unsigned Cost = 0; 1531 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind); 1532 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, 1533 OverflowTy, CostKind); 1534 return Cost; 1535 } 1536 case Intrinsic::smul_with_overflow: 1537 case Intrinsic::umul_with_overflow: { 1538 Type *MulTy = RetTy->getContainedType(0); 1539 Type *OverflowTy = RetTy->getContainedType(1); 1540 unsigned ExtSize = MulTy->getScalarSizeInBits() * 2; 1541 Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize); 1542 1543 unsigned ExtOp = 1544 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt; 1545 1546 unsigned Cost = 0; 1547 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CostKind); 1548 Cost += 1549 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind); 1550 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy, 1551 CostKind); 1552 Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, MulTy, 1553 CostKind, TTI::OK_AnyValue, 1554 TTI::OK_UniformConstantValue); 1555 1556 if (IID == Intrinsic::smul_with_overflow) 1557 Cost += thisT()->getArithmeticInstrCost(Instruction::AShr, MulTy, 1558 CostKind, TTI::OK_AnyValue, 1559 TTI::OK_UniformConstantValue); 1560 1561 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, MulTy, 1562 OverflowTy, CostKind); 1563 return Cost; 1564 } 1565 case Intrinsic::ctpop: 1566 ISDs.push_back(ISD::CTPOP); 1567 // In case of legalization use TCC_Expensive. This is cheaper than a 1568 // library call but still not a cheap instruction. 1569 SingleCallCost = TargetTransformInfo::TCC_Expensive; 1570 break; 1571 // FIXME: ctlz, cttz, ... 1572 case Intrinsic::bswap: 1573 ISDs.push_back(ISD::BSWAP); 1574 break; 1575 case Intrinsic::bitreverse: 1576 ISDs.push_back(ISD::BITREVERSE); 1577 break; 1578 } 1579 1580 const TargetLoweringBase *TLI = getTLI(); 1581 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy); 1582 1583 SmallVector<unsigned, 2> LegalCost; 1584 SmallVector<unsigned, 2> CustomCost; 1585 for (unsigned ISD : ISDs) { 1586 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) { 1587 if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() && 1588 TLI->isFAbsFree(LT.second)) { 1589 return 0; 1590 } 1591 1592 // The operation is legal. Assume it costs 1. 1593 // If the type is split to multiple registers, assume that there is some 1594 // overhead to this. 1595 // TODO: Once we have extract/insert subvector cost we need to use them. 1596 if (LT.first > 1) 1597 LegalCost.push_back(LT.first * 2); 1598 else 1599 LegalCost.push_back(LT.first * 1); 1600 } else if (!TLI->isOperationExpand(ISD, LT.second)) { 1601 // If the operation is custom lowered then assume 1602 // that the code is twice as expensive. 1603 CustomCost.push_back(LT.first * 2); 1604 } 1605 } 1606 1607 auto MinLegalCostI = std::min_element(LegalCost.begin(), LegalCost.end()); 1608 if (MinLegalCostI != LegalCost.end()) 1609 return *MinLegalCostI; 1610 1611 auto MinCustomCostI = 1612 std::min_element(CustomCost.begin(), CustomCost.end()); 1613 if (MinCustomCostI != CustomCost.end()) 1614 return *MinCustomCostI; 1615 1616 // If we can't lower fmuladd into an FMA estimate the cost as a floating 1617 // point mul followed by an add. 1618 if (IID == Intrinsic::fmuladd) 1619 return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy, 1620 CostKind) + 1621 thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy, 1622 CostKind); 1623 if (IID == Intrinsic::experimental_constrained_fmuladd) { 1624 IntrinsicCostAttributes FMulAttrs( 1625 Intrinsic::experimental_constrained_fmul, RetTy, Tys); 1626 IntrinsicCostAttributes FAddAttrs( 1627 Intrinsic::experimental_constrained_fadd, RetTy, Tys); 1628 return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) + 1629 thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind); 1630 } 1631 1632 // Else, assume that we need to scalarize this intrinsic. For math builtins 1633 // this will emit a costly libcall, adding call overhead and spills. Make it 1634 // very expensive. 1635 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) { 1636 unsigned ScalarizationCost = SkipScalarizationCost ? 1637 ScalarizationCostPassed : getScalarizationOverhead(RetVTy, true, false); 1638 1639 unsigned ScalarCalls = cast<FixedVectorType>(RetVTy)->getNumElements(); 1640 SmallVector<Type *, 4> ScalarTys; 1641 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) { 1642 Type *Ty = Tys[i]; 1643 if (Ty->isVectorTy()) 1644 Ty = Ty->getScalarType(); 1645 ScalarTys.push_back(Ty); 1646 } 1647 IntrinsicCostAttributes Attrs(IID, RetTy->getScalarType(), ScalarTys, FMF); 1648 unsigned ScalarCost = thisT()->getIntrinsicInstrCost(Attrs, CostKind); 1649 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) { 1650 if (auto *VTy = dyn_cast<VectorType>(Tys[i])) { 1651 if (!ICA.skipScalarizationCost()) 1652 ScalarizationCost += getScalarizationOverhead(VTy, false, true); 1653 ScalarCalls = std::max(ScalarCalls, 1654 cast<FixedVectorType>(VTy)->getNumElements()); 1655 } 1656 } 1657 return ScalarCalls * ScalarCost + ScalarizationCost; 1658 } 1659 1660 // This is going to be turned into a library call, make it expensive. 1661 return SingleCallCost; 1662 } 1663 1664 /// Compute a cost of the given call instruction. 1665 /// 1666 /// Compute the cost of calling function F with return type RetTy and 1667 /// argument types Tys. F might be nullptr, in this case the cost of an 1668 /// arbitrary call with the specified signature will be returned. 1669 /// This is used, for instance, when we estimate call of a vector 1670 /// counterpart of the given function. 1671 /// \param F Called function, might be nullptr. 1672 /// \param RetTy Return value types. 1673 /// \param Tys Argument types. 1674 /// \returns The cost of Call instruction. 1675 unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys, 1676 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) { 1677 return 10; 1678 } 1679 getNumberOfParts(Type * Tp)1680 unsigned getNumberOfParts(Type *Tp) { 1681 std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Tp); 1682 return LT.first; 1683 } 1684 getAddressComputationCost(Type * Ty,ScalarEvolution *,const SCEV *)1685 unsigned getAddressComputationCost(Type *Ty, ScalarEvolution *, 1686 const SCEV *) { 1687 return 0; 1688 } 1689 1690 /// Try to calculate arithmetic and shuffle op costs for reduction operations. 1691 /// We're assuming that reduction operation are performing the following way: 1692 /// 1. Non-pairwise reduction 1693 /// %val1 = shufflevector<n x t> %val, <n x t> %undef, 1694 /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef> 1695 /// \----------------v-------------/ \----------v------------/ 1696 /// n/2 elements n/2 elements 1697 /// %red1 = op <n x t> %val, <n x t> val1 1698 /// After this operation we have a vector %red1 where only the first n/2 1699 /// elements are meaningful, the second n/2 elements are undefined and can be 1700 /// dropped. All other operations are actually working with the vector of 1701 /// length n/2, not n, though the real vector length is still n. 1702 /// %val2 = shufflevector<n x t> %red1, <n x t> %undef, 1703 /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef> 1704 /// \----------------v-------------/ \----------v------------/ 1705 /// n/4 elements 3*n/4 elements 1706 /// %red2 = op <n x t> %red1, <n x t> val2 - working with the vector of 1707 /// length n/2, the resulting vector has length n/4 etc. 1708 /// 2. Pairwise reduction: 1709 /// Everything is the same except for an additional shuffle operation which 1710 /// is used to produce operands for pairwise kind of reductions. 1711 /// %val1 = shufflevector<n x t> %val, <n x t> %undef, 1712 /// <n x i32> <i32 0, i32 2, ..., i32 n-2, i32 undef, ..., i32 undef> 1713 /// \-------------v----------/ \----------v------------/ 1714 /// n/2 elements n/2 elements 1715 /// %val2 = shufflevector<n x t> %val, <n x t> %undef, 1716 /// <n x i32> <i32 1, i32 3, ..., i32 n-1, i32 undef, ..., i32 undef> 1717 /// \-------------v----------/ \----------v------------/ 1718 /// n/2 elements n/2 elements 1719 /// %red1 = op <n x t> %val1, <n x t> val2 1720 /// Again, the operation is performed on <n x t> vector, but the resulting 1721 /// vector %red1 is <n/2 x t> vector. 1722 /// 1723 /// The cost model should take into account that the actual length of the 1724 /// vector is reduced on each iteration. getArithmeticReductionCost(unsigned Opcode,VectorType * Ty,bool IsPairwise,TTI::TargetCostKind CostKind)1725 unsigned getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, 1726 bool IsPairwise, 1727 TTI::TargetCostKind CostKind) { 1728 Type *ScalarTy = Ty->getElementType(); 1729 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements(); 1730 unsigned NumReduxLevels = Log2_32(NumVecElts); 1731 unsigned ArithCost = 0; 1732 unsigned ShuffleCost = 0; 1733 std::pair<unsigned, MVT> LT = 1734 thisT()->getTLI()->getTypeLegalizationCost(DL, Ty); 1735 unsigned LongVectorCount = 0; 1736 unsigned MVTLen = 1737 LT.second.isVector() ? LT.second.getVectorNumElements() : 1; 1738 while (NumVecElts > MVTLen) { 1739 NumVecElts /= 2; 1740 VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts); 1741 // Assume the pairwise shuffles add a cost. 1742 ShuffleCost += 1743 (IsPairwise + 1) * thisT()->getShuffleCost(TTI::SK_ExtractSubvector, 1744 Ty, NumVecElts, SubTy); 1745 ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind); 1746 Ty = SubTy; 1747 ++LongVectorCount; 1748 } 1749 1750 NumReduxLevels -= LongVectorCount; 1751 1752 // The minimal length of the vector is limited by the real length of vector 1753 // operations performed on the current platform. That's why several final 1754 // reduction operations are performed on the vectors with the same 1755 // architecture-dependent length. 1756 1757 // Non pairwise reductions need one shuffle per reduction level. Pairwise 1758 // reductions need two shuffles on every level, but the last one. On that 1759 // level one of the shuffles is <0, u, u, ...> which is identity. 1760 unsigned NumShuffles = NumReduxLevels; 1761 if (IsPairwise && NumReduxLevels >= 1) 1762 NumShuffles += NumReduxLevels - 1; 1763 ShuffleCost += NumShuffles * 1764 thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, 0, Ty); 1765 ArithCost += NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty); 1766 return ShuffleCost + ArithCost + 1767 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, 0); 1768 } 1769 1770 /// Try to calculate op costs for min/max reduction operations. 1771 /// \param CondTy Conditional type for the Select instruction. getMinMaxReductionCost(VectorType * Ty,VectorType * CondTy,bool IsPairwise,bool IsUnsigned,TTI::TargetCostKind CostKind)1772 unsigned getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy, 1773 bool IsPairwise, bool IsUnsigned, 1774 TTI::TargetCostKind CostKind) { 1775 Type *ScalarTy = Ty->getElementType(); 1776 Type *ScalarCondTy = CondTy->getElementType(); 1777 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements(); 1778 unsigned NumReduxLevels = Log2_32(NumVecElts); 1779 unsigned CmpOpcode; 1780 if (Ty->isFPOrFPVectorTy()) { 1781 CmpOpcode = Instruction::FCmp; 1782 } else { 1783 assert(Ty->isIntOrIntVectorTy() && 1784 "expecting floating point or integer type for min/max reduction"); 1785 CmpOpcode = Instruction::ICmp; 1786 } 1787 unsigned MinMaxCost = 0; 1788 unsigned ShuffleCost = 0; 1789 std::pair<unsigned, MVT> LT = 1790 thisT()->getTLI()->getTypeLegalizationCost(DL, Ty); 1791 unsigned LongVectorCount = 0; 1792 unsigned MVTLen = 1793 LT.second.isVector() ? LT.second.getVectorNumElements() : 1; 1794 while (NumVecElts > MVTLen) { 1795 NumVecElts /= 2; 1796 auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts); 1797 CondTy = FixedVectorType::get(ScalarCondTy, NumVecElts); 1798 1799 // Assume the pairwise shuffles add a cost. 1800 ShuffleCost += 1801 (IsPairwise + 1) * thisT()->getShuffleCost(TTI::SK_ExtractSubvector, 1802 Ty, NumVecElts, SubTy); 1803 MinMaxCost += 1804 thisT()->getCmpSelInstrCost(CmpOpcode, SubTy, CondTy, CostKind) + 1805 thisT()->getCmpSelInstrCost(Instruction::Select, SubTy, CondTy, 1806 CostKind); 1807 Ty = SubTy; 1808 ++LongVectorCount; 1809 } 1810 1811 NumReduxLevels -= LongVectorCount; 1812 1813 // The minimal length of the vector is limited by the real length of vector 1814 // operations performed on the current platform. That's why several final 1815 // reduction opertions are perfomed on the vectors with the same 1816 // architecture-dependent length. 1817 1818 // Non pairwise reductions need one shuffle per reduction level. Pairwise 1819 // reductions need two shuffles on every level, but the last one. On that 1820 // level one of the shuffles is <0, u, u, ...> which is identity. 1821 unsigned NumShuffles = NumReduxLevels; 1822 if (IsPairwise && NumReduxLevels >= 1) 1823 NumShuffles += NumReduxLevels - 1; 1824 ShuffleCost += NumShuffles * 1825 thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, 0, Ty); 1826 MinMaxCost += 1827 NumReduxLevels * 1828 (thisT()->getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CostKind) + 1829 thisT()->getCmpSelInstrCost(Instruction::Select, Ty, CondTy, 1830 CostKind)); 1831 // The last min/max should be in vector registers and we counted it above. 1832 // So just need a single extractelement. 1833 return ShuffleCost + MinMaxCost + 1834 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, 0); 1835 } 1836 getVectorSplitCost()1837 unsigned getVectorSplitCost() { return 1; } 1838 1839 /// @} 1840 }; 1841 1842 /// Concrete BasicTTIImpl that can be used if no further customization 1843 /// is needed. 1844 class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> { 1845 using BaseT = BasicTTIImplBase<BasicTTIImpl>; 1846 1847 friend class BasicTTIImplBase<BasicTTIImpl>; 1848 1849 const TargetSubtargetInfo *ST; 1850 const TargetLoweringBase *TLI; 1851 getST()1852 const TargetSubtargetInfo *getST() const { return ST; } getTLI()1853 const TargetLoweringBase *getTLI() const { return TLI; } 1854 1855 public: 1856 explicit BasicTTIImpl(const TargetMachine *TM, const Function &F); 1857 }; 1858 1859 } // end namespace llvm 1860 1861 #endif // LLVM_CODEGEN_BASICTTIIMPL_H 1862