//===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides helpers for the implementation of
/// a TargetTransformInfo-conforming class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H

#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"

namespace llvm {

/// Base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
class TargetTransformInfoImplBase {
protected:
  typedef TargetTransformInfo TTI;

  const DataLayout &DL;

  explicit TargetTransformInfoImplBase(const DataLayout &DL) : DL(DL) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)
      : DL(Arg.DL) {}
  TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) : DL(Arg.DL) {}

  const DataLayout &getDataLayout() const { return DL; }

  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    switch (Opcode) {
    default:
      // By default, just classify everything as 'basic'.
      return TTI::TCC_Basic;

    case Instruction::GetElementPtr:
      llvm_unreachable("Use getGEPCost for GEP operations!");

    case Instruction::BitCast:
      assert(OpTy && "Cast instructions must provide the operand type");
      if (Ty == OpTy || (Ty->isPointerTy() && OpTy->isPointerTy()))
        // Identity and pointer-to-pointer casts are free.
        return TTI::TCC_Free;

      // Otherwise, the default basic cost is used.
      return TTI::TCC_Basic;

    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem:
      return TTI::TCC_Expensive;

    case Instruction::IntToPtr: {
      // An inttoptr cast is free so long as the input is a legal integer type
      // which doesn't contain values outside the range of a pointer.
      unsigned OpSize = OpTy->getScalarSizeInBits();
      if (DL.isLegalInteger(OpSize) &&
          OpSize <= DL.getPointerTypeSizeInBits(Ty))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::PtrToInt: {
      // A ptrtoint cast is free so long as the result is large enough to store
      // the pointer, and a legal integer type.
      unsigned DestSize = Ty->getScalarSizeInBits();
      if (DL.isLegalInteger(DestSize) &&
          DestSize >= DL.getPointerTypeSizeInBits(OpTy))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::Trunc:
      // trunc to a native type is free (assuming the target has compare and
      // shift-right of the same width).
      if (DL.isLegalInteger(DL.getTypeSizeInBits(Ty)))
        return TTI::TCC_Free;

      return TTI::TCC_Basic;
    }
  }
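
  // Illustrative only (not part of the original header): with the defaults
  // above, a pointer-to-pointer bitcast is classified as free while an
  // integer division is expensive. Assuming a concrete implementation object
  // `TTIImpl` and IR types `Int8PtrTy`, `Int32PtrTy`, and `Int32Ty`
  // (hypothetical names), a caller would see roughly:
  //
  //   TTIImpl.getOperationCost(Instruction::BitCast, Int8PtrTy, Int32PtrTy);
  //   // -> TTI::TCC_Free (pointer-to-pointer cast)
  //   TTIImpl.getOperationCost(Instruction::SDiv, Int32Ty, nullptr);
  //   // -> TTI::TCC_Expensive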

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    // In the basic model, we just assume that all-constant GEPs will be folded
    // into their uses via addressing modes.
    for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
      if (!isa<Constant>(Operands[Idx]))
        return TTI::TCC_Basic;

    return TTI::TCC_Free;
  }

  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize) {
    JTSize = 0;
    return SI.getNumCases();
  }

  int getExtCost(const Instruction *I, const Value *Src) {
    return TTI::TCC_Basic;
  }

  unsigned getCallCost(FunctionType *FTy, int NumArgs, const User *U) {
    assert(FTy && "FunctionType must be provided to this routine.");

    // The target-independent implementation just measures the size of the
    // function by approximating that each argument will take on average one
    // instruction to prepare.

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = FTy->getNumParams();

    return TTI::TCC_Basic * (NumArgs + 1);
  }

  unsigned getInliningThresholdMultiplier() { return 1; }

  int getInlinerVectorBonusPercent() { return 150; }

  unsigned getMemcpyCost(const Instruction *I) {
    return TTI::TCC_Expensive;
  }

  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isAlwaysUniform(const Value *V) { return false; }

  unsigned getFlatAddressSpace() {
    return -1;
  }

  bool isLoweredToCall(const Function *F) {
    assert(F && "A concrete function must be provided to this routine.");

    // FIXME: These should almost certainly not be handled here, and instead
    // handled with the help of TLI or the target itself. This was largely
    // ported from existing analysis heuristics here so that such refactorings
    // can take place in the future.

    if (F->isIntrinsic())
      return false;

    if (F->hasLocalLinkage() || !F->hasName())
      return true;

    StringRef Name = F->getName();

    // These will all likely lower to a single selection DAG node.
    if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
        Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
        Name == "fmin" || Name == "fminf" || Name == "fminl" ||
        Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
        Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
        Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
      return false;

    // These are all likely to be optimized into something smaller.
    if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
        Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
        Name == "floorf" || Name == "ceil" || Name == "round" ||
        Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
        Name == "llabs")
      return false;

    return true;
  }
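
  // Note (illustrative, not in the original header): under this default,
  // well-known libm-style calls such as "sqrtf" or "fabs" report false (they
  // are expected to lower to a single node or something smaller), while an
  // arbitrary named external function reports true and is therefore treated
  // as a real call by cost-model clients.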

  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC,
                                TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) {
    return false;
  }

  void getUnrollingPreferences(Loop *, ScalarEvolution &,
                               TTI::UnrollingPreferences &) {}

  bool isLegalAddImmediate(int64_t Imm) { return false; }

  bool isLegalICmpImmediate(int64_t Imm) { return false; }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace, Instruction *I = nullptr) {
    // Guess that only reg and reg+reg addressing is allowed. This heuristic is
    // taken from the implementation of LSR.
    return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
  }

  bool isLSRCostLess(TTI::LSRCost &C1, TTI::LSRCost &C2) {
    return std::tie(C1.NumRegs, C1.AddRecCost, C1.NumIVMuls, C1.NumBaseAdds,
                    C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.NumRegs, C2.AddRecCost, C2.NumIVMuls, C2.NumBaseAdds,
                    C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  }

  bool canMacroFuseCmp() { return false; }

  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
                  DominatorTree *DT, AssumptionCache *AC,
                  TargetLibraryInfo *LibInfo) {
    return false;
  }

  bool shouldFavorPostInc() const { return false; }

  bool shouldFavorBackedgeIndex(const Loop *L) const { return false; }

  bool isLegalMaskedStore(Type *DataType) { return false; }

  bool isLegalMaskedLoad(Type *DataType) { return false; }

  bool isLegalNTStore(Type *DataType, unsigned Alignment) {
    // By default, assume nontemporal memory stores are available for stores
    // that are aligned and have a size that is a power of 2.
    unsigned DataSize = DL.getTypeStoreSize(DataType);
    return Alignment >= DataSize && isPowerOf2_32(DataSize);
  }

  bool isLegalNTLoad(Type *DataType, unsigned Alignment) {
    // By default, assume nontemporal memory loads are available for loads that
    // are aligned and have a size that is a power of 2.
    unsigned DataSize = DL.getTypeStoreSize(DataType);
    return Alignment >= DataSize && isPowerOf2_32(DataSize);
  }
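
  // Illustrative only (not part of the original header): with the defaults
  // above, a 16-byte <4 x float> nontemporal store with 16-byte alignment is
  // reported legal (the size is a power of two and the alignment covers it),
  // while the same store with only 4-byte alignment is not.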

  bool isLegalMaskedScatter(Type *DataType) { return false; }

  bool isLegalMaskedGather(Type *DataType) { return false; }

  bool isLegalMaskedCompressStore(Type *DataType) { return false; }

  bool isLegalMaskedExpandLoad(Type *DataType) { return false; }

  bool hasDivRemOp(Type *DataType, bool IsSigned) { return false; }

  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) { return false; }

  bool prefersVectorizedAddressing() { return true; }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
    // Guess that all legal addressing modes are free.
    if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                              Scale, AddrSpace))
      return 0;
    return -1;
  }

  bool LSRWithInstrQueries() { return false; }

  bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }

  bool isProfitableToHoist(Instruction *I) { return true; }

  bool useAA() { return false; }

  bool isTypeLegal(Type *Ty) { return false; }

  unsigned getJumpBufAlignment() { return 0; }

  unsigned getJumpBufSize() { return 0; }

  bool shouldBuildLookupTables() { return true; }
  bool shouldBuildLookupTablesForConstant(Constant *C) { return true; }

  bool useColdCCForColdCall(Function &F) { return false; }

  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    return 0;
  }

  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                            unsigned VF) { return 0; }

  bool supportsEfficientVectorElementLoadStore() { return false; }

  bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }

  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const {
    return {};
  }

  bool enableInterleavedAccessVectorization() { return false; }

  bool enableMaskedInterleavedAccessVectorization() { return false; }

  bool isFPVectorizationPotentiallyUnsafe() { return false; }

  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth,
                                      unsigned AddressSpace,
                                      unsigned Alignment,
                                      bool *Fast) { return false; }

  TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
    return TTI::PSK_Software;
  }

  bool haveFastSqrt(Type *Ty) { return false; }

  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) { return true; }

  unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }

  int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                            Type *Ty) {
    return 0;
  }

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) { return TTI::TCC_Basic; }

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getNumberOfRegisters(bool Vector) { return 8; }

  unsigned getRegisterBitWidth(bool Vector) const { return 32; }

  unsigned getMinVectorRegisterBitWidth() { return 128; }

  bool shouldMaximizeVectorBandwidth(bool OptSize) const { return false; }

  unsigned getMinimumVF(unsigned ElemWidth) const { return 0; }

  bool
  shouldConsiderAddressTypePromotion(const Instruction &I,
                                     bool &AllowPromotionWithoutCommonHeader) {
    AllowPromotionWithoutCommonHeader = false;
    return false;
  }

  unsigned getCacheLineSize() { return 0; }

  llvm::Optional<unsigned> getCacheSize(TargetTransformInfo::CacheLevel Level) {
    switch (Level) {
    case TargetTransformInfo::CacheLevel::L1D:
      LLVM_FALLTHROUGH;
    case TargetTransformInfo::CacheLevel::L2D:
      return llvm::Optional<unsigned>();
    }

    llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
  }

  llvm::Optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) {
    switch (Level) {
    case TargetTransformInfo::CacheLevel::L1D:
      LLVM_FALLTHROUGH;
    case TargetTransformInfo::CacheLevel::L2D:
      return llvm::Optional<unsigned>();
    }

    llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
  }
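
  // Note (illustrative, not in the original header): the two cache hooks
  // above return an empty llvm::Optional, meaning "no information available";
  // targets that know their cache geometry override them with concrete
  // values.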

  unsigned getPrefetchDistance() { return 0; }

  unsigned getMinPrefetchStride() { return 1; }

  unsigned getMaxPrefetchIterationsAhead() { return UINT_MAX; }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }

  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                  TTI::OperandValueKind Opd1Info,
                                  TTI::OperandValueKind Opd2Info,
                                  TTI::OperandValueProperties Opd1PropInfo,
                                  TTI::OperandValueProperties Opd2PropInfo,
                                  ArrayRef<const Value *> Args) {
    return 1;
  }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Ty, int Index,
                          Type *SubTp) {
    return 1;
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                            const Instruction *I) { return 1; }

  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                    VectorType *VecTy, unsigned Index) {
    return 1;
  }

  unsigned getCFInstrCost(unsigned Opcode) { return 1; }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                              const Instruction *I) {
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    return 1;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace, const Instruction *I) {
    return 1;
  }

  unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) {
    return 1;
  }

  unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                                  bool VariableMask,
                                  unsigned Alignment) {
    return 1;
  }

  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                      unsigned Factor,
                                      ArrayRef<unsigned> Indices,
                                      unsigned Alignment, unsigned AddressSpace,
                                      bool UseMaskForCond = false,
                                      bool UseMaskForGaps = false) {
    return 1;
  }

  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Type *> Tys, FastMathFlags FMF,
                                 unsigned ScalarizationCostPassed) {
    return 1;
  }
  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Value *> Args, FastMathFlags FMF,
                                 unsigned VF) {
    return 1;
  }

  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
    return 1;
  }

  unsigned getNumberOfParts(Type *Tp) { return 0; }

  unsigned getAddressComputationCost(Type *Tp, ScalarEvolution *,
                                     const SCEV *) {
    return 0;
  }

  unsigned getArithmeticReductionCost(unsigned, Type *, bool) { return 1; }

  unsigned getMinMaxReductionCost(Type *, Type *, bool, bool) { return 1; }

  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }
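
  // Note (illustrative, not in the original header): the per-instruction cost
  // hooks above (getArithmeticInstrCost, getMemoryOpCost, and friends) model
  // every operation as a single unit-cost instruction, so a scalar add and a
  // masked <4 x i32> gather both report a cost of 1 by default. Concrete
  // implementations are expected to override these hooks with numbers that
  // reflect the actual target.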

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
    return false;
  }

  unsigned getAtomicMemIntrinsicMaxElementSize() const {
    // Note for overrides: You must ensure for all element unordered-atomic
    // memory intrinsics that all power-of-2 element sizes up to, and
    // including, the return value of this method have a corresponding
    // runtime lib call. These runtime lib call definitions can be found
    // in RuntimeLibcalls.h
    return 0;
  }

  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) {
    return nullptr;
  }

  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                  unsigned SrcAlign, unsigned DestAlign) const {
    return Type::getInt8Ty(Context);
  }

  void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
                                         LLVMContext &Context,
                                         unsigned RemainingBytes,
                                         unsigned SrcAlign,
                                         unsigned DestAlign) const {
    for (unsigned i = 0; i != RemainingBytes; ++i)
      OpsOut.push_back(Type::getInt8Ty(Context));
  }

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const {
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }

  bool areFunctionArgsABICompatible(const Function *Caller,
                                    const Function *Callee,
                                    SmallPtrSetImpl<Argument *> &Args) const {
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }

  bool isIndexedLoadLegal(TTI::MemIndexedMode Mode, Type *Ty,
                          const DataLayout &DL) const {
    return false;
  }

  bool isIndexedStoreLegal(TTI::MemIndexedMode Mode, Type *Ty,
                           const DataLayout &DL) const {
    return false;
  }

  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const { return 128; }

  bool isLegalToVectorizeLoad(LoadInst *LI) const { return true; }

  bool isLegalToVectorizeStore(StoreInst *SI) const { return true; }

  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const {
    return true;
  }

  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const {
    return true;
  }

  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const {
    return VF;
  }

  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const {
    return VF;
  }

  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                             TTI::ReductionFlags Flags) const {
    return false;
  }

  bool shouldExpandReduction(const IntrinsicInst *II) const {
    return true;
  }

  unsigned getGISelRematGlobalCost() const {
    return 1;
  }
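
  // Note (illustrative, not in the original header): by default a caller and
  // callee are considered inline-compatible only when their "target-cpu" and
  // "target-features" attribute strings match exactly; e.g. a callee built
  // with an extra feature string would not be considered compatible with a
  // caller lacking it unless a target overrides this hook with a subset
  // check.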

protected:
  // Obtain the minimum required size to hold the value (without the sign).
  // In case of a vector it returns the min required size for one element.
  unsigned minRequiredElementSize(const Value *Val, bool &isSigned) {
    if (isa<ConstantDataVector>(Val) || isa<ConstantVector>(Val)) {
      const auto *VectorValue = cast<Constant>(Val);

      // In case of a vector we need to pick the max of the min required
      // sizes of the elements.
      auto *VT = cast<VectorType>(Val->getType());

      // Assume unsigned elements.
      isSigned = false;

      // The max required size is the total vector width divided by the
      // number of elements in the vector.
      unsigned MaxRequiredSize = VT->getBitWidth() / VT->getNumElements();

      unsigned MinRequiredSize = 0;
      for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
        if (auto *IntElement =
                dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
          bool signedElement = IntElement->getValue().isNegative();
          // Get the element min required size.
          unsigned ElementMinRequiredSize =
              IntElement->getValue().getMinSignedBits() - 1;
          // If one element is signed then the whole vector is signed.
          isSigned |= signedElement;
          // Save the max required bit size between all the elements.
          MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
        } else {
          // Not an int constant element.
          return MaxRequiredSize;
        }
      }
      return MinRequiredSize;
    }

    if (const auto *CI = dyn_cast<ConstantInt>(Val)) {
      isSigned = CI->getValue().isNegative();
      return CI->getValue().getMinSignedBits() - 1;
    }

    if (const auto *Cast = dyn_cast<SExtInst>(Val)) {
      isSigned = true;
      return Cast->getSrcTy()->getScalarSizeInBits() - 1;
    }

    if (const auto *Cast = dyn_cast<ZExtInst>(Val)) {
      isSigned = false;
      return Cast->getSrcTy()->getScalarSizeInBits();
    }

    isSigned = false;
    return Val->getType()->getScalarSizeInBits();
  }

  bool isStridedAccess(const SCEV *Ptr) {
    return Ptr && isa<SCEVAddRecExpr>(Ptr);
  }

  const SCEVConstant *getConstantStrideStep(ScalarEvolution *SE,
                                            const SCEV *Ptr) {
    if (!isStridedAccess(Ptr))
      return nullptr;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ptr);
    return dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*SE));
  }

  bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr,
                                       int64_t MergeDistance) {
    const SCEVConstant *Step = getConstantStrideStep(SE, Ptr);
    if (!Step)
      return false;
    APInt StrideVal = Step->getAPInt();
    if (StrideVal.getBitWidth() > 64)
      return false;
    // FIXME: Need to take absolute value for negative stride case.
    return StrideVal.getSExtValue() < MergeDistance;
  }
};
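
// A minimal sketch (illustrative, not part of this header) of how a concrete
// implementation can hook into the CRTP helper below. The class name
// MyTargetTTIImpl and its constructor are hypothetical; in-tree targets
// normally go through BasicTTIImplBase (llvm/CodeGen/BasicTTIImpl.h), which
// itself builds on this class:
//
//   class MyTargetTTIImpl final
//       : public TargetTransformInfoImplCRTPBase<MyTargetTTIImpl> {
//     using BaseT = TargetTransformInfoImplCRTPBase<MyTargetTTIImpl>;
//
//   public:
//     explicit MyTargetTTIImpl(const DataLayout &DL) : BaseT(DL) {}
//
//     // Non-virtual "overrides": TargetTransformInfo's concept/model wrapper
//     // calls these through the concrete type, so shadowing the base-class
//     // defaults is sufficient.
//     unsigned getNumberOfRegisters(bool Vector) { return Vector ? 32 : 16; }
//   };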

/// CRTP base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
private:
  typedef TargetTransformInfoImplBase BaseT;

protected:
  explicit TargetTransformInfoImplCRTPBase(const DataLayout &DL) : BaseT(DL) {}

public:
  using BaseT::getCallCost;

  unsigned getCallCost(const Function *F, int NumArgs, const User *U) {
    assert(F && "A concrete function must be provided to this routine.");

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = F->arg_size();

    if (Intrinsic::ID IID = F->getIntrinsicID()) {
      FunctionType *FTy = F->getFunctionType();
      SmallVector<Type *, 8> ParamTys(FTy->param_begin(), FTy->param_end());
      return static_cast<T *>(this)
          ->getIntrinsicCost(IID, FTy->getReturnType(), ParamTys, U);
    }

    if (!static_cast<T *>(this)->isLoweredToCall(F))
      return TTI::TCC_Basic; // Give a basic cost if it will be lowered
                             // directly.

    return static_cast<T *>(this)->getCallCost(F->getFunctionType(), NumArgs,
                                               U);
  }

  unsigned getCallCost(const Function *F, ArrayRef<const Value *> Arguments,
                       const User *U) {
    // Simply delegate to generic handling of the call.
    // FIXME: We should use instsimplify or something else to catch calls which
    // will constant fold with these arguments.
    return static_cast<T *>(this)->getCallCost(F, Arguments.size(), U);
  }

  using BaseT::getGEPCost;

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    assert(PointeeType && Ptr && "can't get GEPCost of nullptr");
    // TODO: will remove this when pointers have an opaque type.
    assert(Ptr->getType()->getScalarType()->getPointerElementType() ==
               PointeeType &&
           "explicit pointee type doesn't match operand's pointee type");
    auto *BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
    bool HasBaseReg = (BaseGV == nullptr);

    auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
    APInt BaseOffset(PtrSizeBits, 0);
    int64_t Scale = 0;

    auto GTI = gep_type_begin(PointeeType, Operands);
    Type *TargetType = nullptr;

    // Handle the case where the GEP instruction has a single operand,
    // the basis, therefore TargetType is a nullptr.
    if (Operands.empty())
      return !BaseGV ? TTI::TCC_Free : TTI::TCC_Basic;

    for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
      TargetType = GTI.getIndexedType();
      // We assume that the cost of Scalar GEP with constant index and the
      // cost of Vector GEP with splat constant index are the same.
      const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
      if (!ConstIdx)
        if (auto Splat = getSplatValue(*I))
          ConstIdx = dyn_cast<ConstantInt>(Splat);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For structures the index is always a splat or scalar constant.
        assert(ConstIdx && "Unexpected GEP index");
        uint64_t Field = ConstIdx->getZExtValue();
        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
      } else {
        int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (ConstIdx) {
          BaseOffset +=
              ConstIdx->getValue().sextOrTrunc(PtrSizeBits) * ElementSize;
        } else {
          // Needs scale register.
          if (Scale != 0)
            // No addressing mode takes two scale registers.
            return TTI::TCC_Basic;
          Scale = ElementSize;
        }
      }
    }

    if (static_cast<T *>(this)->isLegalAddressingMode(
            TargetType, const_cast<GlobalValue *>(BaseGV),
            BaseOffset.sextOrTrunc(64).getSExtValue(), HasBaseReg, Scale,
            Ptr->getType()->getPointerAddressSpace()))
      return TTI::TCC_Free;
    return TTI::TCC_Basic;
  }
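
  // Note (illustrative, not in the original header): a GEP whose indices are
  // all constants folds to a single (base + constant offset) form; if that
  // form passes isLegalAddressingMode() it is reported as TTI::TCC_Free,
  // otherwise (or when a second variable index would require another scale
  // register) the GEP costs TTI::TCC_Basic.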

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys, const User *U) {
    switch (IID) {
    default:
      // Intrinsics rarely (if ever) have normal argument setup constraints.
      // Model them as having a basic instruction cost.
      return TTI::TCC_Basic;

    // TODO: other libc intrinsics.
    case Intrinsic::memcpy:
      return static_cast<T *>(this)->getMemcpyCost(dyn_cast<Instruction>(U));

    case Intrinsic::annotation:
    case Intrinsic::assume:
    case Intrinsic::sideeffect:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::dbg_label:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
    case Intrinsic::is_constant:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
    case Intrinsic::experimental_gc_result:
    case Intrinsic::experimental_gc_relocate:
    case Intrinsic::coro_alloc:
    case Intrinsic::coro_begin:
    case Intrinsic::coro_free:
    case Intrinsic::coro_end:
    case Intrinsic::coro_frame:
    case Intrinsic::coro_size:
    case Intrinsic::coro_suspend:
    case Intrinsic::coro_param:
    case Intrinsic::coro_subfn_addr:
      // These intrinsics don't actually represent code after lowering.
      return TTI::TCC_Free;
    }
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments, const User *U) {
    // Delegate to the generic intrinsic handling code. This mostly provides an
    // opportunity for targets to (for example) special case the cost of
    // certain intrinsics based on constants used as arguments.
    SmallVector<Type *, 8> ParamTys;
    ParamTys.reserve(Arguments.size());
    for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
      ParamTys.push_back(Arguments[Idx]->getType());
    return static_cast<T *>(this)->getIntrinsicCost(IID, RetTy, ParamTys, U);
  }

  unsigned getUserCost(const User *U, ArrayRef<const Value *> Operands) {
    if (isa<PHINode>(U))
      return TTI::TCC_Free; // Model all PHI nodes as free.

    // Static alloca doesn't generate target instructions.
    if (auto *A = dyn_cast<AllocaInst>(U))
      if (A->isStaticAlloca())
        return TTI::TCC_Free;

    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
      return static_cast<T *>(this)->getGEPCost(GEP->getSourceElementType(),
                                                GEP->getPointerOperand(),
                                                Operands.drop_front());
    }

    if (auto CS = ImmutableCallSite(U)) {
      const Function *F = CS.getCalledFunction();
      if (!F) {
        // Just use the called value type.
        Type *FTy = CS.getCalledValue()->getType()->getPointerElementType();
        return static_cast<T *>(this)
            ->getCallCost(cast<FunctionType>(FTy), CS.arg_size(), U);
      }

      SmallVector<const Value *, 8> Arguments(CS.arg_begin(), CS.arg_end());
      return static_cast<T *>(this)->getCallCost(F, Arguments, U);
    }

    if (isa<SExtInst>(U) || isa<ZExtInst>(U) || isa<FPExtInst>(U))
      // The old behaviour of generally treating extensions of icmp to be free
      // has been removed. A target that needs it should override getUserCost().
      return static_cast<T *>(this)->getExtCost(cast<Instruction>(U),
                                                Operands.back());

    return static_cast<T *>(this)->getOperationCost(
        Operator::getOpcode(U), U->getType(),
        U->getNumOperands() == 1 ? U->getOperand(0)->getType() : nullptr);
  }

  int getInstructionLatency(const Instruction *I) {
    SmallVector<const Value *, 4> Operands(I->value_op_begin(),
                                           I->value_op_end());
    if (getUserCost(I, Operands) == TTI::TCC_Free)
      return 0;

    if (isa<LoadInst>(I))
      return 4;

    Type *DstTy = I->getType();

    // Usually an intrinsic is a simple instruction.
    // A real function call is much slower.
    if (auto *CI = dyn_cast<CallInst>(I)) {
      const Function *F = CI->getCalledFunction();
      if (!F || static_cast<T *>(this)->isLoweredToCall(F))
        return 40;
      // Some intrinsics return a value and a flag; we use the value type
      // to decide its latency.
      if (StructType *StructTy = dyn_cast<StructType>(DstTy))
        DstTy = StructTy->getElementType(0);
      // Fall through to simple instructions.
    }

    if (VectorType *VectorTy = dyn_cast<VectorType>(DstTy))
      DstTy = VectorTy->getElementType();
    if (DstTy->isFloatingPointTy())
      return 3;

    return 1;
  }
};

} // namespace llvm

#endif