//===-- X86TargetTransformInfo.h - X86 specific TTI -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo::Concept conforming object
/// specific to the X86 target machine. It uses the target's detailed
/// information to provide more precise answers to certain TTI queries, while
/// letting the target independent and default TTI implementations handle the
/// rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H

#include "X86.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {

class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
  typedef BasicTTIImplBase<X86TTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  const X86Subtarget *getST() const { return ST; }
  const X86TargetLowering *getTLI() const { return TLI; }

  const FeatureBitset InlineFeatureIgnoreList = {
      // This indicates the CPU is 64-bit capable, not that we are in 64-bit
      // mode.
      X86::Feature64Bit,

      // These features don't have any intrinsics or ABI effect.
      X86::FeatureNOPL,
      X86::FeatureCMPXCHG16B,
      X86::FeatureLAHFSAHF,

      // Codegen control options.
      X86::FeatureFast11ByteNOP,
      X86::FeatureFast15ByteNOP,
      X86::FeatureFastBEXTR,
      X86::FeatureFastHorizontalOps,
      X86::FeatureFastLZCNT,
      X86::FeatureFastPartialYMMorZMMWrite,
      X86::FeatureFastScalarFSQRT,
      X86::FeatureFastSHLDRotate,
      X86::FeatureFastScalarShiftMasks,
      X86::FeatureFastVectorShiftMasks,
      X86::FeatureFastVariableShuffle,
      X86::FeatureFastVectorFSQRT,
      X86::FeatureLEAForSP,
      X86::FeatureLEAUsesAG,
      X86::FeatureLZCNTFalseDeps,
      X86::FeatureBranchFusion,
      X86::FeatureMacroFusion,
      X86::FeatureMergeToThreeWayBranch,
      X86::FeaturePadShortFunctions,
      X86::FeaturePOPCNTFalseDeps,
      X86::FeatureSSEUnalignedMem,
      X86::FeatureSlow3OpsLEA,
      X86::FeatureSlowDivide32,
      X86::FeatureSlowDivide64,
      X86::FeatureSlowIncDec,
      X86::FeatureSlowLEA,
      X86::FeatureSlowPMADDWD,
      X86::FeatureSlowPMULLD,
      X86::FeatureSlowSHLD,
      X86::FeatureSlowTwoMemOps,
      X86::FeatureSlowUAMem16,

      // Perf-tuning flags.
      X86::FeatureHasFastGather,
      X86::FeatureSlowUAMem32,

      // Based on whether the user set the -mprefer-vector-width command line
      // option.
      X86::FeaturePrefer256Bit,

      // CPU name enums. These just follow the CPU string.
      X86::ProcIntelAtom,
      X86::ProcIntelGLM,
      X86::ProcIntelGLP,
      X86::ProcIntelSLM,
      X86::ProcIntelTRM,
  };

public:
  explicit X86TTIImpl(const X86TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// \name Scalar TTI Implementations
  /// @{
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  /// @}

  /// \name Cache TTI Implementation
  /// @{
  llvm::Optional<unsigned> getCacheSize(
      TargetTransformInfo::CacheLevel Level) const;
  llvm::Optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) const;
  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector);
  unsigned getRegisterBitWidth(bool Vector) const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AS) const;
  unsigned getMaxInterleaveFactor(unsigned VF);
  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>());
  int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       const Instruction *I = nullptr);
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                         const Instruction *I = nullptr);
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                      unsigned AddressSpace, const Instruction *I = nullptr);
  int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                            unsigned AddressSpace);
  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                             bool VariableMask, unsigned Alignment);
  int getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                const SCEV *Ptr);

  unsigned getAtomicMemIntrinsicMaxElementSize() const;

  int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> Tys, FastMathFlags FMF,
                            unsigned ScalarizationCostPassed = UINT_MAX);
  int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Value *> Args, FastMathFlags FMF,
                            unsigned VF = 1);

  int getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                 bool IsPairwiseForm);

  int getMinMaxReductionCost(Type *Ty, Type *CondTy, bool IsPairwiseForm,
                             bool IsUnsigned);

  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                 unsigned Factor, ArrayRef<unsigned> Indices,
                                 unsigned Alignment, unsigned AddressSpace,
                                 bool UseMaskForCond = false,
                                 bool UseMaskForGaps = false);
  int getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                       unsigned Factor,
                                       ArrayRef<unsigned> Indices,
                                       unsigned Alignment,
                                       unsigned AddressSpace,
                                       bool UseMaskForCond = false,
                                       bool UseMaskForGaps = false);
  int getInterleavedMemoryOpCostAVX2(unsigned Opcode, Type *VecTy,
                                     unsigned Factor, ArrayRef<unsigned> Indices,
                                     unsigned Alignment, unsigned AddressSpace,
                                     bool UseMaskForCond = false,
                                     bool UseMaskForGaps = false);

  /// Calculate the cost of materializing a 64-bit value. This helper
  /// method might only calculate a fraction of a larger immediate.
  /// Therefore it is valid to return a cost of ZERO.
  int getIntImmCost(int64_t);

  int getIntImmCost(const APInt &Imm, Type *Ty);

  unsigned getUserCost(const User *U, ArrayRef<const Value *> Operands);
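
  // A minimal sketch of how the cost hooks above are typically exercised
  // (hypothetical names: `TheTTI` is a TargetTransformInfo analysis result
  // and `VecTy` is a candidate vector type). Passes never call X86TTIImpl
  // directly; TargetTransformInfo type-erases it and forwards each query:
  //
  //   int Cost = TheTTI.getArithmeticInstrCost(Instruction::Add, VecTy);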

  int getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);
  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                    Type *Ty);
  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2);
  bool canMacroFuseCmp();
  bool isLegalMaskedLoad(Type *DataType);
  bool isLegalMaskedStore(Type *DataType);
  bool isLegalNTLoad(Type *DataType, unsigned Alignment);
  bool isLegalNTStore(Type *DataType, unsigned Alignment);
  bool isLegalMaskedGather(Type *DataType);
  bool isLegalMaskedScatter(Type *DataType);
  bool isLegalMaskedExpandLoad(Type *DataType);
  bool isLegalMaskedCompressStore(Type *DataType);
  bool hasDivRemOp(Type *DataType, bool IsSigned);
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty);
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;
  bool areFunctionArgsABICompatible(const Function *Caller,
                                    const Function *Callee,
                                    SmallPtrSetImpl<Argument *> &Args) const;
  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;
  bool enableInterleavedAccessVectorization();

private:
  int getGSScalarCost(unsigned Opcode, Type *DataTy, bool VariableMask,
                      unsigned Alignment, unsigned AddressSpace);
  int getGSVectorCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                      unsigned Alignment, unsigned AddressSpace);

  /// @}
};

} // end namespace llvm

#endif
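
// A minimal usage sketch, kept outside the header proper as a comment (the
// pass name `ExamplePass` and the queried type `DataTy` are hypothetical;
// the TargetIRAnalysis query is the standard way to reach this
// implementation). X86TargetMachine wraps X86TTIImpl in a
// TargetTransformInfo, so a new-pass-manager function pass would do roughly:
//
//   PreservedAnalyses ExamplePass::run(Function &F,
//                                      FunctionAnalysisManager &AM) {
//     TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
//     if (TTI.isLegalMaskedGather(DataTy))
//       /* ...emit a masked gather instead of scalarizing... */;
//     return PreservedAnalyses::all();
//   }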