1 //===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 /// \file
8 /// This file implements a TargetTransformInfo analysis pass specific to the
9 /// Hexagon target machine. It uses the target's detailed information to provide
10 /// more precise answers to certain TTI queries, while letting the target
11 /// independent and default TTI implementations handle the rest.
12 ///
13 //===----------------------------------------------------------------------===//
14
15 #include "HexagonTargetTransformInfo.h"
16 #include "HexagonSubtarget.h"
17 #include "llvm/Analysis/TargetTransformInfo.h"
18 #include "llvm/CodeGen/ValueTypes.h"
19 #include "llvm/IR/InstrTypes.h"
20 #include "llvm/IR/Instructions.h"
21 #include "llvm/IR/User.h"
22 #include "llvm/Support/Casting.h"
23 #include "llvm/Support/CommandLine.h"
24 #include "llvm/Transforms/Utils/UnrollLoop.h"
25
26 using namespace llvm;
27
28 #define DEBUG_TYPE "hexagontti"
29
// -hexagon-autohvx: gate for HVX auto-vectorization; off by default, so the
// loop vectorizer only targets HVX when explicitly requested.
static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
  cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

// -hexagon-emit-lookup-tables: allows disabling switch-to-lookup-table
// conversion (see shouldBuildLookupTables below); on by default.
static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control lookup table emission on Hexagon target"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;
41
useHVX() const42 bool HexagonTTIImpl::useHVX() const {
43 return ST.useHVXOps() && HexagonAutoHVX;
44 }
45
isTypeForHVX(Type * VecTy) const46 bool HexagonTTIImpl::isTypeForHVX(Type *VecTy) const {
47 assert(VecTy->isVectorTy());
48 if (cast<VectorType>(VecTy)->isScalable())
49 return false;
50 // Avoid types like <2 x i32*>.
51 if (!cast<VectorType>(VecTy)->getElementType()->isIntegerTy())
52 return false;
53 EVT VecVT = EVT::getEVT(VecTy);
54 if (!VecVT.isSimple() || VecVT.getSizeInBits() <= 64)
55 return false;
56 if (ST.isHVXVectorType(VecVT.getSimpleVT()))
57 return true;
58 auto Action = TLI.getPreferredVectorAction(VecVT.getSimpleVT());
59 return Action == TargetLoweringBase::TypeWidenVector;
60 }
61
getTypeNumElements(Type * Ty) const62 unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
63 if (Ty->isVectorTy())
64 return Ty->getVectorNumElements();
65 assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
66 "Expecting scalar type");
67 return 1;
68 }
69
70 TargetTransformInfo::PopcntSupportKind
getPopcntSupport(unsigned IntTyWidthInBit) const71 HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
72 // Return fast hardware support as every input < 64 bits will be promoted
73 // to 64 bits.
74 return TargetTransformInfo::PSK_FastHardware;
75 }
76
77 // The Hexagon target can unroll loops with run-time trip counts.
getUnrollingPreferences(Loop * L,ScalarEvolution & SE,TTI::UnrollingPreferences & UP)78 void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
79 TTI::UnrollingPreferences &UP) {
80 UP.Runtime = UP.Partial = true;
81 // Only try to peel innermost loops with small runtime trip counts.
82 if (L && L->empty() && canPeel(L) &&
83 SE.getSmallConstantTripCount(L) == 0 &&
84 SE.getSmallConstantMaxTripCount(L) > 0 &&
85 SE.getSmallConstantMaxTripCount(L) <= 5) {
86 UP.PeelCount = 2;
87 }
88 }
89
shouldFavorPostInc() const90 bool HexagonTTIImpl::shouldFavorPostInc() const {
91 return true;
92 }
93
94 /// --- Vector TTI begin ---
95
getNumberOfRegisters(bool Vector) const96 unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
97 if (Vector)
98 return useHVX() ? 32 : 0;
99 return 32;
100 }
101
getMaxInterleaveFactor(unsigned VF)102 unsigned HexagonTTIImpl::getMaxInterleaveFactor(unsigned VF) {
103 return useHVX() ? 2 : 0;
104 }
105
getRegisterBitWidth(bool Vector) const106 unsigned HexagonTTIImpl::getRegisterBitWidth(bool Vector) const {
107 return Vector ? getMinVectorRegisterBitWidth() : 32;
108 }
109
getMinVectorRegisterBitWidth() const110 unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
111 return useHVX() ? ST.getVectorLength()*8 : 0;
112 }
113
getMinimumVF(unsigned ElemWidth) const114 unsigned HexagonTTIImpl::getMinimumVF(unsigned ElemWidth) const {
115 return (8 * ST.getVectorLength()) / ElemWidth;
116 }
117
// Scalarization overhead (insert/extract costs when a vector op must be
// scalarized): no Hexagon-specific adjustment; defer to the base model.
unsigned HexagonTTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
      bool Extract) {
  return BaseT::getScalarizationOverhead(Ty, Insert, Extract);
}
122
// Overhead of scalarizing operands of a call at vectorization factor VF:
// no Hexagon-specific adjustment; defer to the base model.
unsigned HexagonTTIImpl::getOperandsScalarizationOverhead(
      ArrayRef<const Value*> Args, unsigned VF) {
  return BaseT::getOperandsScalarizationOverhead(Args, VF);
}
127
// Cost of a call instruction: no Hexagon-specific adjustment; defer to the
// base model.
unsigned HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
      ArrayRef<Type*> Tys) {
  return BaseT::getCallInstrCost(F, RetTy, Tys);
}
132
// Intrinsic cost, value-argument form: no Hexagon-specific adjustment here;
// the base implementation routes to the type-based overload below.
unsigned HexagonTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
      ArrayRef<Value*> Args, FastMathFlags FMF, unsigned VF) {
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
}
137
getIntrinsicInstrCost(Intrinsic::ID ID,Type * RetTy,ArrayRef<Type * > Tys,FastMathFlags FMF,unsigned ScalarizationCostPassed)138 unsigned HexagonTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
139 ArrayRef<Type*> Tys, FastMathFlags FMF,
140 unsigned ScalarizationCostPassed) {
141 if (ID == Intrinsic::bswap) {
142 std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, RetTy);
143 return LT.first + 2;
144 }
145 return BaseT::getIntrinsicInstrCost(ID, RetTy, Tys, FMF,
146 ScalarizationCostPassed);
147 }
148
getAddressComputationCost(Type * Tp,ScalarEvolution * SE,const SCEV * S)149 unsigned HexagonTTIImpl::getAddressComputationCost(Type *Tp,
150 ScalarEvolution *SE, const SCEV *S) {
151 return 0;
152 }
153
// Cost of a load or store. Stores and scalar loads use the base model;
// vector loads get Hexagon-specific costs:
//  - HVX-typed loads cost one unit per full vector register, or, when the
//    width is not a register multiple, three units per scalar load needed
//    to assemble the vector.
//  - Non-HVX vector loads are charged per aligned chunk, scaled up for
//    floating-point elements and for sub-32-bit alignments (which require
//    extra inserts to compose the vector).
unsigned HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                         MaybeAlign Alignment,
                                         unsigned AddressSpace,
                                         const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getBitWidth();
    if (useHVX() && isTypeForHVX(VecTy)) {
      unsigned RegWidth = getRegisterBitWidth(true);
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads
      // Clamp the effective alignment to one register: wider alignment
      // does not reduce the number of loads needed.
      const Align RegAlign(RegWidth / 8);
      if (!Alignment || *Alignment > RegAlign)
        Alignment = RegAlign;
      assert(Alignment);
      unsigned AlignWidth = 8 * Alignment->value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is considered as Align::None().
    // Bound the alignment by 8 bytes: a 64-bit register pair is the widest
    // scalar load chunk available.
    const Align BoundAlignment = std::min(Alignment.valueOrOne(), Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);
}
200
// Cost of a masked load/store: no Hexagon-specific adjustment; defer to
// the base model.
unsigned HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode,
      Type *Src, unsigned Alignment, unsigned AddressSpace) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
}
205
getShuffleCost(TTI::ShuffleKind Kind,Type * Tp,int Index,Type * SubTp)206 unsigned HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
207 int Index, Type *SubTp) {
208 return 1;
209 }
210
// Cost of a gather/scatter: no Hexagon-specific adjustment; defer to the
// base model.
unsigned HexagonTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
      Value *Ptr, bool VariableMask, unsigned Alignment) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment);
}
216
getInterleavedMemoryOpCost(unsigned Opcode,Type * VecTy,unsigned Factor,ArrayRef<unsigned> Indices,unsigned Alignment,unsigned AddressSpace,bool UseMaskForCond,bool UseMaskForGaps)217 unsigned HexagonTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode,
218 Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
219 unsigned Alignment, unsigned AddressSpace, bool UseMaskForCond,
220 bool UseMaskForGaps) {
221 if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
222 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
223 Alignment, AddressSpace,
224 UseMaskForCond, UseMaskForGaps);
225 return getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
226 nullptr);
227 }
228
getCmpSelInstrCost(unsigned Opcode,Type * ValTy,Type * CondTy,const Instruction * I)229 unsigned HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
230 Type *CondTy, const Instruction *I) {
231 if (ValTy->isVectorTy()) {
232 std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, ValTy);
233 if (Opcode == Instruction::FCmp)
234 return LT.first + FloatFactor * getTypeNumElements(ValTy);
235 }
236 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
237 }
238
getArithmeticInstrCost(unsigned Opcode,Type * Ty,TTI::OperandValueKind Opd1Info,TTI::OperandValueKind Opd2Info,TTI::OperandValueProperties Opd1PropInfo,TTI::OperandValueProperties Opd2PropInfo,ArrayRef<const Value * > Args,const Instruction * CxtI)239 unsigned HexagonTTIImpl::getArithmeticInstrCost(
240 unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
241 TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
242 TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
243 const Instruction *CxtI) {
244 if (Ty->isVectorTy()) {
245 std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty);
246 if (LT.second.isFloatingPoint())
247 return LT.first + FloatFactor * getTypeNumElements(Ty);
248 }
249 return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
250 Opd1PropInfo, Opd2PropInfo, Args, CxtI);
251 }
252
getCastInstrCost(unsigned Opcode,Type * DstTy,Type * SrcTy,const Instruction * I)253 unsigned HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
254 Type *SrcTy, const Instruction *I) {
255 if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
256 unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
257 unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;
258
259 std::pair<int, MVT> SrcLT = TLI.getTypeLegalizationCost(DL, SrcTy);
260 std::pair<int, MVT> DstLT = TLI.getTypeLegalizationCost(DL, DstTy);
261 return std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
262 }
263 return 1;
264 }
265
getVectorInstrCost(unsigned Opcode,Type * Val,unsigned Index)266 unsigned HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
267 unsigned Index) {
268 Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
269 : Val;
270 if (Opcode == Instruction::InsertElement) {
271 // Need two rotations for non-zero index.
272 unsigned Cost = (Index != 0) ? 2 : 0;
273 if (ElemTy->isIntegerTy(32))
274 return Cost;
275 // If it's not a 32-bit value, there will need to be an extract.
276 return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, Index);
277 }
278
279 if (Opcode == Instruction::ExtractElement)
280 return 2;
281
282 return 1;
283 }
284
285 /// --- Vector TTI end ---
286
getPrefetchDistance() const287 unsigned HexagonTTIImpl::getPrefetchDistance() const {
288 return ST.getL1PrefetchDistance();
289 }
290
getCacheLineSize() const291 unsigned HexagonTTIImpl::getCacheLineSize() const {
292 return ST.getL1CacheLineSize();
293 }
294
getUserCost(const User * U,ArrayRef<const Value * > Operands)295 int HexagonTTIImpl::getUserCost(const User *U,
296 ArrayRef<const Value *> Operands) {
297 auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
298 if (!CI->isIntegerCast())
299 return false;
300 // Only extensions from an integer type shorter than 32-bit to i32
301 // can be folded into the load.
302 const DataLayout &DL = getDataLayout();
303 unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
304 unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
305 if (DBW != 32 || SBW >= DBW)
306 return false;
307
308 const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
309 // Technically, this code could allow multiple uses of the load, and
310 // check if all the uses are the same extension operation, but this
311 // should be sufficient for most cases.
312 return LI && LI->hasOneUse();
313 };
314
315 if (const CastInst *CI = dyn_cast<const CastInst>(U))
316 if (isCastFoldedIntoLoad(CI))
317 return TargetTransformInfo::TCC_Free;
318 return BaseT::getUserCost(U, Operands);
319 }
320
shouldBuildLookupTables() const321 bool HexagonTTIImpl::shouldBuildLookupTables() const {
322 return EmitLookupTables;
323 }
324