//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target-independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/LoopPeel.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
  cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control lookup table emission on Hexagon target"));

static cl::opt<bool> HexagonMaskedVMem("hexagon-masked-vmem", cl::init(true),
  cl::Hidden, cl::desc("Enable masked loads/stores for HVX"));

// Constant "cost factor" to make floating-point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;
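
// For example, under this scheme an fadd on <16 x float> that legalizes in a
// single step is costed at 1 + FloatFactor * 16 = 65 units of reciprocal
// throughput (see getArithmeticInstrCost below).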

bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}

unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
    return VTy->getNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  UP.Runtime = UP.Partial = true;
}

void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
  // Only try to peel innermost loops whose trip count is not a compile-time
  // constant, but whose maximum trip count is known to be small (at most 5).
  if (L && L->isInnermost() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    PP.PeelCount = 2;
  }
}

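// Hexagon supports post-incremented addressing (e.g. "r1 = memw(r0++#4)"),
// so prefer post-indexed address recurrences in loops.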
TTI::AddressingModeKind
HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
                                           ScalarEvolution *SE) const {
  return TTI::AMK_PostIndexed;
}

/// --- Vector TTI begin ---

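// Hexagon has 32 scalar registers (r0-r31) and, when HVX is enabled, 32
// vector registers (v0-v31). Without HVX there are no vector registers to
// report to the vectorizer.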
unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

unsigned HexagonTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return useHVX() ? 2 : 1;
}

TypeSize
HexagonTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(getMinVectorRegisterBitWidth());
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength() * 8 : 32;
}

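// The minimum VF fills one whole HVX register. As an illustration (assuming
// a 128-byte HVX configuration, i.e. ST.getVectorLength() == 128): for
// 32-bit elements the minimum VF is (8 * 128) / 32 = 32 lanes.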
ElementCount HexagonTTIImpl::getMinimumVF(unsigned ElemWidth,
                                          bool IsScalable) const {
  assert(!IsScalable && "Scalable VFs are not supported for Hexagon");
  return ElementCount::getFixed((8 * ST.getVectorLength()) / ElemWidth);
}

InstructionCost HexagonTTIImpl::getScalarizationOverhead(
    VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract) {
  return BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}

InstructionCost
HexagonTTIImpl::getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                                 ArrayRef<Type *> Tys) {
  return BaseT::getOperandsScalarizationOverhead(Args, Tys);
}

InstructionCost HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
                                                 ArrayRef<Type *> Tys,
                                                 TTI::TargetCostKind CostKind) {
  return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
}

InstructionCost
HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  if (ICA.getID() == Intrinsic::bswap) {
    std::pair<InstructionCost, MVT> LT =
        TLI.getTypeLegalizationCost(DL, ICA.getReturnType());
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

InstructionCost HexagonTTIImpl::getAddressComputationCost(Type *Tp,
                                                          ScalarEvolution *SE,
                                                          const SCEV *S) {
  return 0;
}

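// A worked example of the HVX path below (a sketch, assuming a 64-byte HVX
// configuration, i.e. RegWidth == 512): a load of <32 x i32> (1024 bits) is
// two full vector loads, so it costs 1024 / 512 = 2. A load of <24 x i32>
// (768 bits, not a register multiple) with 4-byte alignment is modeled as
// being composed from 768 / 32 = 24 scalar loads, costed at 3 * 24 = 72.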
InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                                MaybeAlign Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedSize();
    if (useHVX() && ST.isTypeForHVX(VecTy)) {
      unsigned RegWidth =
          getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing an HVX vector from scalar loads.
      const Align RegAlign(RegWidth / 8);
      if (!Alignment || *Alignment > RegAlign)
        Alignment = RegAlign;
      assert(Alignment);
      unsigned AlignWidth = 8 * Alignment->value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating-point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is treated as Align(1).
    const Align BoundAlignment = std::min(Alignment.valueOrOne(), Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                CostKind, I);
}

InstructionCost
HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                      Align Alignment, unsigned AddressSpace,
                                      TTI::TargetCostKind CostKind) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
}

InstructionCost HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
                                               ArrayRef<int> Mask, int Index,
                                               Type *SubTp) {
  return 1;
}

InstructionCost HexagonTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
}

InstructionCost HexagonTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
                         CostKind);
}

InstructionCost HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                                   Type *CondTy,
                                                   CmpInst::Predicate VecPred,
                                                   TTI::TargetCostKind CostKind,
                                                   const Instruction *I) {
  if (ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput) {
    std::pair<InstructionCost, MVT> LT = TLI.getTypeLegalizationCost(DL, ValTy);
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}

InstructionCost HexagonTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  if (Ty->isVectorTy()) {
    std::pair<InstructionCost, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args, CxtI);
}

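// Illustration of the FP cast penalty below: for a sitofp from <8 x i32> to
// <8 x float>, only the destination is floating point, so SrcN = 0 and
// DstN = 8; with single-step legalization the cost is 1 + 4 * (0 + 8) = 33.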
InstructionCost HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
                                                 Type *SrcTy,
                                                 TTI::CastContextHint CCH,
                                                 TTI::TargetCostKind CostKind,
                                                 const Instruction *I) {
  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<InstructionCost, MVT> SrcLT =
        TLI.getTypeLegalizationCost(DL, SrcTy);
    std::pair<InstructionCost, MVT> DstLT =
        TLI.getTypeLegalizationCost(DL, DstTy);
    InstructionCost Cost =
        std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
    // TODO: Allow non-throughput costs that aren't binary.
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  }
  return 1;
}

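// Example of the insert/extract costing below: inserting an i16 at a non-zero
// index costs 2 (the rotations) + 2 (the extra extract) = 4, while inserting
// an i32 at index 0 is free.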
InstructionCost HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                   unsigned Index) {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for a non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, an extract will be needed as well.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, Index);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}

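// Masked vector loads/stores (e.g. a call to llvm.masked.store on a
// <32 x i32> that maps onto an HVX register) are only considered legal for
// HVX types, and can be turned off with -hexagon-masked-vmem=false.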
bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/) {
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/) {
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

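// The pattern recognized as free below is a single-use sub-word load feeding
// an extension to i32, for example:
//   %v = load i16, i16* %p
//   %e = sext i16 %v to i32
// Hexagon can perform the extension as part of the load (memh/memuh), so the
// cast itself costs nothing.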
InstructionCost HexagonTTIImpl::getUserCost(const User *U,
                                            ArrayRef<const Value *> Operands,
                                            TTI::TargetCostKind CostKind) {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type narrower than 32 bits to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TargetTransformInfo::TCC_Free;
  return BaseT::getUserCost(U, Operands, CostKind);
}

bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}