1 //===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "ARMTargetTransformInfo.h"
10 #include "ARMSubtarget.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "llvm/ADT/APInt.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/Analysis/LoopInfo.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/ISDOpcodes.h"
17 #include "llvm/CodeGen/MachineValueType.h"
18 #include "llvm/CodeGen/ValueTypes.h"
19 #include "llvm/IR/BasicBlock.h"
20 #include "llvm/IR/DataLayout.h"
21 #include "llvm/IR/DerivedTypes.h"
22 #include "llvm/IR/Instruction.h"
23 #include "llvm/IR/Instructions.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/Intrinsics.h"
26 #include "llvm/IR/IntrinsicsARM.h"
27 #include "llvm/IR/PatternMatch.h"
28 #include "llvm/IR/Type.h"
29 #include "llvm/Support/Casting.h"
30 #include "llvm/Support/KnownBits.h"
31 #include "llvm/Target/TargetMachine.h"
32 #include "llvm/TargetParser/SubtargetFeature.h"
33 #include "llvm/Transforms/InstCombine/InstCombiner.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 #include "llvm/Transforms/Utils/LoopUtils.h"
36 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
37 #include <algorithm>
38 #include <cassert>
39 #include <cstdint>
40 #include <optional>
41 #include <utility>
42
43 using namespace llvm;
44
45 #define DEBUG_TYPE "armtti"
46
47 static cl::opt<bool> EnableMaskedLoadStores(
48 "enable-arm-maskedldst", cl::Hidden, cl::init(true),
49 cl::desc("Enable the generation of masked loads and stores"));
50
51 static cl::opt<bool> DisableLowOverheadLoops(
52 "disable-arm-loloops", cl::Hidden, cl::init(false),
53 cl::desc("Disable the generation of low-overhead loops"));
54
55 static cl::opt<bool>
56 AllowWLSLoops("allow-arm-wlsloops", cl::Hidden, cl::init(true),
57 cl::desc("Enable the generation of WLS loops"));
58
59 extern cl::opt<TailPredication::Mode> EnableTailPredication;
60
61 extern cl::opt<bool> EnableMaskedGatherScatters;
62
63 extern cl::opt<unsigned> MVEMaxSupportedInterleaveFactor;
64
65 /// Convert a vector load intrinsic into a simple llvm load instruction.
66 /// This is beneficial when the underlying object being addressed comes
67 /// from a constant, since we get constant-folding for free.
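// Illustrative sketch (not part of the original source; intrinsic mangling
// shown approximately): with a sufficiently aligned pointer, roughly
//   %v = call <4 x i32> @llvm.arm.neon.vld1.v4i32.p0(ptr %p, i32 16)
// can be rewritten as the plain load
//   %v = load <4 x i32>, ptr %p, align 16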
68 static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign,
69 InstCombiner::BuilderTy &Builder) {
70 auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
71
72 if (!IntrAlign)
73 return nullptr;
74
75 unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
76 ? MemAlign
77 : IntrAlign->getLimitedValue();
78
79 if (!isPowerOf2_32(Alignment))
80 return nullptr;
81
82 auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
83 PointerType::get(II.getType(), 0));
84 return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
85 }
86
87 bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
88 const Function *Callee) const {
89 const TargetMachine &TM = getTLI()->getTargetMachine();
90 const FeatureBitset &CallerBits =
91 TM.getSubtargetImpl(*Caller)->getFeatureBits();
92 const FeatureBitset &CalleeBits =
93 TM.getSubtargetImpl(*Callee)->getFeatureBits();
94
95 // To inline a callee, all features not in the allowed list must match exactly.
96 bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
97 (CalleeBits & ~InlineFeaturesAllowed);
98 // For features in the allowed list, the callee's features must be a subset of
99 // the caller's.
100 bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
101 (CalleeBits & InlineFeaturesAllowed);
102 return MatchExact && MatchSubset;
103 }
104
105 TTI::AddressingModeKind
106 ARMTTIImpl::getPreferredAddressingMode(const Loop *L,
107 ScalarEvolution *SE) const {
108 if (ST->hasMVEIntegerOps())
109 return TTI::AMK_PostIndexed;
110
111 if (L->getHeader()->getParent()->hasOptSize())
112 return TTI::AMK_None;
113
114 if (ST->isMClass() && ST->isThumb2() &&
115 L->getNumBlocks() == 1)
116 return TTI::AMK_PreIndexed;
117
118 return TTI::AMK_None;
119 }
120
121 std::optional<Instruction *>
122 ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
123 using namespace PatternMatch;
124 Intrinsic::ID IID = II.getIntrinsicID();
125 switch (IID) {
126 default:
127 break;
128 case Intrinsic::arm_neon_vld1: {
129 Align MemAlign =
130 getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
131 &IC.getAssumptionCache(), &IC.getDominatorTree());
132 if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) {
133 return IC.replaceInstUsesWith(II, V);
134 }
135 break;
136 }
137
138 case Intrinsic::arm_neon_vld2:
139 case Intrinsic::arm_neon_vld3:
140 case Intrinsic::arm_neon_vld4:
141 case Intrinsic::arm_neon_vld2lane:
142 case Intrinsic::arm_neon_vld3lane:
143 case Intrinsic::arm_neon_vld4lane:
144 case Intrinsic::arm_neon_vst1:
145 case Intrinsic::arm_neon_vst2:
146 case Intrinsic::arm_neon_vst3:
147 case Intrinsic::arm_neon_vst4:
148 case Intrinsic::arm_neon_vst2lane:
149 case Intrinsic::arm_neon_vst3lane:
150 case Intrinsic::arm_neon_vst4lane: {
151 Align MemAlign =
152 getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
153 &IC.getAssumptionCache(), &IC.getDominatorTree());
154 unsigned AlignArg = II.arg_size() - 1;
155 Value *AlignArgOp = II.getArgOperand(AlignArg);
156 MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
157 if (Align && *Align < MemAlign) {
158 return IC.replaceOperand(
159 II, AlignArg,
160 ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(),
161 false));
162 }
163 break;
164 }
165
166 case Intrinsic::arm_mve_pred_i2v: {
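// Illustrative example (assumed IR, intrinsic mangling approximate): a round
// trip such as
//   %i = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %m)
//   %m2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %i)
// folds %m2 back to %m, as handled below.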
167 Value *Arg = II.getArgOperand(0);
168 Value *ArgArg;
169 if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
170 PatternMatch::m_Value(ArgArg))) &&
171 II.getType() == ArgArg->getType()) {
172 return IC.replaceInstUsesWith(II, ArgArg);
173 }
174 Constant *XorMask;
175 if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
176 PatternMatch::m_Value(ArgArg)),
177 PatternMatch::m_Constant(XorMask))) &&
178 II.getType() == ArgArg->getType()) {
179 if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
180 if (CI->getValue().trunc(16).isAllOnes()) {
181 auto TrueVector = IC.Builder.CreateVectorSplat(
182 cast<FixedVectorType>(II.getType())->getNumElements(),
183 IC.Builder.getTrue());
184 return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
185 }
186 }
187 }
188 KnownBits ScalarKnown(32);
189 if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16),
190 ScalarKnown, 0)) {
191 return &II;
192 }
193 break;
194 }
195 case Intrinsic::arm_mve_pred_v2i: {
196 Value *Arg = II.getArgOperand(0);
197 Value *ArgArg;
198 if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(
199 PatternMatch::m_Value(ArgArg)))) {
200 return IC.replaceInstUsesWith(II, ArgArg);
201 }
202 if (!II.getMetadata(LLVMContext::MD_range)) {
203 Type *IntTy32 = Type::getInt32Ty(II.getContext());
204 Metadata *M[] = {
205 ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
206 ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0x10000))};
207 II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M));
208 II.setMetadata(LLVMContext::MD_noundef,
209 MDNode::get(II.getContext(), std::nullopt));
210 return &II;
211 }
212 break;
213 }
214 case Intrinsic::arm_mve_vadc:
215 case Intrinsic::arm_mve_vadc_predicated: {
216 unsigned CarryOp =
217 (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
218 assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
219 "Bad type for intrinsic!");
220
221 KnownBits CarryKnown(32);
222 if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29),
223 CarryKnown)) {
224 return &II;
225 }
226 break;
227 }
228 case Intrinsic::arm_mve_vmldava: {
229 Instruction *I = cast<Instruction>(&II);
230 if (I->hasOneUse()) {
231 auto *User = cast<Instruction>(*I->user_begin());
232 Value *OpZ;
233 if (match(User, m_c_Add(m_Specific(I), m_Value(OpZ))) &&
234 match(I->getOperand(3), m_Zero())) {
235 Value *OpX = I->getOperand(4);
236 Value *OpY = I->getOperand(5);
237 Type *OpTy = OpX->getType();
238
239 IC.Builder.SetInsertPoint(User);
240 Value *V =
241 IC.Builder.CreateIntrinsic(Intrinsic::arm_mve_vmldava, {OpTy},
242 {I->getOperand(0), I->getOperand(1),
243 I->getOperand(2), OpZ, OpX, OpY});
244
245 IC.replaceInstUsesWith(*User, V);
246 return IC.eraseInstFromFunction(*User);
247 }
248 }
249 return std::nullopt;
250 }
251 }
252 return std::nullopt;
253 }
254
255 std::optional<Value *> ARMTTIImpl::simplifyDemandedVectorEltsIntrinsic(
256 InstCombiner &IC, IntrinsicInst &II, APInt OrigDemandedElts,
257 APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
258 std::function<void(Instruction *, unsigned, APInt, APInt &)>
259 SimplifyAndSetOp) const {
260
261 // Compute the demanded elements for a narrowing MVE intrinsic. TopOpc is
262 // the index of the operand that selects the Top/Bottom form, which can
263 // change between intrinsics.
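// For example (illustration, not from the original comment): for a bottom
// (IsTop == 0) narrowing operation the results land in the even lanes and the
// odd lanes are passed through from operand 0, so only operand 0's odd lanes
// are demanded.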
264 auto SimplifyNarrowInstrTopBottom = [&](unsigned TopOpc) {
265 unsigned NumElts = cast<FixedVectorType>(II.getType())->getNumElements();
266 unsigned IsTop = cast<ConstantInt>(II.getOperand(TopOpc))->getZExtValue();
267
268 // Only the odd or even lanes of operand 0 are demanded, depending on
269 // whether this is a top or bottom instruction.
270 APInt DemandedElts =
271 APInt::getSplat(NumElts, IsTop ? APInt::getLowBitsSet(2, 1)
272 : APInt::getHighBitsSet(2, 1));
273 SimplifyAndSetOp(&II, 0, OrigDemandedElts & DemandedElts, UndefElts);
274 // The other lanes will be defined from the inserted elements.
275 UndefElts &= APInt::getSplat(NumElts, IsTop ? APInt::getLowBitsSet(2, 1)
276 : APInt::getHighBitsSet(2, 1));
277 return std::nullopt;
278 };
279
280 switch (II.getIntrinsicID()) {
281 default:
282 break;
283 case Intrinsic::arm_mve_vcvt_narrow:
284 SimplifyNarrowInstrTopBottom(2);
285 break;
286 case Intrinsic::arm_mve_vqmovn:
287 SimplifyNarrowInstrTopBottom(4);
288 break;
289 case Intrinsic::arm_mve_vshrn:
290 SimplifyNarrowInstrTopBottom(7);
291 break;
292 }
293
294 return std::nullopt;
295 }
296
297 InstructionCost ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
298 TTI::TargetCostKind CostKind) {
299 assert(Ty->isIntegerTy());
300
301 unsigned Bits = Ty->getPrimitiveSizeInBits();
302 if (Bits == 0 || Imm.getActiveBits() >= 64)
303 return 4;
304
305 int64_t SImmVal = Imm.getSExtValue();
306 uint64_t ZImmVal = Imm.getZExtValue();
307 if (!ST->isThumb()) {
308 if ((SImmVal >= 0 && SImmVal < 65536) ||
309 (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
310 (ARM_AM::getSOImmVal(~ZImmVal) != -1))
311 return 1;
312 return ST->hasV6T2Ops() ? 2 : 3;
313 }
314 if (ST->isThumb2()) {
315 if ((SImmVal >= 0 && SImmVal < 65536) ||
316 (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
317 (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
318 return 1;
319 return ST->hasV6T2Ops() ? 2 : 3;
320 }
321 // Thumb1. Any i8 immediate costs 1.
322 if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
323 return 1;
324 if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
325 return 2;
326 // Load from constantpool.
327 return 3;
328 }
329
330 // Constants smaller than 256 fit in the immediate field of Thumb1
331 // instructions, so we return a cost of zero for them and 1 otherwise.
332 InstructionCost ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
333 const APInt &Imm, Type *Ty) {
334 if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
335 return 0;
336
337 return 1;
338 }
339
340 // Checks whether Inst is part of a min(max()) or max(min()) pattern
341 // that will match to an SSAT instruction. Returns the instruction being
342 // saturated, or null if no saturation pattern was found.
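// Illustrative example (not from the original source): for Imm == -128, the
// pattern smax(smin(%x, 127), -128) clamps %x to the signed 8-bit range and
// can be selected as an SSAT, so the constant should not be hoisted.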
343 static Value *isSSATMinMaxPattern(Instruction *Inst, const APInt &Imm) {
344 Value *LHS, *RHS;
345 ConstantInt *C;
346 SelectPatternFlavor InstSPF = matchSelectPattern(Inst, LHS, RHS).Flavor;
347
348 if (InstSPF == SPF_SMAX &&
349 PatternMatch::match(RHS, PatternMatch::m_ConstantInt(C)) &&
350 C->getValue() == Imm && Imm.isNegative() && Imm.isNegatedPowerOf2()) {
351
352 auto isSSatMin = [&](Value *MinInst) {
353 if (isa<SelectInst>(MinInst)) {
354 Value *MinLHS, *MinRHS;
355 ConstantInt *MinC;
356 SelectPatternFlavor MinSPF =
357 matchSelectPattern(MinInst, MinLHS, MinRHS).Flavor;
358 if (MinSPF == SPF_SMIN &&
359 PatternMatch::match(MinRHS, PatternMatch::m_ConstantInt(MinC)) &&
360 MinC->getValue() == ((-Imm) - 1))
361 return true;
362 }
363 return false;
364 };
365
366 if (isSSatMin(Inst->getOperand(1)))
367 return cast<Instruction>(Inst->getOperand(1))->getOperand(1);
368 if (Inst->hasNUses(2) &&
369 (isSSatMin(*Inst->user_begin()) || isSSatMin(*(++Inst->user_begin()))))
370 return Inst->getOperand(1);
371 }
372 return nullptr;
373 }
374
375 // Look for a FP Saturation pattern, where the instruction can be simplified to
376 // a fptosi.sat. max(min(fptosi)). The constant in this case is always free.
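// Illustrative example (assumed, not from the original source): with the i64
// immediate -2147483648, a clamp such as
//   smax(smin(fptosi float %f to i64, 2147483647), -2147483648)
// can be folded into a saturating fptosi.sat-style conversion, so the
// constant is free.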
377 static bool isFPSatMinMaxPattern(Instruction *Inst, const APInt &Imm) {
378 if (Imm.getBitWidth() != 64 ||
379 Imm != APInt::getHighBitsSet(64, 33)) // -2147483648
380 return false;
381 Value *FP = isSSATMinMaxPattern(Inst, Imm);
382 if (!FP && isa<ICmpInst>(Inst) && Inst->hasOneUse())
383 FP = isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm);
384 if (!FP)
385 return false;
386 return isa<FPToSIInst>(FP);
387 }
388
389 InstructionCost ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
390 const APInt &Imm, Type *Ty,
391 TTI::TargetCostKind CostKind,
392 Instruction *Inst) {
393 // Division by a constant can be turned into multiplication, but only if we
394 // know it's constant. So it's not so much that the immediate is cheap (it's
395 // not), but that the alternative is worse.
396 // FIXME: this is probably unneeded with GlobalISel.
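// Illustrative example (not from the original source): "udiv i32 %x, 10" is
// typically lowered to a multiply-high by a magic constant plus shifts, so
// returning 0 here keeps the divisor from being hoisted as a plain constant,
// which would defeat that transform.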
397 if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
398 Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
399 Idx == 1)
400 return 0;
401
402 // Leave any gep offsets to CodeGenPrepare, which will do a better job at
403 // splitting any large offsets.
404 if (Opcode == Instruction::GetElementPtr && Idx != 0)
405 return 0;
406
407 if (Opcode == Instruction::And) {
408 // UXTB/UXTH
409 if (Imm == 255 || Imm == 65535)
410 return 0;
411 // Conversion to BIC is free, and means we can use ~Imm instead.
412 return std::min(getIntImmCost(Imm, Ty, CostKind),
413 getIntImmCost(~Imm, Ty, CostKind));
414 }
415
416 if (Opcode == Instruction::Add)
417 // Conversion to SUB is free, and means we can use -Imm instead.
418 return std::min(getIntImmCost(Imm, Ty, CostKind),
419 getIntImmCost(-Imm, Ty, CostKind));
420
421 if (Opcode == Instruction::ICmp && Imm.isNegative() &&
422 Ty->getIntegerBitWidth() == 32) {
423 int64_t NegImm = -Imm.getSExtValue();
424 if (ST->isThumb2() && NegImm < 1<<12)
425 // icmp X, #-C -> cmn X, #C
426 return 0;
427 if (ST->isThumb() && NegImm < 1<<8)
428 // icmp X, #-C -> adds X, #C
429 return 0;
430 }
431
432 // xor a, -1 can always be folded to MVN
433 if (Opcode == Instruction::Xor && Imm.isAllOnes())
434 return 0;
435
436 // Ensure the negative constant of a min(max()) or max(min()) pattern that
437 // matches an SSAT instruction doesn't get hoisted.
438 if (Inst && ((ST->hasV6Ops() && !ST->isThumb()) || ST->isThumb2()) &&
439 Ty->getIntegerBitWidth() <= 32) {
440 if (isSSATMinMaxPattern(Inst, Imm) ||
441 (isa<ICmpInst>(Inst) && Inst->hasOneUse() &&
442 isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm)))
443 return 0;
444 }
445
446 if (Inst && ST->hasVFP2Base() && isFPSatMinMaxPattern(Inst, Imm))
447 return 0;
448
449 // We can convert <= -1 to < 0, which is generally quite cheap.
450 if (Inst && Opcode == Instruction::ICmp && Idx == 1 && Imm.isAllOnes()) {
451 ICmpInst::Predicate Pred = cast<ICmpInst>(Inst)->getPredicate();
452 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE)
453 return std::min(getIntImmCost(Imm, Ty, CostKind),
454 getIntImmCost(Imm + 1, Ty, CostKind));
455 }
456
457 return getIntImmCost(Imm, Ty, CostKind);
458 }
459
460 InstructionCost ARMTTIImpl::getCFInstrCost(unsigned Opcode,
461 TTI::TargetCostKind CostKind,
462 const Instruction *I) {
463 if (CostKind == TTI::TCK_RecipThroughput &&
464 (ST->hasNEON() || ST->hasMVEIntegerOps())) {
465 // FIXME: The vectorizer is highly sensitive to the cost of these
466 // instructions, which suggests that it may be using the costs incorrectly.
467 // But, for now, just make them free to avoid performance regressions for
468 // vector targets.
469 return 0;
470 }
471 return BaseT::getCFInstrCost(Opcode, CostKind, I);
472 }
473
474 InstructionCost ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
475 Type *Src,
476 TTI::CastContextHint CCH,
477 TTI::TargetCostKind CostKind,
478 const Instruction *I) {
479 int ISD = TLI->InstructionOpcodeToISD(Opcode);
480 assert(ISD && "Invalid opcode");
481
482 // TODO: Allow non-throughput costs that aren't binary.
483 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
484 if (CostKind != TTI::TCK_RecipThroughput)
485 return Cost == 0 ? 0 : 1;
486 return Cost;
487 };
488 auto IsLegalFPType = [this](EVT VT) {
489 EVT EltVT = VT.getScalarType();
490 return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
491 (EltVT == MVT::f64 && ST->hasFP64()) ||
492 (EltVT == MVT::f16 && ST->hasFullFP16());
493 };
494
495 EVT SrcTy = TLI->getValueType(DL, Src);
496 EVT DstTy = TLI->getValueType(DL, Dst);
497
498 if (!SrcTy.isSimple() || !DstTy.isSimple())
499 return AdjustCost(
500 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
501
502 // Extending masked loads and truncating masked stores are expensive because
503 // we currently don't split them. This means that we'll likely end up
504 // loading/storing each element individually (hence the high cost).
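// Worked example (illustrative): a sext of a masked load from <8 x i16> to
// <8 x i32> has a 256-bit destination (> 128 bits), so it is costed below as
// 2 * 8 * getMVEVectorCostFactor(CostKind).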
505 if ((ST->hasMVEIntegerOps() &&
506 (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt ||
507 Opcode == Instruction::SExt)) ||
508 (ST->hasMVEFloatOps() &&
509 (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) &&
510 IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)))
511 if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
512 return 2 * DstTy.getVectorNumElements() *
513 ST->getMVEVectorCostFactor(CostKind);
514
515 // The extend of other kinds of load is free
516 if (CCH == TTI::CastContextHint::Normal ||
517 CCH == TTI::CastContextHint::Masked) {
518 static const TypeConversionCostTblEntry LoadConversionTbl[] = {
519 {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
520 {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
521 {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
522 {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
523 {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
524 {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
525 {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
526 {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
527 {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
528 {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
529 {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
530 {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
531 };
532 if (const auto *Entry = ConvertCostTableLookup(
533 LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
534 return AdjustCost(Entry->Cost);
535
536 static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
537 {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
538 {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
539 {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
540 {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
541 {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
542 {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
543 // The following extend from a legal type to an illegal type, so need to
544 // split the load. This introduces an extra load operation, but the
545 // extend is still "free".
546 {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
547 {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
548 {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
549 {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
550 {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
551 {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
552 };
553 if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
554 if (const auto *Entry =
555 ConvertCostTableLookup(MVELoadConversionTbl, ISD,
556 DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
557 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
558 }
559
560 static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
561 // FPExtends are similar but also require the VCVT instructions.
562 {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
563 {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
564 };
565 if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
566 if (const auto *Entry =
567 ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
568 DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
569 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
570 }
571
572 // The truncate of a store is free. This is the mirror of extends above.
573 static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
574 {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
575 {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
576 {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
577 {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
578 {ISD::TRUNCATE, MVT::v8i32, MVT::v8i8, 1},
579 {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
580 {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
581 };
582 if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
583 if (const auto *Entry =
584 ConvertCostTableLookup(MVEStoreConversionTbl, ISD,
585 SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
586 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
587 }
588
589 static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
590 {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
591 {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
592 };
593 if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
594 if (const auto *Entry =
595 ConvertCostTableLookup(MVEFStoreConversionTbl, ISD,
596 SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
597 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
598 }
599 }
600
601 // NEON vector operations that can extend their inputs.
602 if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
603 I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
604 static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
605 // vaddl
606 { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
607 { ISD::ADD, MVT::v8i16, MVT::v8i8, 0 },
608 // vsubl
609 { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
610 { ISD::SUB, MVT::v8i16, MVT::v8i8, 0 },
611 // vmull
612 { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
613 { ISD::MUL, MVT::v8i16, MVT::v8i8, 0 },
614 // vshll
615 { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
616 { ISD::SHL, MVT::v8i16, MVT::v8i8, 0 },
617 };
618
619 auto *User = cast<Instruction>(*I->user_begin());
620 int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
621 if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
622 DstTy.getSimpleVT(),
623 SrcTy.getSimpleVT())) {
624 return AdjustCost(Entry->Cost);
625 }
626 }
627
628 // Single to/from double precision conversions.
629 if (Src->isVectorTy() && ST->hasNEON() &&
630 ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
631 DstTy.getScalarType() == MVT::f32) ||
632 (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
633 DstTy.getScalarType() == MVT::f64))) {
634 static const CostTblEntry NEONFltDblTbl[] = {
635 // Vector fptrunc/fpext conversions.
636 {ISD::FP_ROUND, MVT::v2f64, 2},
637 {ISD::FP_EXTEND, MVT::v2f32, 2},
638 {ISD::FP_EXTEND, MVT::v4f32, 4}};
639
640 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
641 if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
642 return AdjustCost(LT.first * Entry->Cost);
643 }
644
645 // Some arithmetic, load and store operations have specific instructions
646 // to cast up/down their types automatically at no extra cost.
647 // TODO: Get these tables to know at least what the related operations are.
648 static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
649 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
650 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
651 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
652 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
653 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 },
654 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
655
656 // The number of vmovl instructions for the extension.
657 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
658 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
659 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
660 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
661 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 3 },
662 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 3 },
663 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
664 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
665 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
666 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
667 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
668 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
669 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
670 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
671 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
672 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
673 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
674 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
675
676 // Operations that we legalize using splitting.
677 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
678 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },
679
680 // Vector float <-> i32 conversions.
681 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
682 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
683
684 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
685 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
686 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 },
687 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 },
688 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
689 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
690 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
691 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
692 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
693 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
694 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
695 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
696 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
697 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
698 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
699 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
700 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 },
701 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 },
702 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 },
703 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 },
704
705 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
706 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
707 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 3 },
708 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 3 },
709 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
710 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
711
712 // Vector double <-> i32 conversions.
713 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
714 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
715
716 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
717 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
718 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
719 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
720 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
721 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
722
723 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
724 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
725 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 4 },
726 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 4 },
727 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 8 },
728 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 8 }
729 };
730
731 if (SrcTy.isVector() && ST->hasNEON()) {
732 if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
733 DstTy.getSimpleVT(),
734 SrcTy.getSimpleVT()))
735 return AdjustCost(Entry->Cost);
736 }
737
738 // Scalar float to integer conversions.
739 static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
740 { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
741 { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
742 { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
743 { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
744 { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
745 { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
746 { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
747 { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
748 { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
749 { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
750 { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
751 { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
752 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
753 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
754 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
755 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
756 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
757 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
758 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
759 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
760 };
761 if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
762 if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
763 DstTy.getSimpleVT(),
764 SrcTy.getSimpleVT()))
765 return AdjustCost(Entry->Cost);
766 }
767
768 // Scalar integer to float conversions.
769 static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
770 { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
771 { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
772 { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
773 { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
774 { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
775 { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
776 { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
777 { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
778 { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
779 { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
780 { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
781 { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
782 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
783 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
784 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
785 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
786 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
787 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
788 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
789 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
790 };
791
792 if (SrcTy.isInteger() && ST->hasNEON()) {
793 if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
794 ISD, DstTy.getSimpleVT(),
795 SrcTy.getSimpleVT()))
796 return AdjustCost(Entry->Cost);
797 }
798
799 // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
800 // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; sexts
801 // are linearised so take more.
802 static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
803 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
804 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
805 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
806 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
807 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
808 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
809 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
810 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
811 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
812 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
813 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
814 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
815 };
816
817 if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
818 if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
819 ISD, DstTy.getSimpleVT(),
820 SrcTy.getSimpleVT()))
821 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
822 }
823
824 if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
825 // As a general rule, fp converts that were not matched above are scalarized
826 // and cost 1 vcvt for each lane, so long as the instruction is available.
827 // If not, it will become a series of function calls.
828 const InstructionCost CallCost =
829 getCallInstrCost(nullptr, Dst, {Src}, CostKind);
830 int Lanes = 1;
831 if (SrcTy.isFixedLengthVector())
832 Lanes = SrcTy.getVectorNumElements();
833
834 if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))
835 return Lanes;
836 else
837 return Lanes * CallCost;
838 }
839
840 if (ISD == ISD::TRUNCATE && ST->hasMVEIntegerOps() &&
841 SrcTy.isFixedLengthVector()) {
842 // Treat a truncate with larger than legal source (128 bits for MVE) as
843 // expensive, 2 instructions per lane.
844 if ((SrcTy.getScalarType() == MVT::i8 ||
845 SrcTy.getScalarType() == MVT::i16 ||
846 SrcTy.getScalarType() == MVT::i32) &&
847 SrcTy.getSizeInBits() > 128 &&
848 SrcTy.getSizeInBits() > DstTy.getSizeInBits())
849 return SrcTy.getVectorNumElements() * 2;
850 }
851
852 // Scalar integer conversion costs.
853 static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
854 // i16 -> i64 requires two dependent operations.
855 { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
856
857 // Truncates on i64 are assumed to be free.
858 { ISD::TRUNCATE, MVT::i32, MVT::i64, 0 },
859 { ISD::TRUNCATE, MVT::i16, MVT::i64, 0 },
860 { ISD::TRUNCATE, MVT::i8, MVT::i64, 0 },
861 { ISD::TRUNCATE, MVT::i1, MVT::i64, 0 }
862 };
863
864 if (SrcTy.isInteger()) {
865 if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
866 DstTy.getSimpleVT(),
867 SrcTy.getSimpleVT()))
868 return AdjustCost(Entry->Cost);
869 }
870
871 int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
872 ? ST->getMVEVectorCostFactor(CostKind)
873 : 1;
874 return AdjustCost(
875 BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
876 }
877
878 InstructionCost ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
879 TTI::TargetCostKind CostKind,
880 unsigned Index, Value *Op0,
881 Value *Op1) {
882 // Penalize inserting into a D-subregister. We end up with a three times
883 // lower estimated throughput on Swift.
884 if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
885 ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
886 return 3;
887
888 if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
889 Opcode == Instruction::ExtractElement)) {
890 // Cross-class copies are expensive on many microarchitectures,
891 // so assume they are expensive by default.
892 if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
893 return 3;
894
895 // Even if it's not a cross class copy, this likely leads to mixing
896 // of NEON and VFP code and should therefore be penalized.
897 if (ValTy->isVectorTy() &&
898 ValTy->getScalarSizeInBits() <= 32)
899 return std::max<InstructionCost>(
900 BaseT::getVectorInstrCost(Opcode, ValTy, CostKind, Index, Op0, Op1),
901 2U);
902 }
903
904 if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
905 Opcode == Instruction::ExtractElement)) {
906 // Integer cross-lane moves are more expensive than float, which can
907 // sometimes just be vmovs. Integer elements have to be passed through GPR
908 // registers, causing more of a delay.
909 std::pair<InstructionCost, MVT> LT =
910 getTypeLegalizationCost(ValTy->getScalarType());
911 return LT.first * (ValTy->getScalarType()->isIntegerTy() ? 4 : 1);
912 }
913
914 return BaseT::getVectorInstrCost(Opcode, ValTy, CostKind, Index, Op0, Op1);
915 }
916
917 InstructionCost ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
918 Type *CondTy,
919 CmpInst::Predicate VecPred,
920 TTI::TargetCostKind CostKind,
921 const Instruction *I) {
922 int ISD = TLI->InstructionOpcodeToISD(Opcode);
923
924 // Thumb scalar code size cost for select.
925 if (CostKind == TTI::TCK_CodeSize && ISD == ISD::SELECT &&
926 ST->isThumb() && !ValTy->isVectorTy()) {
927 // Assume expensive structs.
928 if (TLI->getValueType(DL, ValTy, true) == MVT::Other)
929 return TTI::TCC_Expensive;
930
931 // Select costs can vary because they:
932 // - may require one or more conditional mov (including an IT),
933 // - can't operate directly on immediates,
934 // - require live flags, which we can't copy around easily.
935 InstructionCost Cost = getTypeLegalizationCost(ValTy).first;
936
937 // Possible IT instruction for Thumb2, or more for Thumb1.
938 ++Cost;
939
940 // i1 values may need rematerialising by using mov immediates and/or
941 // flag setting instructions.
942 if (ValTy->isIntegerTy(1))
943 ++Cost;
944
945 return Cost;
946 }
947
948 // If this is a vector min/max/abs, use the cost of that intrinsic directly
949 // instead. Hopefully when min/max intrinsics are more prevalent this code
950 // will not be needed.
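// Illustrative example (not from the original source): for
//   %c = icmp slt <4 x i32> %a, %b
//   %s = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %b
// the pair matches SPF_SMIN, so the select is costed like @llvm.smin.v4i32
// and the icmp itself is treated as free.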
951 const Instruction *Sel = I;
952 if ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && Sel &&
953 Sel->hasOneUse())
954 Sel = cast<Instruction>(Sel->user_back());
955 if (Sel && ValTy->isVectorTy() &&
956 (ValTy->isIntOrIntVectorTy() || ValTy->isFPOrFPVectorTy())) {
957 const Value *LHS, *RHS;
958 SelectPatternFlavor SPF = matchSelectPattern(Sel, LHS, RHS).Flavor;
959 unsigned IID = 0;
960 switch (SPF) {
961 case SPF_ABS:
962 IID = Intrinsic::abs;
963 break;
964 case SPF_SMIN:
965 IID = Intrinsic::smin;
966 break;
967 case SPF_SMAX:
968 IID = Intrinsic::smax;
969 break;
970 case SPF_UMIN:
971 IID = Intrinsic::umin;
972 break;
973 case SPF_UMAX:
974 IID = Intrinsic::umax;
975 break;
976 case SPF_FMINNUM:
977 IID = Intrinsic::minnum;
978 break;
979 case SPF_FMAXNUM:
980 IID = Intrinsic::maxnum;
981 break;
982 default:
983 break;
984 }
985 if (IID) {
986 // The ICmp is free, the select gets the cost of the min/max/etc
987 if (Sel != I)
988 return 0;
989 IntrinsicCostAttributes CostAttrs(IID, ValTy, {ValTy, ValTy});
990 return getIntrinsicInstrCost(CostAttrs, CostKind);
991 }
992 }
993
994 // On NEON a vector select gets lowered to vbsl.
995 if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT && CondTy) {
996 // Lowering of some vector selects is currently far from perfect.
997 static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
998 { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
999 { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
1000 { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
1001 };
1002
1003 EVT SelCondTy = TLI->getValueType(DL, CondTy);
1004 EVT SelValTy = TLI->getValueType(DL, ValTy);
1005 if (SelCondTy.isSimple() && SelValTy.isSimple()) {
1006 if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
1007 SelCondTy.getSimpleVT(),
1008 SelValTy.getSimpleVT()))
1009 return Entry->Cost;
1010 }
1011
1012 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1013 return LT.first;
1014 }
1015
1016 if (ST->hasMVEIntegerOps() && ValTy->isVectorTy() &&
1017 (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
1018 cast<FixedVectorType>(ValTy)->getNumElements() > 1) {
1019 FixedVectorType *VecValTy = cast<FixedVectorType>(ValTy);
1020 FixedVectorType *VecCondTy = dyn_cast_or_null<FixedVectorType>(CondTy);
1021 if (!VecCondTy)
1022 VecCondTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(VecValTy));
1023
1024 // If we don't have mve.fp, any fp operations will need to be scalarized.
1025 if (Opcode == Instruction::FCmp && !ST->hasMVEFloatOps()) {
1026 // One scalarization insert, one scalarization extract and the cost of the
1027 // fcmps.
1028 return BaseT::getScalarizationOverhead(VecValTy, /*Insert*/ false,
1029 /*Extract*/ true, CostKind) +
1030 BaseT::getScalarizationOverhead(VecCondTy, /*Insert*/ true,
1031 /*Extract*/ false, CostKind) +
1032 VecValTy->getNumElements() *
1033 getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
1034 VecCondTy->getScalarType(), VecPred,
1035 CostKind, I);
1036 }
1037
1038 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1039 int BaseCost = ST->getMVEVectorCostFactor(CostKind);
1040 // There are two types - the input that specifies the type of the compare
1041 // and the output vXi1 type. Because we don't know how the output will be
1042 // split, we may need an expensive shuffle to get two in sync. This has the
1043 // effect of making larger than legal compares (v8i32 for example)
1044 // expensive.
1045 if (LT.second.isVector() && LT.second.getVectorNumElements() > 2) {
1046 if (LT.first > 1)
1047 return LT.first * BaseCost +
1048 BaseT::getScalarizationOverhead(VecCondTy, /*Insert*/ true,
1049 /*Extract*/ false, CostKind);
1050 return BaseCost;
1051 }
1052 }
1053
1054 // Default to cheap (throughput/size of 1 instruction) but adjust throughput
1055 // for "multiple beats" potentially needed by MVE instructions.
1056 int BaseCost = 1;
1057 if (ST->hasMVEIntegerOps() && ValTy->isVectorTy())
1058 BaseCost = ST->getMVEVectorCostFactor(CostKind);
1059
1060 return BaseCost *
1061 BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
1062 }
1063
1064 InstructionCost ARMTTIImpl::getAddressComputationCost(Type *Ty,
1065 ScalarEvolution *SE,
1066 const SCEV *Ptr) {
1067 // Address computations in vectorized code with non-consecutive addresses will
1068 // likely result in more instructions compared to scalar code where the
1069 // computation can more often be merged into the index mode. The resulting
1070 // extra micro-ops can significantly decrease throughput.
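// For example (illustrative): a scalar loop can often fold its address update
// into a post-indexed access such as "ldr r0, [r1], #4", whereas vectorized
// code touching non-consecutive addresses needs separate address arithmetic
// for each access.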
1071 unsigned NumVectorInstToHideOverhead = 10;
1072 int MaxMergeDistance = 64;
1073
1074 if (ST->hasNEON()) {
1075 if (Ty->isVectorTy() && SE &&
1076 !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
1077 return NumVectorInstToHideOverhead;
1078
1079 // In many cases the address computation is not merged into the instruction
1080 // addressing mode.
1081 return 1;
1082 }
1083 return BaseT::getAddressComputationCost(Ty, SE, Ptr);
1084 }
1085
1086 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
1087 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1088 // If a VCTP is part of a chain, it's already profitable and shouldn't be
1089 // optimized, else LSR may block tail-predication.
1090 switch (II->getIntrinsicID()) {
1091 case Intrinsic::arm_mve_vctp8:
1092 case Intrinsic::arm_mve_vctp16:
1093 case Intrinsic::arm_mve_vctp32:
1094 case Intrinsic::arm_mve_vctp64:
1095 return true;
1096 default:
1097 break;
1098 }
1099 }
1100 return false;
1101 }
1102
1103 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
1104 if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
1105 return false;
1106
1107 if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
1108 // Don't support v2i1 yet.
1109 if (VecTy->getNumElements() == 2)
1110 return false;
1111
1112 // We don't support extending fp types.
1113 unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
1114 if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
1115 return false;
1116 }
1117
1118 unsigned EltWidth = DataTy->getScalarSizeInBits();
1119 return (EltWidth == 32 && Alignment >= 4) ||
1120 (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
1121 }
1122
1123 bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
1124 if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
1125 return false;
1126
1127 unsigned EltWidth = Ty->getScalarSizeInBits();
1128 return ((EltWidth == 32 && Alignment >= 4) ||
1129 (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
1130 }
1131
1132 /// Given a memcpy/memset/memmove instruction, return the number of memory
1133 /// operations performed, via querying findOptimalMemOpLowering. Returns -1 if a
1134 /// call is used.
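// Worked example (illustrative, assuming findOptimalMemOpLowering picks four
// i32 operations): a memcpy of 16 bytes with 4-byte-aligned operands would
// return 4 * Factor(=2) = 8, i.e. four loads plus four stores.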
1135 int ARMTTIImpl::getNumMemOps(const IntrinsicInst *I) const {
1136 MemOp MOp;
1137 unsigned DstAddrSpace = ~0u;
1138 unsigned SrcAddrSpace = ~0u;
1139 const Function *F = I->getParent()->getParent();
1140
1141 if (const auto *MC = dyn_cast<MemTransferInst>(I)) {
1142 ConstantInt *C = dyn_cast<ConstantInt>(MC->getLength());
1143 // If 'size' is not a constant, a library call will be generated.
1144 if (!C)
1145 return -1;
1146
1147 const unsigned Size = C->getValue().getZExtValue();
1148 const Align DstAlign = *MC->getDestAlign();
1149 const Align SrcAlign = *MC->getSourceAlign();
1150
1151 MOp = MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
1152 /*IsVolatile*/ false);
1153 DstAddrSpace = MC->getDestAddressSpace();
1154 SrcAddrSpace = MC->getSourceAddressSpace();
1155 }
1156 else if (const auto *MS = dyn_cast<MemSetInst>(I)) {
1157 ConstantInt *C = dyn_cast<ConstantInt>(MS->getLength());
1158 // If 'size' is not a constant, a library call will be generated.
1159 if (!C)
1160 return -1;
1161
1162 const unsigned Size = C->getValue().getZExtValue();
1163 const Align DstAlign = *MS->getDestAlign();
1164
1165 MOp = MemOp::Set(Size, /*DstAlignCanChange*/ false, DstAlign,
1166 /*IsZeroMemset*/ false, /*IsVolatile*/ false);
1167 DstAddrSpace = MS->getDestAddressSpace();
1168 }
1169 else
1170 llvm_unreachable("Expected a memcpy/move or memset!");
1171
1172 unsigned Limit, Factor = 2;
1173 switch(I->getIntrinsicID()) {
1174 case Intrinsic::memcpy:
1175 Limit = TLI->getMaxStoresPerMemcpy(F->hasMinSize());
1176 break;
1177 case Intrinsic::memmove:
1178 Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
1179 break;
1180 case Intrinsic::memset:
1181 Limit = TLI->getMaxStoresPerMemset(F->hasMinSize());
1182 Factor = 1;
1183 break;
1184 default:
1185 llvm_unreachable("Expected a memcpy/move or memset!");
1186 }
1187
1188 // MemOps will be populated with a list of data types that need to be
1189 // loaded and stored. That's why we multiply the number of elements by 2 to
1190 // get the cost for this memcpy.
1191 std::vector<EVT> MemOps;
1192 if (getTLI()->findOptimalMemOpLowering(
1193 MemOps, Limit, MOp, DstAddrSpace,
1194 SrcAddrSpace, F->getAttributes()))
1195 return MemOps.size() * Factor;
1196
1197 // If we can't find an optimal memop lowering, return the default cost
1198 return -1;
1199 }
1200
1201 InstructionCost ARMTTIImpl::getMemcpyCost(const Instruction *I) {
1202 int NumOps = getNumMemOps(cast<IntrinsicInst>(I));
1203
1204 // To model the cost of a library call, we assume 1 for the call, and
1205 // 3 for the argument setup.
1206 if (NumOps == -1)
1207 return 4;
1208 return NumOps;
1209 }
1210
1211 InstructionCost ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1212 VectorType *Tp, ArrayRef<int> Mask,
1213 TTI::TargetCostKind CostKind,
1214 int Index, VectorType *SubTp,
1215 ArrayRef<const Value *> Args) {
1216 Kind = improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp);
1217 if (ST->hasNEON()) {
1218 if (Kind == TTI::SK_Broadcast) {
1219 static const CostTblEntry NEONDupTbl[] = {
1220 // VDUP handles these cases.
1221 {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1222 {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1223 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1224 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1225 {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1226 {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1227
1228 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1229 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1230 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1231 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};
1232
1233 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1234 if (const auto *Entry =
1235 CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
1236 return LT.first * Entry->Cost;
1237 }
1238 if (Kind == TTI::SK_Reverse) {
1239 static const CostTblEntry NEONShuffleTbl[] = {
1240 // Reverse shuffle cost one instruction if we are shuffling within a
1241 // double word (vrev) or two if we shuffle a quad word (vrev, vext).
1242 {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1243 {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1244 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1245 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1246 {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1247 {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1248
1249 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1250 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1251 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
1252 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
1253
1254 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1255 if (const auto *Entry =
1256 CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
1257 return LT.first * Entry->Cost;
1258 }
1259 if (Kind == TTI::SK_Select) {
1260 static const CostTblEntry NEONSelShuffleTbl[] = {
1261 // Select shuffle cost table for ARM. Cost is the number of
1262 // instructions
1263 // required to create the shuffled vector.
1264
1265 {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1266 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1267 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1268 {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1269
1270 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1271 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1272 {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
1273
1274 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
1275
1276 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
1277
1278 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1279 if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
1280 ISD::VECTOR_SHUFFLE, LT.second))
1281 return LT.first * Entry->Cost;
1282 }
1283 }
1284 if (ST->hasMVEIntegerOps()) {
1285 if (Kind == TTI::SK_Broadcast) {
1286 static const CostTblEntry MVEDupTbl[] = {
1287 // VDUP handles these cases.
1288 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1289 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1290 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
1291 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1292 {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};
1293
1294 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1295 if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
1296 LT.second))
1297 return LT.first * Entry->Cost *
1298 ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput);
1299 }
1300
1301 if (!Mask.empty()) {
1302 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1303 if (LT.second.isVector() &&
1304 Mask.size() <= LT.second.getVectorNumElements() &&
1305 (isVREVMask(Mask, LT.second, 16) || isVREVMask(Mask, LT.second, 32) ||
1306 isVREVMask(Mask, LT.second, 64)))
1307 return ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput) * LT.first;
1308 }
1309 }
1310
1311 int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
1312 ? ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput)
1313 : 1;
1314 return BaseCost *
1315 BaseT::getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp);
1316 }
1317
1318 InstructionCost ARMTTIImpl::getArithmeticInstrCost(
1319 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1320 TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
1321 ArrayRef<const Value *> Args,
1322 const Instruction *CxtI) {
1323 int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
1324 if (ST->isThumb() && CostKind == TTI::TCK_CodeSize && Ty->isIntegerTy(1)) {
1325 // Make operations on i1 relatively expensive as this often involves
1326 // combining predicates. AND and XOR should be easier to handle with IT
1327 // blocks.
1328 switch (ISDOpcode) {
1329 default:
1330 break;
1331 case ISD::AND:
1332 case ISD::XOR:
1333 return 2;
1334 case ISD::OR:
1335 return 3;
1336 }
1337 }
1338
1339 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
1340
1341 if (ST->hasNEON()) {
1342 const unsigned FunctionCallDivCost = 20;
1343 const unsigned ReciprocalDivCost = 10;
1344 static const CostTblEntry CostTbl[] = {
1345 // Division.
1346 // These costs are somewhat random. Choose a cost of 20 to indicate that
1347 // vectorizing division (added function call) is going to be very expensive.
1348 // Double registers types.
1349 { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1350 { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1351 { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
1352 { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
1353 { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1354 { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1355 { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
1356 { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
1357 { ISD::SDIV, MVT::v4i16, ReciprocalDivCost},
1358 { ISD::UDIV, MVT::v4i16, ReciprocalDivCost},
1359 { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
1360 { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
1361 { ISD::SDIV, MVT::v8i8, ReciprocalDivCost},
1362 { ISD::UDIV, MVT::v8i8, ReciprocalDivCost},
1363 { ISD::SREM, MVT::v8i8, 8 * FunctionCallDivCost},
1364 { ISD::UREM, MVT::v8i8, 8 * FunctionCallDivCost},
1365 // Quad register types.
1366 { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1367 { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1368 { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
1369 { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
1370 { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1371 { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1372 { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
1373 { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
1374 { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1375 { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1376 { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
1377 { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
1378 { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1379 { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1380 { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
1381 { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
1382 // Multiplication.
1383 };
1384
1385 if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
1386 return LT.first * Entry->Cost;
1387
1388 InstructionCost Cost = BaseT::getArithmeticInstrCost(
1389 Opcode, Ty, CostKind, Op1Info, Op2Info);
1390
1391 // This is somewhat of a hack. The problem that we are facing is that SROA
1392 // creates a sequence of shift, and, or instructions to construct values.
1393 // These sequences are recognized by ISel and have zero cost. Not so for
1394 // the vectorized code. Because we have support for v2i64 but not i64 those
1395 // sequences look particularly beneficial to vectorize.
1396 // To work around this we increase the cost of v2i64 operations to make them
1397 // seem less beneficial.
1398 if (LT.second == MVT::v2i64 && Op2Info.isUniform() && Op2Info.isConstant())
1399 Cost += 4;
1400
1401 return Cost;
1402 }
1403
1404 // If this operation is a shift on arm/thumb2, it might well be folded into
1405 // the following instruction, hence having a cost of 0.
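// Illustrative example (not from the original source): in
//   %s = shl i32 %x, 2
//   %a = add i32 %y, %s
// the shift folds into the add as "add r0, r1, r2, lsl #2", so the shl is
// given a cost of 0 below.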
1406 auto LooksLikeAFreeShift = [&]() {
1407 if (ST->isThumb1Only() || Ty->isVectorTy())
1408 return false;
1409
1410 if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
1411 return false;
1412 if (!Op2Info.isUniform() || !Op2Info.isConstant())
1413 return false;
1414
1415 // Folded into an ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB
1416 switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
1417 case Instruction::Add:
1418 case Instruction::Sub:
1419 case Instruction::And:
1420 case Instruction::Xor:
1421 case Instruction::Or:
1422 case Instruction::ICmp:
1423 return true;
1424 default:
1425 return false;
1426 }
1427 };
1428 if (LooksLikeAFreeShift())
1429 return 0;
1430
1431 // Default to cheap (throughput/size of 1 instruction) but adjust throughput
1432 // for "multiple beats" potentially needed by MVE instructions.
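// (MVE executes 128-bit operations over multiple "beats"; on a dual-beat
// implementation a single vector instruction occupies the pipeline for two
// beats, which is roughly what getMVEVectorCostFactor accounts for.)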
1433 int BaseCost = 1;
1434 if (ST->hasMVEIntegerOps() && Ty->isVectorTy())
1435 BaseCost = ST->getMVEVectorCostFactor(CostKind);
1436
1437 // The rest of this mostly follows what is done in BaseT::getArithmeticInstrCost,
1438 // without treating floats as more expensive than scalars or increasing the
1439 // costs for custom operations. The result is also multiplied by the
1440 // MVEVectorCostFactor where appropriate.
1441 if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
1442 return LT.first * BaseCost;
1443
1444 // Otherwise this is expanded; assume that we need to scalarize this op.
1445 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1446 unsigned Num = VTy->getNumElements();
1447 InstructionCost Cost =
1448 getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind);
1449 // Return the cost of the multiple scalar invocations plus the cost of
1450 // inserting and extracting the values.
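// For example, a <4 x i32> udiv that has to be expanded is costed here as
// four scalar udivs plus the overhead of extracting the operand lanes and
// re-inserting the results.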
1451 SmallVector<Type *> Tys(Args.size(), Ty);
1452 return BaseT::getScalarizationOverhead(VTy, Args, Tys, CostKind) +
1453 Num * Cost;
1454 }
1455
1456 return BaseCost;
1457 }
1458
1459 InstructionCost ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
1460 MaybeAlign Alignment,
1461 unsigned AddressSpace,
1462 TTI::TargetCostKind CostKind,
1463 TTI::OperandValueInfo OpInfo,
1464 const Instruction *I) {
1465 // TODO: Handle other cost kinds.
1466 if (CostKind != TTI::TCK_RecipThroughput)
1467 return 1;
1468
1469 // Type legalization can't handle structs
1470 if (TLI->getValueType(DL, Src, true) == MVT::Other)
1471 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1472 CostKind);
1473
1474 if (ST->hasNEON() && Src->isVectorTy() &&
1475 (Alignment && *Alignment != Align(16)) &&
1476 cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
1477 // Unaligned loads/stores are extremely inefficient.
1478 // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
1479 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
1480 return LT.first * 4;
1481 }
1482
1483 // MVE can optimize a fpext(load(4xhalf)) using an extending integer load.
1484 // Same for stores.
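// Illustrative IR for the pattern being matched here:
//   %l = load <4 x half>, ptr %p, align 2
//   %e = fpext <4 x half> %l to <4 x float>
// Under MVE this pair can be selected as a single widening load, so it is
// costed below as one MVE operation.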
1485 if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
1486 ((Opcode == Instruction::Load && I->hasOneUse() &&
1487 isa<FPExtInst>(*I->user_begin())) ||
1488 (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
1489 FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
1490 Type *DstTy =
1491 Opcode == Instruction::Load
1492 ? (*I->user_begin())->getType()
1493 : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType();
1494 if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() &&
1495 DstTy->getScalarType()->isFloatTy())
1496 return ST->getMVEVectorCostFactor(CostKind);
1497 }
1498
1499 int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
1500 ? ST->getMVEVectorCostFactor(CostKind)
1501 : 1;
1502 return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1503 CostKind, OpInfo, I);
1504 }
1505
1506 InstructionCost
1507 ARMTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
1508 unsigned AddressSpace,
1509 TTI::TargetCostKind CostKind) {
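// Note: under MVE a legal masked load/store is a single predicated access
// (roughly a VPST-guarded VLDR/VSTR), which is why the legal cases below
// cost just the MVE vector cost factor while everything else falls back to
// an expensive scalarised estimate.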
1510 if (ST->hasMVEIntegerOps()) {
1511 if (Opcode == Instruction::Load && isLegalMaskedLoad(Src, Alignment))
1512 return ST->getMVEVectorCostFactor(CostKind);
1513 if (Opcode == Instruction::Store && isLegalMaskedStore(Src, Alignment))
1514 return ST->getMVEVectorCostFactor(CostKind);
1515 }
1516 if (!isa<FixedVectorType>(Src))
1517 return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1518 CostKind);
1519 // Scalar cost, which is currently very high due to the inefficiency of the
1520 // generated code.
1521 return cast<FixedVectorType>(Src)->getNumElements() * 8;
1522 }
1523
1524 InstructionCost ARMTTIImpl::getInterleavedMemoryOpCost(
1525 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1526 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1527 bool UseMaskForCond, bool UseMaskForGaps) {
1528 assert(Factor >= 2 && "Invalid interleave factor");
1529 assert(isa<VectorType>(VecTy) && "Expect a vector type");
1530
1531 // vldN/vstN don't support vector types with i64/f64 elements.
1532 bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;
1533
1534 if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
1535 !UseMaskForCond && !UseMaskForGaps) {
1536 unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
1537 auto *SubVecTy =
1538 FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1539
1540 // vldN/vstN only support legal vector types of size 64 or 128 in bits.
1541 // Accesses having vector types that are a multiple of 128 bits can be
1542 // matched to more than one vldN/vstN instruction.
1543 int BaseCost =
1544 ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor(CostKind) : 1;
1545 if (NumElts % Factor == 0 &&
1546 TLI->isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL))
1547 return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);
1548
1549 // Some smaller-than-legal interleaved patterns are cheap as we can make
1550 // use of the vmovn or vrev patterns to interleave a standard load. This is
1551 // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
1552 // promoted differently). The cost of 2 here is then a load and vrev or
1553 // vmovn.
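// For example (illustrative, matching the comment above), de-interleaving
// two <4 x i16> halves from 8 consecutive i16 elements can be done as one
// full-width load plus one VREV/VMOVN-style shuffle, hence 2 * BaseCost.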
1554 if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
1555 VecTy->isIntOrIntVectorTy() &&
1556 DL.getTypeSizeInBits(SubVecTy).getFixedValue() <= 64)
1557 return 2 * BaseCost;
1558 }
1559
1560 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1561 Alignment, AddressSpace, CostKind,
1562 UseMaskForCond, UseMaskForGaps);
1563 }
1564
1565 InstructionCost ARMTTIImpl::getGatherScatterOpCost(
1566 unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1567 Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
1568 using namespace PatternMatch;
1569 if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
1570 return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1571 Alignment, CostKind, I);
1572
1573 assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
1574 auto *VTy = cast<FixedVectorType>(DataTy);
1575
1576 // TODO: Splitting, once we do that.
1577
1578 unsigned NumElems = VTy->getNumElements();
1579 unsigned EltSize = VTy->getScalarSizeInBits();
1580 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(DataTy);
1581
1582 // For now, it is assumed that for the MVE gather instructions the loads are
1583 // all effectively serialised. This means the cost is the scalar cost
1584 // multiplied by the number of elements being loaded. This is possibly very
1585 // conservative, but even so we still end up vectorising loops because the
1586 // cost per iteration for many loops is lower than for scalar loops.
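// E.g. a gather of <4 x i32> is costed as 4 * LT.first * the MVE cost
// factor, as if the four element loads issue back to back.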
1587 InstructionCost VectorCost =
1588 NumElems * LT.first * ST->getMVEVectorCostFactor(CostKind);
1589 // The scalarization cost should be a lot higher. We use the number of vector
1590 // elements plus the scalarization overhead. If masking is required then a lot
1591 // of little blocks will be needed and potentially a scalarized p0 mask,
1592 // greatly increasing the cost.
1593 InstructionCost ScalarCost =
1594 NumElems * LT.first + (VariableMask ? NumElems * 5 : 0) +
1595 BaseT::getScalarizationOverhead(VTy, /*Insert*/ true, /*Extract*/ false,
1596 CostKind) +
1597 BaseT::getScalarizationOverhead(VTy, /*Insert*/ false, /*Extract*/ true,
1598 CostKind);
1599
1600 if (EltSize < 8 || Alignment < EltSize / 8)
1601 return ScalarCost;
1602
1603 unsigned ExtSize = EltSize;
1604 // Check whether there's a single user that asks for an extended type
1605 if (I != nullptr) {
1606 // Depending on the caller of this function, a gather instruction will
1607 // either have opcode Instruction::Load or be a call to the masked_gather
1608 // intrinsic.
1609 if ((I->getOpcode() == Instruction::Load ||
1610 match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
1611 I->hasOneUse()) {
1612 const User *Us = *I->users().begin();
1613 if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
1614 // Only allow valid type combinations.
1615 unsigned TypeSize =
1616 cast<Instruction>(Us)->getType()->getScalarSizeInBits();
1617 if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
1618 (TypeSize == 16 && EltSize == 8)) &&
1619 TypeSize * NumElems == 128) {
1620 ExtSize = TypeSize;
1621 }
1622 }
1623 }
1624 // Check whether the input data needs to be truncated
1625 TruncInst *T;
1626 if ((I->getOpcode() == Instruction::Store ||
1627 match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
1628 (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
1629 // Only allow valid type combinations
1630 unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
1631 if (((EltSize == 16 && TypeSize == 32) ||
1632 (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
1633 TypeSize * NumElems == 128)
1634 ExtSize = TypeSize;
1635 }
1636 }
1637
1638 if (ExtSize * NumElems != 128 || NumElems < 4)
1639 return ScalarCost;
1640
1641 // Any (aligned) i32 gather will not need to be scalarised.
1642 if (ExtSize == 32)
1643 return VectorCost;
1644 // For smaller types, we need to ensure that the gep's inputs are correctly
1645 // extended from a small enough value. Other sizes (including i64) are
1646 // scalarized for now.
1647 if (ExtSize != 8 && ExtSize != 16)
1648 return ScalarCost;
1649
1650 if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
1651 Ptr = BC->getOperand(0);
1652 if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1653 if (GEP->getNumOperands() != 2)
1654 return ScalarCost;
1655 unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
1656 // Scale needs to be correct (which is only relevant for i16s).
1657 if (Scale != 1 && Scale * 8 != ExtSize)
1658 return ScalarCost;
1659 // And we need to zext (not sext) the indexes from a small enough type.
1660 if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
1661 if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
1662 return VectorCost;
1663 }
1664 return ScalarCost;
1665 }
1666 return ScalarCost;
1667 }
1668
1669 InstructionCost
1670 ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
1671 std::optional<FastMathFlags> FMF,
1672 TTI::TargetCostKind CostKind) {
1673
1674 EVT ValVT = TLI->getValueType(DL, ValTy);
1675 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1676 unsigned EltSize = ValVT.getScalarSizeInBits();
1677
1678 // In general floating point reductions are a series of elementwise
1679 // operations, with free extracts on each step. These are either in-order or
1680 // treewise depending on whether that is allowed by the fast math flags.
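// Worked example (fast-math, MVE float): reducing fadd over <8 x float> is
// costed as one v4f32 vector add (256 bits -> 128 bits) plus NumElts (= 4)
// scalar adds for the remaining lanes.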
1681 if ((ISD == ISD::FADD || ISD == ISD::FMUL) &&
1682 ((EltSize == 32 && ST->hasVFP2Base()) ||
1683 (EltSize == 64 && ST->hasFP64()) ||
1684 (EltSize == 16 && ST->hasFullFP16()))) {
1685 unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
1686 unsigned VecLimit = ST->hasMVEFloatOps() ? 128 : (ST->hasNEON() ? 64 : -1);
1687 InstructionCost VecCost = 0;
1688 while (!TTI::requiresOrderedReduction(FMF) && isPowerOf2_32(NumElts) &&
1689 NumElts * EltSize > VecLimit) {
1690 Type *VecTy = FixedVectorType::get(ValTy->getElementType(), NumElts / 2);
1691 VecCost += getArithmeticInstrCost(Opcode, VecTy, CostKind);
1692 NumElts /= 2;
1693 }
1694
1695 // For fp16 we need to extract the upper lane elements. MVE can add a
1696 // VREV+FADD/FMUL to perform another vector step instead.
1697 InstructionCost ExtractCost = 0;
1698 if (!TTI::requiresOrderedReduction(FMF) && ST->hasMVEFloatOps() &&
1699 ValVT.getVectorElementType() == MVT::f16 && NumElts == 8) {
1700 VecCost += ST->getMVEVectorCostFactor(CostKind) * 2;
1701 NumElts /= 2;
1702 } else if (ValVT.getVectorElementType() == MVT::f16)
1703 ExtractCost = NumElts / 2;
1704
1705 return VecCost + ExtractCost +
1706 NumElts *
1707 getArithmeticInstrCost(Opcode, ValTy->getElementType(), CostKind);
1708 }
1709
1710 if ((ISD == ISD::AND || ISD == ISD::OR || ISD == ISD::XOR) &&
1711 (EltSize == 64 || EltSize == 32 || EltSize == 16 || EltSize == 8)) {
1712 unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
1713 unsigned VecLimit =
1714 ST->hasMVEIntegerOps() ? 128 : (ST->hasNEON() ? 64 : -1);
1715 InstructionCost VecCost = 0;
1716 while (isPowerOf2_32(NumElts) && NumElts * EltSize > VecLimit) {
1717 Type *VecTy = FixedVectorType::get(ValTy->getElementType(), NumElts / 2);
1718 VecCost += getArithmeticInstrCost(Opcode, VecTy, CostKind);
1719 NumElts /= 2;
1720 }
1721 // For i16/i8, MVE will perform a VREV + VORR/VAND/VEOR for the 64bit vector
1722 // step.
1723 if (ST->hasMVEIntegerOps() && ValVT.getScalarSizeInBits() <= 16 &&
1724 NumElts * EltSize == 64) {
1725 Type *VecTy = FixedVectorType::get(ValTy->getElementType(), NumElts);
1726 VecCost += ST->getMVEVectorCostFactor(CostKind) +
1727 getArithmeticInstrCost(Opcode, VecTy, CostKind);
1728 NumElts /= 2;
1729 }
1730
1731 // From here we extract the elements and perform the and/or/xor.
1732 InstructionCost ExtractCost = NumElts;
1733 return VecCost + ExtractCost +
1734 (NumElts - 1) * getArithmeticInstrCost(
1735 Opcode, ValTy->getElementType(), CostKind);
1736 }
1737
1738 if (!ST->hasMVEIntegerOps() || !ValVT.isSimple() || ISD != ISD::ADD ||
1739 TTI::requiresOrderedReduction(FMF))
1740 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
1741
1742 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1743
1744 static const CostTblEntry CostTblAdd[]{
1745 {ISD::ADD, MVT::v16i8, 1},
1746 {ISD::ADD, MVT::v8i16, 1},
1747 {ISD::ADD, MVT::v4i32, 1},
1748 };
1749 if (const auto *Entry = CostTableLookup(CostTblAdd, ISD, LT.second))
1750 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind) * LT.first;
1751
1752 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
1753 }
1754
1755 InstructionCost ARMTTIImpl::getExtendedReductionCost(
1756 unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy,
1757 FastMathFlags FMF, TTI::TargetCostKind CostKind) {
1758 EVT ValVT = TLI->getValueType(DL, ValTy);
1759 EVT ResVT = TLI->getValueType(DL, ResTy);
1760
1761 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1762
1763 switch (ISD) {
1764 case ISD::ADD:
1765 if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
1766 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1767
1768 // The legal cases are:
1769 // VADDV u/s 8/16/32
1770 // VADDLV u/s 32
1771 // Codegen currently cannot always handle larger than legal vectors very
1772 // well, especially for predicated reductions where the mask needs to be
1773 // split, so restrict to 128bit or smaller input types.
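// For example, an add reduction of <8 x i16> into an i32 result can map to
// a single VADDV.U16/S16, while a <4 x i32> reduction into an i64
// accumulator needs the VADDLV forms.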
1774 unsigned RevVTSize = ResVT.getSizeInBits();
1775 if (ValVT.getSizeInBits() <= 128 &&
1776 ((LT.second == MVT::v16i8 && RevVTSize <= 32) ||
1777 (LT.second == MVT::v8i16 && RevVTSize <= 32) ||
1778 (LT.second == MVT::v4i32 && RevVTSize <= 64)))
1779 return ST->getMVEVectorCostFactor(CostKind) * LT.first;
1780 }
1781 break;
1782 default:
1783 break;
1784 }
1785 return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, ValTy, FMF,
1786 CostKind);
1787 }
1788
1789 InstructionCost
1790 ARMTTIImpl::getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
1791 VectorType *ValTy,
1792 TTI::TargetCostKind CostKind) {
1793 EVT ValVT = TLI->getValueType(DL, ValTy);
1794 EVT ResVT = TLI->getValueType(DL, ResTy);
1795
1796 if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
1797 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1798
1799 // The legal cases are:
1800 // VMLAV u/s 8/16/32
1801 // VMLALV u/s 16/32
1802 // Codegen currently cannot always handle larger than legal vectors very
1803 // well, especially for predicated reductions where the mask needs to be
1804 // split, so restrict to 128bit or smaller input types.
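// For example, a reduce(add(mul(zext a, zext b))) of <8 x i16> into an i32
// can use VMLAV.U16/S16; with a 64-bit accumulator the VMLALV forms are
// used instead.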
1805 unsigned RevVTSize = ResVT.getSizeInBits();
1806 if (ValVT.getSizeInBits() <= 128 &&
1807 ((LT.second == MVT::v16i8 && RevVTSize <= 32) ||
1808 (LT.second == MVT::v8i16 && RevVTSize <= 64) ||
1809 (LT.second == MVT::v4i32 && RevVTSize <= 64)))
1810 return ST->getMVEVectorCostFactor(CostKind) * LT.first;
1811 }
1812
1813 return BaseT::getMulAccReductionCost(IsUnsigned, ResTy, ValTy, CostKind);
1814 }
1815
1816 InstructionCost
1817 ARMTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
1818 FastMathFlags FMF,
1819 TTI::TargetCostKind CostKind) {
1820 EVT ValVT = TLI->getValueType(DL, Ty);
1821
1822 // In general floating point reductions are a series of elementwise
1823 // operations, with free extracts on each step. These are either in-order or
1824 // treewise depending on whether that is allowed by the fast math flags.
1825 if ((IID == Intrinsic::minnum || IID == Intrinsic::maxnum) &&
1826 ((ValVT.getVectorElementType() == MVT::f32 && ST->hasVFP2Base()) ||
1827 (ValVT.getVectorElementType() == MVT::f64 && ST->hasFP64()) ||
1828 (ValVT.getVectorElementType() == MVT::f16 && ST->hasFullFP16()))) {
1829 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1830 unsigned EltSize = ValVT.getScalarSizeInBits();
1831 unsigned VecLimit = ST->hasMVEFloatOps() ? 128 : (ST->hasNEON() ? 64 : -1);
1832 InstructionCost VecCost;
1833 while (isPowerOf2_32(NumElts) && NumElts * EltSize > VecLimit) {
1834 Type *VecTy = FixedVectorType::get(Ty->getElementType(), NumElts/2);
1835 IntrinsicCostAttributes ICA(IID, VecTy, {VecTy, VecTy}, FMF);
1836 VecCost += getIntrinsicInstrCost(ICA, CostKind);
1837 NumElts /= 2;
1838 }
1839
1840 // For fp16 we need to extract the upper lane elements. MVE can add a
1841 // VREV+FMIN/MAX to perform another vector step instead.
1842 InstructionCost ExtractCost = 0;
1843 if (ST->hasMVEFloatOps() && ValVT.getVectorElementType() == MVT::f16 &&
1844 NumElts == 8) {
1845 VecCost += ST->getMVEVectorCostFactor(CostKind) * 2;
1846 NumElts /= 2;
1847 } else if (ValVT.getVectorElementType() == MVT::f16)
1848 ExtractCost = cast<FixedVectorType>(Ty)->getNumElements() / 2;
1849
1850 IntrinsicCostAttributes ICA(IID, Ty->getElementType(),
1851 {Ty->getElementType(), Ty->getElementType()},
1852 FMF);
1853 return VecCost + ExtractCost +
1854 (NumElts - 1) * getIntrinsicInstrCost(ICA, CostKind);
1855 }
1856
1857 if (IID == Intrinsic::smin || IID == Intrinsic::smax ||
1858 IID == Intrinsic::umin || IID == Intrinsic::umax) {
1859 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
1860
1861 // All costs are the same for u/s min/max. These lower to vminv, which are
1862 // given a slightly higher cost as they tend to take multiple cycles for
1863 // smaller type sizes.
1864 static const CostTblEntry CostTblAdd[]{
1865 {ISD::SMIN, MVT::v16i8, 4},
1866 {ISD::SMIN, MVT::v8i16, 3},
1867 {ISD::SMIN, MVT::v4i32, 2},
1868 };
1869 if (const auto *Entry = CostTableLookup(CostTblAdd, ISD::SMIN, LT.second))
1870 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind) * LT.first;
1871 }
1872
1873 return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);
1874 }
1875
1876 InstructionCost
1877 ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1878 TTI::TargetCostKind CostKind) {
1879 switch (ICA.getID()) {
1880 case Intrinsic::get_active_lane_mask:
1881 // Currently we make a somewhat optimistic assumption that
1882 // active_lane_mask's are always free. In reality it may be freely folded
1883 // into a tail predicated loop, expanded into a VCTP or expanded into a lot
1884 // of add/icmp code. We may need to improve this in the future, but being
1885 // able to detect if it is free or not involves looking at a lot of other
1886 // code. We currently assume that the vectorizer inserted these, and knew
1887 // what it was doing in adding one.
1888 if (ST->hasMVEIntegerOps())
1889 return 0;
1890 break;
1891 case Intrinsic::sadd_sat:
1892 case Intrinsic::ssub_sat:
1893 case Intrinsic::uadd_sat:
1894 case Intrinsic::usub_sat: {
1895 if (!ST->hasMVEIntegerOps())
1896 break;
1897 Type *VT = ICA.getReturnType();
1898
1899 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VT);
1900 if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1901 LT.second == MVT::v16i8) {
1902 // This is a base cost of 1 for the vqadd, plus 3 extra shifts if we
1903 // need to extend the type, as it uses shr(qadd(shl, shl)).
1904 unsigned Instrs =
1905 LT.second.getScalarSizeInBits() == VT->getScalarSizeInBits() ? 1 : 4;
1906 return LT.first * ST->getMVEVectorCostFactor(CostKind) * Instrs;
1907 }
1908 break;
1909 }
1910 case Intrinsic::abs:
1911 case Intrinsic::smin:
1912 case Intrinsic::smax:
1913 case Intrinsic::umin:
1914 case Intrinsic::umax: {
1915 if (!ST->hasMVEIntegerOps())
1916 break;
1917 Type *VT = ICA.getReturnType();
1918
1919 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VT);
1920 if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1921 LT.second == MVT::v16i8)
1922 return LT.first * ST->getMVEVectorCostFactor(CostKind);
1923 break;
1924 }
1925 case Intrinsic::minnum:
1926 case Intrinsic::maxnum: {
1927 if (!ST->hasMVEFloatOps())
1928 break;
1929 Type *VT = ICA.getReturnType();
1930 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VT);
1931 if (LT.second == MVT::v4f32 || LT.second == MVT::v8f16)
1932 return LT.first * ST->getMVEVectorCostFactor(CostKind);
1933 break;
1934 }
1935 case Intrinsic::fptosi_sat:
1936 case Intrinsic::fptoui_sat: {
1937 if (ICA.getArgTypes().empty())
1938 break;
1939 bool IsSigned = ICA.getID() == Intrinsic::fptosi_sat;
1940 auto LT = getTypeLegalizationCost(ICA.getArgTypes()[0]);
1941 EVT MTy = TLI->getValueType(DL, ICA.getReturnType());
1942 // Check for the legal types, with the correct subtarget features.
1943 if ((ST->hasVFP2Base() && LT.second == MVT::f32 && MTy == MVT::i32) ||
1944 (ST->hasFP64() && LT.second == MVT::f64 && MTy == MVT::i32) ||
1945 (ST->hasFullFP16() && LT.second == MVT::f16 && MTy == MVT::i32))
1946 return LT.first;
1947
1948 // Equally for MVE vector types
1949 if (ST->hasMVEFloatOps() &&
1950 (LT.second == MVT::v4f32 || LT.second == MVT::v8f16) &&
1951 LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits())
1952 return LT.first * ST->getMVEVectorCostFactor(CostKind);
1953
1954 // Otherwise we use a legal convert followed by a min+max
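// E.g. llvm.fptosi.sat.i8.f32 is costed as a legal f32->i32 convert plus an
// i32 smin/smax pair that clamps the value to the i8 range.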
1955 if (((ST->hasVFP2Base() && LT.second == MVT::f32) ||
1956 (ST->hasFP64() && LT.second == MVT::f64) ||
1957 (ST->hasFullFP16() && LT.second == MVT::f16) ||
1958 (ST->hasMVEFloatOps() &&
1959 (LT.second == MVT::v4f32 || LT.second == MVT::v8f16))) &&
1960 LT.second.getScalarSizeInBits() >= MTy.getScalarSizeInBits()) {
1961 Type *LegalTy = Type::getIntNTy(ICA.getReturnType()->getContext(),
1962 LT.second.getScalarSizeInBits());
1963 InstructionCost Cost =
1964 LT.second.isVector() ? ST->getMVEVectorCostFactor(CostKind) : 1;
1965 IntrinsicCostAttributes Attrs1(IsSigned ? Intrinsic::smin
1966 : Intrinsic::umin,
1967 LegalTy, {LegalTy, LegalTy});
1968 Cost += getIntrinsicInstrCost(Attrs1, CostKind);
1969 IntrinsicCostAttributes Attrs2(IsSigned ? Intrinsic::smax
1970 : Intrinsic::umax,
1971 LegalTy, {LegalTy, LegalTy});
1972 Cost += getIntrinsicInstrCost(Attrs2, CostKind);
1973 return LT.first * Cost;
1974 }
1975 break;
1976 }
1977 }
1978
1979 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1980 }
1981
1982 bool ARMTTIImpl::isLoweredToCall(const Function *F) {
1983 if (!F->isIntrinsic())
1984 return BaseT::isLoweredToCall(F);
1985
1986 // Assume all Arm-specific intrinsics map to an instruction.
1987 if (F->getName().starts_with("llvm.arm"))
1988 return false;
1989
1990 switch (F->getIntrinsicID()) {
1991 default: break;
1992 case Intrinsic::powi:
1993 case Intrinsic::sin:
1994 case Intrinsic::cos:
1995 case Intrinsic::pow:
1996 case Intrinsic::log:
1997 case Intrinsic::log10:
1998 case Intrinsic::log2:
1999 case Intrinsic::exp:
2000 case Intrinsic::exp2:
2001 return true;
2002 case Intrinsic::sqrt:
2003 case Intrinsic::fabs:
2004 case Intrinsic::copysign:
2005 case Intrinsic::floor:
2006 case Intrinsic::ceil:
2007 case Intrinsic::trunc:
2008 case Intrinsic::rint:
2009 case Intrinsic::nearbyint:
2010 case Intrinsic::round:
2011 case Intrinsic::canonicalize:
2012 case Intrinsic::lround:
2013 case Intrinsic::llround:
2014 case Intrinsic::lrint:
2015 case Intrinsic::llrint:
2016 if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
2017 return true;
2018 if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
2019 return true;
2020 // Some operations can be handled by vector instructions; assume
2021 // unsupported vectors will be expanded into supported scalar ones.
2022 // TODO: Handle scalar operations properly.
2023 return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
2024 case Intrinsic::masked_store:
2025 case Intrinsic::masked_load:
2026 case Intrinsic::masked_gather:
2027 case Intrinsic::masked_scatter:
2028 return !ST->hasMVEIntegerOps();
2029 case Intrinsic::sadd_with_overflow:
2030 case Intrinsic::uadd_with_overflow:
2031 case Intrinsic::ssub_with_overflow:
2032 case Intrinsic::usub_with_overflow:
2033 case Intrinsic::sadd_sat:
2034 case Intrinsic::uadd_sat:
2035 case Intrinsic::ssub_sat:
2036 case Intrinsic::usub_sat:
2037 return false;
2038 }
2039
2040 return BaseT::isLoweredToCall(F);
2041 }
2042
2043 bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) {
2044 unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
2045 EVT VT = TLI->getValueType(DL, I.getType(), true);
2046 if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
2047 return true;
2048
2049 // Check if an intrinsic will be lowered to a call and assume that any
2050 // other CallInst will generate a bl.
2051 if (auto *Call = dyn_cast<CallInst>(&I)) {
2052 if (auto *II = dyn_cast<IntrinsicInst>(Call)) {
2053 switch(II->getIntrinsicID()) {
2054 case Intrinsic::memcpy:
2055 case Intrinsic::memset:
2056 case Intrinsic::memmove:
2057 return getNumMemOps(II) == -1;
2058 default:
2059 if (const Function *F = Call->getCalledFunction())
2060 return isLoweredToCall(F);
2061 }
2062 }
2063 return true;
2064 }
2065
2066 // FPv5 provides conversions between integer, double-precision,
2067 // single-precision, and half-precision formats.
2068 switch (I.getOpcode()) {
2069 default:
2070 break;
2071 case Instruction::FPToSI:
2072 case Instruction::FPToUI:
2073 case Instruction::SIToFP:
2074 case Instruction::UIToFP:
2075 case Instruction::FPTrunc:
2076 case Instruction::FPExt:
2077 return !ST->hasFPARMv8Base();
2078 }
2079
2080 // FIXME: Unfortunately the approach of checking the Operation Action does
2081 // not catch all cases of Legalization that use library calls. Our
2082 // Legalization step categorizes some transformations into library calls as
2083 // Custom, Expand or even Legal when doing type legalization. So for now
2084 // we have to special case for instance the SDIV of 64bit integers and the
2085 // use of floating point emulation.
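// For example, a 64-bit sdiv/srem typically ends up as a call to the
// __aeabi_ldivmod runtime routine (or __aeabi_uldivmod for the unsigned
// forms), so treat it as a call here.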
2086 if (VT.isInteger() && VT.getSizeInBits() >= 64) {
2087 switch (ISD) {
2088 default:
2089 break;
2090 case ISD::SDIV:
2091 case ISD::UDIV:
2092 case ISD::SREM:
2093 case ISD::UREM:
2094 case ISD::SDIVREM:
2095 case ISD::UDIVREM:
2096 return true;
2097 }
2098 }
2099
2100 // Assume all other non-float operations are supported.
2101 if (!VT.isFloatingPoint())
2102 return false;
2103
2104 // We'll need a library call to handle most floats when using soft.
2105 if (TLI->useSoftFloat()) {
2106 switch (I.getOpcode()) {
2107 default:
2108 return true;
2109 case Instruction::Alloca:
2110 case Instruction::Load:
2111 case Instruction::Store:
2112 case Instruction::Select:
2113 case Instruction::PHI:
2114 return false;
2115 }
2116 }
2117
2118 // We'll need a libcall to perform double precision operations on a single
2119 // precision only FPU.
2120 if (I.getType()->isDoubleTy() && !ST->hasFP64())
2121 return true;
2122
2123 // Likewise for half precision arithmetic.
2124 if (I.getType()->isHalfTy() && !ST->hasFullFP16())
2125 return true;
2126
2127 return false;
2128 }
2129
2130 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
2131 AssumptionCache &AC,
2132 TargetLibraryInfo *LibInfo,
2133 HardwareLoopInfo &HWLoopInfo) {
2134 // Low-overhead branches are only supported in the 'low-overhead branch'
2135 // extension of v8.1-m.
2136 if (!ST->hasLOB() || DisableLowOverheadLoops) {
2137 LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
2138 return false;
2139 }
2140
2141 if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
2142 LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
2143 return false;
2144 }
2145
2146 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
2147 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
2148 LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
2149 return false;
2150 }
2151
2152 const SCEV *TripCountSCEV =
2153 SE.getAddExpr(BackedgeTakenCount,
2154 SE.getOne(BackedgeTakenCount->getType()));
2155
2156 // We need to store the trip count in LR, a 32-bit register.
2157 if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
2158 LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n");
2159 return false;
2160 }
2161
2162 // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
2163 // point in generating a hardware loop if that's going to happen.
2164
2165 auto IsHardwareLoopIntrinsic = [](Instruction &I) {
2166 if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
2167 switch (Call->getIntrinsicID()) {
2168 default:
2169 break;
2170 case Intrinsic::start_loop_iterations:
2171 case Intrinsic::test_start_loop_iterations:
2172 case Intrinsic::loop_decrement:
2173 case Intrinsic::loop_decrement_reg:
2174 return true;
2175 }
2176 }
2177 return false;
2178 };
2179
2180 // Scan the instructions to see if there's any that we know will turn into a
2181 // call or if this loop is already a low-overhead loop or will become a tail
2182 // predicated loop.
2183 bool IsTailPredLoop = false;
2184 auto ScanLoop = [&](Loop *L) {
2185 for (auto *BB : L->getBlocks()) {
2186 for (auto &I : *BB) {
2187 if (maybeLoweredToCall(I) || IsHardwareLoopIntrinsic(I) ||
2188 isa<InlineAsm>(I)) {
2189 LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
2190 return false;
2191 }
2192 if (auto *II = dyn_cast<IntrinsicInst>(&I))
2193 IsTailPredLoop |=
2194 II->getIntrinsicID() == Intrinsic::get_active_lane_mask ||
2195 II->getIntrinsicID() == Intrinsic::arm_mve_vctp8 ||
2196 II->getIntrinsicID() == Intrinsic::arm_mve_vctp16 ||
2197 II->getIntrinsicID() == Intrinsic::arm_mve_vctp32 ||
2198 II->getIntrinsicID() == Intrinsic::arm_mve_vctp64;
2199 }
2200 }
2201 return true;
2202 };
2203
2204 // Visit inner loops.
2205 for (auto *Inner : *L)
2206 if (!ScanLoop(Inner))
2207 return false;
2208
2209 if (!ScanLoop(L))
2210 return false;
2211
2212 // TODO: Check whether the trip count calculation is expensive. If L is the
2213 // inner loop but we know it has a low trip count, calculating that trip
2214 // count (in the parent loop) may be detrimental.
2215
2216 LLVMContext &C = L->getHeader()->getContext();
2217 HWLoopInfo.CounterInReg = true;
2218 HWLoopInfo.IsNestingLegal = false;
2219 HWLoopInfo.PerformEntryTest = AllowWLSLoops && !IsTailPredLoop;
2220 HWLoopInfo.CountType = Type::getInt32Ty(C);
2221 HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
2222 return true;
2223 }
2224
2225 static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
2226 // We don't allow icmps, and because we only look at single-block loops,
2227 // we simply count the icmps, i.e. there should only be 1 for the backedge.
2228 if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
2229 return false;
2230 // FIXME: This is a workaround for poor cost modelling. Min/Max intrinsics are
2231 // not currently canonical, but soon will be. Code without them uses icmp, and
2232 // so is not tail predicated as per the condition above. In order to get the
2233 // same performance we treat min and max the same as an icmp for tailpred
2234 // purposes for the moment (we often rely on non-tailpred and higher VFs to
2235 // pick more optimal instructions like VQDMULH. They need to be recognized
2236 // directly by the vectorizer).
2237 if (auto *II = dyn_cast<IntrinsicInst>(&I))
2238 if ((II->getIntrinsicID() == Intrinsic::smin ||
2239 II->getIntrinsicID() == Intrinsic::smax ||
2240 II->getIntrinsicID() == Intrinsic::umin ||
2241 II->getIntrinsicID() == Intrinsic::umax) &&
2242 ++ICmpCount > 1)
2243 return false;
2244
2245 if (isa<FCmpInst>(&I))
2246 return false;
2247
2248 // We could allow extending/narrowing FP loads/stores, but codegen is
2249 // too inefficient so reject this for now.
2250 if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
2251 return false;
2252
2253 // Extends have to be extending-loads
2254 if (isa<SExtInst>(&I) || isa<ZExtInst>(&I) )
2255 if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
2256 return false;
2257
2258 // Truncs have to be narrowing-stores
2259 if (isa<TruncInst>(&I) )
2260 if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
2261 return false;
2262
2263 return true;
2264 }
2265
2266 // To set up a tail-predicated loop, we need to know the total number of
2267 // elements processed by that loop. Thus, we need to determine the element
2268 // size and:
2269 // 1) it should be uniform for all operations in the vector loop, so we
2270 // e.g. don't want any widening/narrowing operations.
2271 // 2) it should be smaller than i64s because we don't have vector operations
2272 // that work on i64s.
2273 // 3) we don't want elements to be reversed or shuffled, to make sure the
2274 // tail-predication masks/predicates the right lanes.
2275 //
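// For example, a loop that loads i32 elements, adds them and stores i32
// results keeps a single element size throughout and can be tail-predicated;
// a loop mixing i32 and i64 operations cannot (rules 1 and 2 above).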
2276 static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
2277 const DataLayout &DL,
2278 const LoopAccessInfo *LAI) {
2279 LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");
2280
2281 // If there are live-out values, it is probably a reduction. We can predicate
2282 // most reduction operations freely under MVE using a combination of
2283 // prefer-predicated-reduction-select and inloop reductions. We limit this to
2284 // floating point and integer reductions, but don't check for operators
2285 // specifically here. If the value ends up not being a reduction (and so the
2286 // vectorizer cannot tailfold the loop), we should fall back to standard
2287 // vectorization automatically.
2288 SmallVector<Instruction *, 8> LiveOuts = llvm::findDefsUsedOutsideOfLoop(L);
2290 bool ReductionsDisabled =
2291 EnableTailPredication == TailPredication::EnabledNoReductions ||
2292 EnableTailPredication == TailPredication::ForceEnabledNoReductions;
2293
2294 for (auto *I : LiveOuts) {
2295 if (!I->getType()->isIntegerTy() && !I->getType()->isFloatTy() &&
2296 !I->getType()->isHalfTy()) {
2297 LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer/float "
2298 "live-out value\n");
2299 return false;
2300 }
2301 if (ReductionsDisabled) {
2302 LLVM_DEBUG(dbgs() << "Reductions not enabled\n");
2303 return false;
2304 }
2305 }
2306
2307 // Next, check that all instructions can be tail-predicated.
2308 PredicatedScalarEvolution PSE = LAI->getPSE();
2309 SmallVector<Instruction *, 16> LoadStores;
2310 int ICmpCount = 0;
2311
2312 for (BasicBlock *BB : L->blocks()) {
2313 for (Instruction &I : BB->instructionsWithoutDebug()) {
2314 if (isa<PHINode>(&I))
2315 continue;
2316 if (!canTailPredicateInstruction(I, ICmpCount)) {
2317 LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
2318 return false;
2319 }
2320
2321 Type *T = I.getType();
2322 if (T->getScalarSizeInBits() > 32) {
2323 LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
2324 return false;
2325 }
2326 if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
2327 Value *Ptr = getLoadStorePointerOperand(&I);
2328 Type *AccessTy = getLoadStoreType(&I);
2329 int64_t NextStride = getPtrStride(PSE, AccessTy, Ptr, L).value_or(0);
2330 if (NextStride == 1) {
2331 // TODO: for now only allow consecutive strides of 1. We could support
2332 // other strides as long as it is uniform, but let's keep it simple
2333 // for now.
2334 continue;
2335 } else if (NextStride == -1 ||
2336 (NextStride == 2 && MVEMaxSupportedInterleaveFactor >= 2) ||
2337 (NextStride == 4 && MVEMaxSupportedInterleaveFactor >= 4)) {
2338 LLVM_DEBUG(dbgs()
2339 << "Consecutive strides of 2 found, vld2/vst2 can't "
2340 "be tail-predicated.\n");
2341 return false;
2342 // TODO: don't tail predicate if there is a reversed load?
2343 } else if (EnableMaskedGatherScatters) {
2344 // Gather/scatters do allow loading from arbitrary strides, at
2345 // least if they are loop invariant.
2346 // TODO: Loop variant strides should in theory work, too, but
2347 // this requires further testing.
2348 const SCEV *PtrScev = PSE.getSE()->getSCEV(Ptr);
2349 if (auto AR = dyn_cast<SCEVAddRecExpr>(PtrScev)) {
2350 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
2351 if (PSE.getSE()->isLoopInvariant(Step, L))
2352 continue;
2353 }
2354 }
2355 LLVM_DEBUG(dbgs() << "Bad stride found, can't "
2356 "tail-predicate.\n");
2357 return false;
2358 }
2359 }
2360 }
2361
2362 LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
2363 return true;
2364 }
2365
2366 bool ARMTTIImpl::preferPredicateOverEpilogue(TailFoldingInfo *TFI) {
2367 if (!EnableTailPredication) {
2368 LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
2369 return false;
2370 }
2371
2372 // Creating a predicated vector loop is the first step for generating a
2373 // tail-predicated hardware loop, for which we need the MVE masked
2374 // load/stores instructions:
2375 if (!ST->hasMVEIntegerOps())
2376 return false;
2377
2378 LoopVectorizationLegality *LVL = TFI->LVL;
2379 Loop *L = LVL->getLoop();
2380
2381 // For now, restrict this to single block loops.
2382 if (L->getNumBlocks() > 1) {
2383 LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
2384 "loop.\n");
2385 return false;
2386 }
2387
2388 assert(L->isInnermost() && "preferPredicateOverEpilogue: inner-loop expected");
2389
2390 LoopInfo *LI = LVL->getLoopInfo();
2391 HardwareLoopInfo HWLoopInfo(L);
2392 if (!HWLoopInfo.canAnalyze(*LI)) {
2393 LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2394 "analyzable.\n");
2395 return false;
2396 }
2397
2398 AssumptionCache *AC = LVL->getAssumptionCache();
2399 ScalarEvolution *SE = LVL->getScalarEvolution();
2400
2401 // This checks if we have the low-overhead branch architecture
2402 // extension, and if we will create a hardware-loop:
2403 if (!isHardwareLoopProfitable(L, *SE, *AC, TFI->TLI, HWLoopInfo)) {
2404 LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2405 "profitable.\n");
2406 return false;
2407 }
2408
2409 DominatorTree *DT = LVL->getDominatorTree();
2410 if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT)) {
2411 LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2412 "a candidate.\n");
2413 return false;
2414 }
2415
2416 return canTailPredicateLoop(L, LI, *SE, DL, LVL->getLAI());
2417 }
2418
2419 TailFoldingStyle
2420 ARMTTIImpl::getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const {
2421 if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
2422 return TailFoldingStyle::DataWithoutLaneMask;
2423
2424 // Intrinsic @llvm.get.active.lane.mask is supported.
2425 // It is used in the MVETailPredication pass, which requires the number of
2426 // elements processed by this vector loop to setup the tail-predicated
2427 // loop.
2428 return TailFoldingStyle::Data;
2429 }
2430 void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
2431 TTI::UnrollingPreferences &UP,
2432 OptimizationRemarkEmitter *ORE) {
2433 // Enable upper-bound unrolling universally, provided that we do not see an
2434 // active lane mask, which will be better kept as a loop to become tail
2435 // predicated than to be conditionally unrolled.
2436 UP.UpperBound =
2437 !ST->hasMVEIntegerOps() || !any_of(*L->getHeader(), [](Instruction &I) {
2438 return isa<IntrinsicInst>(I) &&
2439 cast<IntrinsicInst>(I).getIntrinsicID() ==
2440 Intrinsic::get_active_lane_mask;
2441 });
2442
2443 // Only currently enable these preferences for M-Class cores.
2444 if (!ST->isMClass())
2445 return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP, ORE);
2446
2447 // Disable loop unrolling for Oz and Os.
2448 UP.OptSizeThreshold = 0;
2449 UP.PartialOptSizeThreshold = 0;
2450 if (L->getHeader()->getParent()->hasOptSize())
2451 return;
2452
2453 SmallVector<BasicBlock*, 4> ExitingBlocks;
2454 L->getExitingBlocks(ExitingBlocks);
2455 LLVM_DEBUG(dbgs() << "Loop has:\n"
2456 << "Blocks: " << L->getNumBlocks() << "\n"
2457 << "Exit blocks: " << ExitingBlocks.size() << "\n");
2458
2459 // Only allow one exit other than the latch. This acts as an early exit
2460 // as it mirrors the profitability calculation of the runtime unroller.
2461 if (ExitingBlocks.size() > 2)
2462 return;
2463
2464 // Limit the CFG of the loop body for targets with a branch predictor.
2465 // Allowing 4 blocks permits if-then-else diamonds in the body.
2466 if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
2467 return;
2468
2469 // Don't unroll vectorized loops, including the remainder loop
2470 if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
2471 return;
2472
2473 // Scan the loop: don't unroll loops with calls as this could prevent
2474 // inlining.
2475 InstructionCost Cost = 0;
2476 for (auto *BB : L->getBlocks()) {
2477 for (auto &I : *BB) {
2478 // Don't unroll vectorised loops. MVE does not benefit from it as much as
2479 // scalar code.
2480 if (I.getType()->isVectorTy())
2481 return;
2482
2483 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
2484 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
2485 if (!isLoweredToCall(F))
2486 continue;
2487 }
2488 return;
2489 }
2490
2491 SmallVector<const Value*, 4> Operands(I.operand_values());
2492 Cost += getInstructionCost(&I, Operands,
2493 TargetTransformInfo::TCK_SizeAndLatency);
2494 }
2495 }
2496
2497 // On v6m cores, there are very few registers available. We can easily end up
2498 // spilling and reloading more registers in an unrolled loop. Look at the
2499 // number of LCSSA phis as a rough measure of how many registers will need to
2500 // be live out of the loop, reducing the default unroll count if more than 1
2501 // value is needed. In the long run, all of this should be learnt by a
2502 // machine.
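// Worked example: with a default count of 4, two live-out values halve it
// to 2, while three or more drop it to 1 and runtime unrolling is skipped.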
2503 unsigned UnrollCount = 4;
2504 if (ST->isThumb1Only()) {
2505 unsigned ExitingValues = 0;
2506 SmallVector<BasicBlock *, 4> ExitBlocks;
2507 L->getExitBlocks(ExitBlocks);
2508 for (auto *Exit : ExitBlocks) {
2509 // Count the number of LCSSA phis. Exclude values coming from GEP's as
2510 // only the last is expected to be needed for address operands.
2511 unsigned LiveOuts = count_if(Exit->phis(), [](auto &PH) {
2512 return PH.getNumOperands() != 1 ||
2513 !isa<GetElementPtrInst>(PH.getOperand(0));
2514 });
2515 ExitingValues = ExitingValues < LiveOuts ? LiveOuts : ExitingValues;
2516 }
2517 if (ExitingValues)
2518 UnrollCount /= ExitingValues;
2519 if (UnrollCount <= 1)
2520 return;
2521 }
2522
2523 LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
2524 LLVM_DEBUG(dbgs() << "Default Runtime Unroll Count: " << UnrollCount << "\n");
2525
2526 UP.Partial = true;
2527 UP.Runtime = true;
2528 UP.UnrollRemainder = true;
2529 UP.DefaultUnrollRuntimeCount = UnrollCount;
2530 UP.UnrollAndJam = true;
2531 UP.UnrollAndJamInnerLoopThreshold = 60;
2532
2533 // Forcing unrolling of small loops can be very useful because of the
2534 // branch-taken cost of the backedge.
2535 if (Cost < 12)
2536 UP.Force = true;
2537 }
2538
2539 void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
2540 TTI::PeelingPreferences &PP) {
2541 BaseT::getPeelingPreferences(L, SE, PP);
2542 }
2543
2544 bool ARMTTIImpl::preferInLoopReduction(unsigned Opcode, Type *Ty,
2545 TTI::ReductionFlags Flags) const {
2546 if (!ST->hasMVEIntegerOps())
2547 return false;
2548
2549 unsigned ScalarBits = Ty->getScalarSizeInBits();
2550 switch (Opcode) {
2551 case Instruction::Add:
2552 return ScalarBits <= 64;
2553 default:
2554 return false;
2555 }
2556 }
2557
2558 bool ARMTTIImpl::preferPredicatedReductionSelect(
2559 unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const {
2560 if (!ST->hasMVEIntegerOps())
2561 return false;
2562 return true;
2563 }
2564
2565 InstructionCost ARMTTIImpl::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
2566 int64_t BaseOffset,
2567 bool HasBaseReg, int64_t Scale,
2568 unsigned AddrSpace) const {
2569 TargetLoweringBase::AddrMode AM;
2570 AM.BaseGV = BaseGV;
2571 AM.BaseOffs = BaseOffset;
2572 AM.HasBaseReg = HasBaseReg;
2573 AM.Scale = Scale;
2574 if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace)) {
2575 if (ST->hasFPAO())
2576 return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster
2577 return 0;
2578 }
2579 return -1;
2580 }
2581
2582 bool ARMTTIImpl::hasArmWideBranch(bool Thumb) const {
2583 if (Thumb) {
2584 // B.W is available in any Thumb2-supporting target, and also in every
2585 // version of Armv8-M, even Baseline which does not include the rest of
2586 // Thumb2.
2587 return ST->isThumb2() || ST->hasV8MBaselineOps();
2588 } else {
2589 // B is available in all versions of the Arm ISA, so the only question is
2590 // whether that ISA is available at all.
2591 return ST->hasARMOps();
2592 }
2593 }
2594