1 //===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "AArch64TargetTransformInfo.h"
10 #include "AArch64ExpandImm.h"
11 #include "MCTargetDesc/AArch64AddressingModes.h"
12 #include "llvm/Analysis/LoopInfo.h"
13 #include "llvm/Analysis/TargetTransformInfo.h"
14 #include "llvm/CodeGen/BasicTTIImpl.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/TargetLowering.h"
17 #include "llvm/IR/IntrinsicInst.h"
18 #include "llvm/IR/IntrinsicsAArch64.h"
19 #include "llvm/IR/PatternMatch.h"
20 #include "llvm/Support/Debug.h"
21 #include "llvm/Transforms/InstCombine/InstCombiner.h"
22 #include <algorithm>
23 using namespace llvm;
24 using namespace llvm::PatternMatch;
25 
26 #define DEBUG_TYPE "aarch64tti"
27 
28 static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
29                                                cl::init(true), cl::Hidden);
30 
31 bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
32                                          const Function *Callee) const {
33   const TargetMachine &TM = getTLI()->getTargetMachine();
34 
35   const FeatureBitset &CallerBits =
36       TM.getSubtargetImpl(*Caller)->getFeatureBits();
37   const FeatureBitset &CalleeBits =
38       TM.getSubtargetImpl(*Callee)->getFeatureBits();
39 
40   // Inline a callee if its target-features are a subset of the caller's
41   // target-features.
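  // Illustrative example (not from the original source): a caller built with
  // {+neon,+sve} may inline a callee that needs only {+neon}, but not one that
  // needs {+sve2}, since the AND below would drop the missing +sve2 bit.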
42   return (CallerBits & CalleeBits) == CalleeBits;
43 }
44 
45 /// Calculate the cost of materializing a 64-bit value. This helper
46 /// method might only calculate a fraction of a larger immediate. Therefore it
47 /// is valid to return a cost of ZERO.
48 InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) {
49   // Check if the immediate can be encoded within an instruction.
50   if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
51     return 0;
52 
53   if (Val < 0)
54     Val = ~Val;
55 
56   // Calculate how many moves we will need to materialize this constant.
57   SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
58   AArch64_IMM::expandMOVImm(Val, 64, Insn);
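  // Illustrative example (assumed values): 0x1234 expands to a single MOVZ
  // (cost 1), while 0x0123456789ABCDEF expands to MOVZ + 3 x MOVK (cost 4).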
59   return Insn.size();
60 }
61 
62 /// Calculate the cost of materializing the given constant.
63 InstructionCost AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
64                                               TTI::TargetCostKind CostKind) {
65   assert(Ty->isIntegerTy());
66 
67   unsigned BitSize = Ty->getPrimitiveSizeInBits();
68   if (BitSize == 0)
69     return ~0U;
70 
71   // Sign-extend all constants to a multiple of 64-bit.
72   APInt ImmVal = Imm;
73   if (BitSize & 0x3f)
74     ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
75 
76   // Split the constant into 64-bit chunks and calculate the cost for each
77   // chunk.
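  // For example, a 128-bit constant is costed as the sum of the costs of its
  // low and high 64-bit halves (clamped below to a minimum of 1).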
78   InstructionCost Cost = 0;
79   for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
80     APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
81     int64_t Val = Tmp.getSExtValue();
82     Cost += getIntImmCost(Val);
83   }
84   // We need at least one instruction to materialize the constant.
85   return std::max<InstructionCost>(1, Cost);
86 }
87 
88 InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
89                                                   const APInt &Imm, Type *Ty,
90                                                   TTI::TargetCostKind CostKind,
91                                                   Instruction *Inst) {
92   assert(Ty->isIntegerTy());
93 
94   unsigned BitSize = Ty->getPrimitiveSizeInBits();
95   // There is no cost model for constants with a bit size of 0. Return TCC_Free
96   // here, so that constant hoisting will ignore this constant.
97   if (BitSize == 0)
98     return TTI::TCC_Free;
99 
100   unsigned ImmIdx = ~0U;
101   switch (Opcode) {
102   default:
103     return TTI::TCC_Free;
104   case Instruction::GetElementPtr:
105     // Always hoist the base address of a GetElementPtr.
106     if (Idx == 0)
107       return 2 * TTI::TCC_Basic;
108     return TTI::TCC_Free;
109   case Instruction::Store:
110     ImmIdx = 0;
111     break;
112   case Instruction::Add:
113   case Instruction::Sub:
114   case Instruction::Mul:
115   case Instruction::UDiv:
116   case Instruction::SDiv:
117   case Instruction::URem:
118   case Instruction::SRem:
119   case Instruction::And:
120   case Instruction::Or:
121   case Instruction::Xor:
122   case Instruction::ICmp:
123     ImmIdx = 1;
124     break;
125   // Always return TCC_Free for the shift value of a shift instruction.
126   case Instruction::Shl:
127   case Instruction::LShr:
128   case Instruction::AShr:
129     if (Idx == 1)
130       return TTI::TCC_Free;
131     break;
132   case Instruction::Trunc:
133   case Instruction::ZExt:
134   case Instruction::SExt:
135   case Instruction::IntToPtr:
136   case Instruction::PtrToInt:
137   case Instruction::BitCast:
138   case Instruction::PHI:
139   case Instruction::Call:
140   case Instruction::Select:
141   case Instruction::Ret:
142   case Instruction::Load:
143     break;
144   }
145 
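  // If the immediate sits in the instruction's immediate operand slot and is
  // cheap enough to encode directly (illustrative: "add w0, w0, #42"), report
  // it as free so constant hoisting leaves it in place.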
146   if (Idx == ImmIdx) {
147     int NumConstants = (BitSize + 63) / 64;
148     InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
149     return (Cost <= NumConstants * TTI::TCC_Basic)
150                ? static_cast<int>(TTI::TCC_Free)
151                : Cost;
152   }
153   return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
154 }
155 
156 InstructionCost
157 AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
158                                     const APInt &Imm, Type *Ty,
159                                     TTI::TargetCostKind CostKind) {
160   assert(Ty->isIntegerTy());
161 
162   unsigned BitSize = Ty->getPrimitiveSizeInBits();
163   // There is no cost model for constants with a bit size of 0. Return TCC_Free
164   // here, so that constant hoisting will ignore this constant.
165   if (BitSize == 0)
166     return TTI::TCC_Free;
167 
168   // Most (all?) AArch64 intrinsics do not support folding immediates into the
169   // selected instruction, so we compute the materialization cost for the
170   // immediate directly.
171   if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv)
172     return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
173 
174   switch (IID) {
175   default:
176     return TTI::TCC_Free;
177   case Intrinsic::sadd_with_overflow:
178   case Intrinsic::uadd_with_overflow:
179   case Intrinsic::ssub_with_overflow:
180   case Intrinsic::usub_with_overflow:
181   case Intrinsic::smul_with_overflow:
182   case Intrinsic::umul_with_overflow:
183     if (Idx == 1) {
184       int NumConstants = (BitSize + 63) / 64;
185       InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
186       return (Cost <= NumConstants * TTI::TCC_Basic)
187                  ? static_cast<int>(TTI::TCC_Free)
188                  : Cost;
189     }
190     break;
191   case Intrinsic::experimental_stackmap:
192     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
193       return TTI::TCC_Free;
194     break;
195   case Intrinsic::experimental_patchpoint_void:
196   case Intrinsic::experimental_patchpoint_i64:
197     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
198       return TTI::TCC_Free;
199     break;
200   case Intrinsic::experimental_gc_statepoint:
201     if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
202       return TTI::TCC_Free;
203     break;
204   }
205   return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
206 }
207 
208 TargetTransformInfo::PopcntSupportKind
209 AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
210   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
211   if (TyWidth == 32 || TyWidth == 64)
212     return TTI::PSK_FastHardware;
213   // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
214   return TTI::PSK_Software;
215 }
216 
217 InstructionCost
218 AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
219                                       TTI::TargetCostKind CostKind) {
220   auto *RetTy = ICA.getReturnType();
221   switch (ICA.getID()) {
222   case Intrinsic::umin:
223   case Intrinsic::umax: {
224     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
225     // umin(x,y) -> sub(x,usubsat(x,y))
226     // umax(x,y) -> add(x,usubsat(y,x))
227     if (LT.second == MVT::v2i64)
228       return LT.first * 2;
229     LLVM_FALLTHROUGH;
230   }
231   case Intrinsic::smin:
232   case Intrinsic::smax: {
233     static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
234                                         MVT::v8i16, MVT::v2i32, MVT::v4i32};
235     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
236     if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
237       return LT.first;
238     break;
239   }
240   case Intrinsic::sadd_sat:
241   case Intrinsic::ssub_sat:
242   case Intrinsic::uadd_sat:
243   case Intrinsic::usub_sat: {
244     static const auto ValidSatTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
245                                      MVT::v8i16, MVT::v2i32, MVT::v4i32,
246                                      MVT::v2i64};
247     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
248     // This is a base cost of 1 for the vadd, plus 3 extract shifts if we
249     // need to extend the type, as it uses shr(qadd(shl, shl)).
250     unsigned Instrs =
251         LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1 : 4;
252     if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
253       return LT.first * Instrs;
254     break;
255   }
256   case Intrinsic::abs: {
257     static const auto ValidAbsTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
258                                      MVT::v8i16, MVT::v2i32, MVT::v4i32,
259                                      MVT::v2i64};
260     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
261     if (any_of(ValidAbsTys, [&LT](MVT M) { return M == LT.second; }))
262       return LT.first;
263     break;
264   }
265   case Intrinsic::experimental_stepvector: {
266     InstructionCost Cost = 1; // Cost of the `index' instruction
267     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
268     // Legalisation of illegal vectors involves an `index' instruction plus
269     // (LT.first - 1) vector adds.
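    // Illustrative example: if RetTy is split into four legal parts, the cost
    // is 1 (index) plus 3 vector adds.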
270     if (LT.first > 1) {
271       Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext());
272       InstructionCost AddCost =
273           getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind);
274       Cost += AddCost * (LT.first - 1);
275     }
276     return Cost;
277   }
278   case Intrinsic::bitreverse: {
279     static const CostTblEntry BitreverseTbl[] = {
280         {Intrinsic::bitreverse, MVT::i32, 1},
281         {Intrinsic::bitreverse, MVT::i64, 1},
282         {Intrinsic::bitreverse, MVT::v8i8, 1},
283         {Intrinsic::bitreverse, MVT::v16i8, 1},
284         {Intrinsic::bitreverse, MVT::v4i16, 2},
285         {Intrinsic::bitreverse, MVT::v8i16, 2},
286         {Intrinsic::bitreverse, MVT::v2i32, 2},
287         {Intrinsic::bitreverse, MVT::v4i32, 2},
288         {Intrinsic::bitreverse, MVT::v1i64, 2},
289         {Intrinsic::bitreverse, MVT::v2i64, 2},
290     };
291     const auto LegalisationCost = TLI->getTypeLegalizationCost(DL, RetTy);
292     const auto *Entry =
293         CostTableLookup(BitreverseTbl, ICA.getID(), LegalisationCost.second);
294     // i8 and i16 are promoted to the legal type i32, so use the i32 table
295     // entry plus 1 to match the actual lowering cost.
296     if (TLI->getValueType(DL, RetTy, true) == MVT::i8 ||
297         TLI->getValueType(DL, RetTy, true) == MVT::i16)
298       return LegalisationCost.first * Entry->Cost + 1;
299     if (Entry)
300       return LegalisationCost.first * Entry->Cost;
301     break;
302   }
303   case Intrinsic::ctpop: {
304     static const CostTblEntry CtpopCostTbl[] = {
305         {ISD::CTPOP, MVT::v2i64, 4},
306         {ISD::CTPOP, MVT::v4i32, 3},
307         {ISD::CTPOP, MVT::v8i16, 2},
308         {ISD::CTPOP, MVT::v16i8, 1},
309         {ISD::CTPOP, MVT::i64,   4},
310         {ISD::CTPOP, MVT::v2i32, 3},
311         {ISD::CTPOP, MVT::v4i16, 2},
312         {ISD::CTPOP, MVT::v8i8,  1},
313         {ISD::CTPOP, MVT::i32,   5},
314     };
315     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
316     MVT MTy = LT.second;
317     if (const auto *Entry = CostTableLookup(CtpopCostTbl, ISD::CTPOP, MTy)) {
318       // Extra cost of +1 when illegal vector types are legalized by promoting
319       // the integer type.
320       int ExtraCost = MTy.isVector() && MTy.getScalarSizeInBits() !=
321                                             RetTy->getScalarSizeInBits()
322                           ? 1
323                           : 0;
324       return LT.first * Entry->Cost + ExtraCost;
325     }
326     break;
327   }
328   default:
329     break;
330   }
331   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
332 }
333 
334 /// Remove redundant svbool reinterpret casts that are split across control
335 /// flow by folding them through a PHI node.
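/// Illustrative (schematic) IR, assuming both incoming values are
/// convert.to.svbool of values that already have the required type:
///   %phi = phi <vscale x 16 x i1> [ %a.sv, %bb1 ], [ %b.sv, %bb2 ]
///   %r   = call @llvm.aarch64.sve.convert.from.svbool(... %phi)
/// is rewritten as a PHI over the original, unconverted values, leaving the
/// conversions dead.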
336 static Optional<Instruction *> processPhiNode(InstCombiner &IC,
337                                               IntrinsicInst &II) {
338   SmallVector<Instruction *, 32> Worklist;
339   auto RequiredType = II.getType();
340 
341   auto *PN = dyn_cast<PHINode>(II.getArgOperand(0));
342   assert(PN && "Expected Phi Node!");
343 
344   // Don't create a new Phi unless we can remove the old one.
345   if (!PN->hasOneUse())
346     return None;
347 
348   for (Value *IncValPhi : PN->incoming_values()) {
349     auto *Reinterpret = dyn_cast<IntrinsicInst>(IncValPhi);
350     if (!Reinterpret ||
351         Reinterpret->getIntrinsicID() !=
352             Intrinsic::aarch64_sve_convert_to_svbool ||
353         RequiredType != Reinterpret->getArgOperand(0)->getType())
354       return None;
355   }
356 
357   // Create the new Phi
358   LLVMContext &Ctx = PN->getContext();
359   IRBuilder<> Builder(Ctx);
360   Builder.SetInsertPoint(PN);
361   PHINode *NPN = Builder.CreatePHI(RequiredType, PN->getNumIncomingValues());
362   Worklist.push_back(PN);
363 
364   for (unsigned I = 0; I < PN->getNumIncomingValues(); I++) {
365     auto *Reinterpret = cast<Instruction>(PN->getIncomingValue(I));
366     NPN->addIncoming(Reinterpret->getOperand(0), PN->getIncomingBlock(I));
367     Worklist.push_back(Reinterpret);
368   }
369 
370   // Cleanup Phi Node and reinterprets
371   return IC.replaceInstUsesWith(II, NPN);
372 }
373 
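// Simplify convert.from.svbool by walking back through a chain of to/from
// svbool conversions. Illustrative (schematic) example:
//   %sv = call @llvm.aarch64.sve.convert.to.svbool(<vscale x 4 x i1> %p)
//   %r  = call @llvm.aarch64.sve.convert.from.svbool(... %sv)  ; nxv4i1 result
// can be replaced by %p, provided no step in the chain has fewer lanes than
// the result (which would imply zeroing of the extra lanes).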
374 static Optional<Instruction *> instCombineConvertFromSVBool(InstCombiner &IC,
375                                                             IntrinsicInst &II) {
376   // If the reinterpret instruction operand is a PHI Node
377   if (isa<PHINode>(II.getArgOperand(0)))
378     return processPhiNode(IC, II);
379 
380   SmallVector<Instruction *, 32> CandidatesForRemoval;
381   Value *Cursor = II.getOperand(0), *EarliestReplacement = nullptr;
382 
383   const auto *IVTy = cast<VectorType>(II.getType());
384 
385   // Walk the chain of conversions.
386   while (Cursor) {
387     // If the type of the cursor has fewer lanes than the final result, zeroing
388     // must take place, which breaks the equivalence chain.
389     const auto *CursorVTy = cast<VectorType>(Cursor->getType());
390     if (CursorVTy->getElementCount().getKnownMinValue() <
391         IVTy->getElementCount().getKnownMinValue())
392       break;
393 
394     // If the cursor has the same type as I, it is a viable replacement.
395     if (Cursor->getType() == IVTy)
396       EarliestReplacement = Cursor;
397 
398     auto *IntrinsicCursor = dyn_cast<IntrinsicInst>(Cursor);
399 
400     // If this is not an SVE conversion intrinsic, this is the end of the chain.
401     if (!IntrinsicCursor || !(IntrinsicCursor->getIntrinsicID() ==
402                                   Intrinsic::aarch64_sve_convert_to_svbool ||
403                               IntrinsicCursor->getIntrinsicID() ==
404                                   Intrinsic::aarch64_sve_convert_from_svbool))
405       break;
406 
407     CandidatesForRemoval.insert(CandidatesForRemoval.begin(), IntrinsicCursor);
408     Cursor = IntrinsicCursor->getOperand(0);
409   }
410 
411   // If no viable replacement in the conversion chain was found, there is
412   // nothing to do.
413   if (!EarliestReplacement)
414     return None;
415 
416   return IC.replaceInstUsesWith(II, EarliestReplacement);
417 }
418 
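// Fold a predicated dup governed by ptrue(vl1) into an insert into lane zero.
// Illustrative (schematic) example:
//   %pg = call @llvm.aarch64.sve.ptrue(i32 vl1)
//   %r  = call @llvm.aarch64.sve.dup(%passthru, %pg, %x)
// becomes
//   %r  = insertelement %passthru, %x, i64 0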
419 static Optional<Instruction *> instCombineSVEDup(InstCombiner &IC,
420                                                  IntrinsicInst &II) {
421   IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
422   if (!Pg)
423     return None;
424 
425   if (Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
426     return None;
427 
428   const auto PTruePattern =
429       cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
430   if (PTruePattern != AArch64SVEPredPattern::vl1)
431     return None;
432 
433   // The intrinsic is inserting into lane zero so use an insert instead.
434   auto *IdxTy = Type::getInt64Ty(II.getContext());
435   auto *Insert = InsertElementInst::Create(
436       II.getArgOperand(0), II.getArgOperand(2), ConstantInt::get(IdxTy, 0));
437   Insert->insertBefore(&II);
438   Insert->takeName(&II);
439 
440   return IC.replaceInstUsesWith(II, Insert);
441 }
442 
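// Match a cmpne of an all-active predicate comparing a dupq'd constant fixed
// vector against a zero splat, and materialise the result directly: either a
// constant all-false predicate, or a ptrue of an element size that reproduces
// the constant pattern. The checks below spell out the exact shape accepted.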
443 static Optional<Instruction *> instCombineSVECmpNE(InstCombiner &IC,
444                                                    IntrinsicInst &II) {
445   LLVMContext &Ctx = II.getContext();
446   IRBuilder<> Builder(Ctx);
447   Builder.SetInsertPoint(&II);
448 
449   // Check that the predicate is all active
450   auto *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(0));
451   if (!Pg || Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
452     return None;
453 
454   const auto PTruePattern =
455       cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
456   if (PTruePattern != AArch64SVEPredPattern::all)
457     return None;
458 
459   // Check that we have a compare of zero..
460   auto *DupX = dyn_cast<IntrinsicInst>(II.getArgOperand(2));
461   if (!DupX || DupX->getIntrinsicID() != Intrinsic::aarch64_sve_dup_x)
462     return None;
463 
464   auto *DupXArg = dyn_cast<ConstantInt>(DupX->getArgOperand(0));
465   if (!DupXArg || !DupXArg->isZero())
466     return None;
467 
468   // ..against a dupq
469   auto *DupQLane = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
470   if (!DupQLane ||
471       DupQLane->getIntrinsicID() != Intrinsic::aarch64_sve_dupq_lane)
472     return None;
473 
474   // Where the dupq is a lane 0 replicate of a vector insert
475   if (!cast<ConstantInt>(DupQLane->getArgOperand(1))->isZero())
476     return None;
477 
478   auto *VecIns = dyn_cast<IntrinsicInst>(DupQLane->getArgOperand(0));
479   if (!VecIns ||
480       VecIns->getIntrinsicID() != Intrinsic::experimental_vector_insert)
481     return None;
482 
483   // Where the vector insert is a fixed constant vector insert into undef at
484   // index zero
485   if (!isa<UndefValue>(VecIns->getArgOperand(0)))
486     return None;
487 
488   if (!cast<ConstantInt>(VecIns->getArgOperand(2))->isZero())
489     return None;
490 
491   auto *ConstVec = dyn_cast<Constant>(VecIns->getArgOperand(1));
492   if (!ConstVec)
493     return None;
494 
495   auto *VecTy = dyn_cast<FixedVectorType>(ConstVec->getType());
496   auto *OutTy = dyn_cast<ScalableVectorType>(II.getType());
497   if (!VecTy || !OutTy || VecTy->getNumElements() != OutTy->getMinNumElements())
498     return None;
499 
500   unsigned NumElts = VecTy->getNumElements();
501   unsigned PredicateBits = 0;
502 
503   // Expand intrinsic operands to a 16-bit byte level predicate
504   for (unsigned I = 0; I < NumElts; ++I) {
505     auto *Arg = dyn_cast<ConstantInt>(ConstVec->getAggregateElement(I));
506     if (!Arg)
507       return None;
508     if (!Arg->isZero())
509       PredicateBits |= 1 << (I * (16 / NumElts));
510   }
511 
512   // If all bits are zero bail early with an empty predicate
513   if (PredicateBits == 0) {
514     auto *PFalse = Constant::getNullValue(II.getType());
515     PFalse->takeName(&II);
516     return IC.replaceInstUsesWith(II, PFalse);
517   }
518 
519   // Calculate largest predicate type used (where byte predicate is largest)
520   unsigned Mask = 8;
521   for (unsigned I = 0; I < 16; ++I)
522     if ((PredicateBits & (1 << I)) != 0)
523       Mask |= (I % 8);
524 
525   unsigned PredSize = Mask & -Mask;
526   auto *PredType = ScalableVectorType::get(
527       Type::getInt1Ty(Ctx), AArch64::SVEBitsPerBlock / (PredSize * 8));
528 
529   // Ensure all relevant bits are set
530   for (unsigned I = 0; I < 16; I += PredSize)
531     if ((PredicateBits & (1 << I)) == 0)
532       return None;
533 
534   auto *PTruePat =
535       ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
536   auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
537                                         {PredType}, {PTruePat});
538   auto *ConvertToSVBool = Builder.CreateIntrinsic(
539       Intrinsic::aarch64_sve_convert_to_svbool, {PredType}, {PTrue});
540   auto *ConvertFromSVBool =
541       Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
542                               {II.getType()}, {ConvertToSVBool});
543 
544   ConvertFromSVBool->takeName(&II);
545   return IC.replaceInstUsesWith(II, ConvertFromSVBool);
546 }
547 
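// Simplify lasta/lastb when the extracted lane is statically known.
// Illustrative examples: lastb(ptrue(vl4), %v) becomes
// "extractelement %v, i64 3", and lasta of an all-false predicate becomes an
// extract of lane 0.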
548 static Optional<Instruction *> instCombineSVELast(InstCombiner &IC,
549                                                   IntrinsicInst &II) {
550   Value *Pg = II.getArgOperand(0);
551   Value *Vec = II.getArgOperand(1);
552   bool IsAfter = II.getIntrinsicID() == Intrinsic::aarch64_sve_lasta;
553 
554   // lastX(splat(X)) --> X
555   if (auto *SplatVal = getSplatValue(Vec))
556     return IC.replaceInstUsesWith(II, SplatVal);
557 
558   auto *C = dyn_cast<Constant>(Pg);
559   if (IsAfter && C && C->isNullValue()) {
560     // The intrinsic is extracting lane 0 so use an extract instead.
561     auto *IdxTy = Type::getInt64Ty(II.getContext());
562     auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0));
563     Extract->insertBefore(&II);
564     Extract->takeName(&II);
565     return IC.replaceInstUsesWith(II, Extract);
566   }
567 
568   auto *IntrPG = dyn_cast<IntrinsicInst>(Pg);
569   if (!IntrPG)
570     return None;
571 
572   if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
573     return None;
574 
575   const auto PTruePattern =
576       cast<ConstantInt>(IntrPG->getOperand(0))->getZExtValue();
577 
578   // Can the intrinsic's predicate be converted to a known constant index?
579   unsigned Idx;
580   switch (PTruePattern) {
581   default:
582     return None;
583   case AArch64SVEPredPattern::vl1:
584     Idx = 0;
585     break;
586   case AArch64SVEPredPattern::vl2:
587     Idx = 1;
588     break;
589   case AArch64SVEPredPattern::vl3:
590     Idx = 2;
591     break;
592   case AArch64SVEPredPattern::vl4:
593     Idx = 3;
594     break;
595   case AArch64SVEPredPattern::vl5:
596     Idx = 4;
597     break;
598   case AArch64SVEPredPattern::vl6:
599     Idx = 5;
600     break;
601   case AArch64SVEPredPattern::vl7:
602     Idx = 6;
603     break;
604   case AArch64SVEPredPattern::vl8:
605     Idx = 7;
606     break;
607   case AArch64SVEPredPattern::vl16:
608     Idx = 15;
609     break;
610   }
611 
612   // Increment the index if extracting the element after the last active
613   // predicate element.
614   if (IsAfter)
615     ++Idx;
616 
617   // Ignore extracts whose index is larger than the known minimum vector
618   // length. NOTE: This is an artificial constraint where we prefer to
619   // maintain what the user asked for until an alternative is proven faster.
620   auto *PgVTy = cast<ScalableVectorType>(Pg->getType());
621   if (Idx >= PgVTy->getMinNumElements())
622     return None;
623 
624   // The intrinsic is extracting a fixed lane so use an extract instead.
625   auto *IdxTy = Type::getInt64Ty(II.getContext());
626   auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx));
627   Extract->insertBefore(&II);
628   Extract->takeName(&II);
629   return IC.replaceInstUsesWith(II, Extract);
630 }
631 
632 static Optional<Instruction *> instCombineRDFFR(InstCombiner &IC,
633                                                 IntrinsicInst &II) {
634   LLVMContext &Ctx = II.getContext();
635   IRBuilder<> Builder(Ctx);
636   Builder.SetInsertPoint(&II);
637   // Replace rdffr with predicated rdffr.z intrinsic, so that optimizePTestInstr
638   // can work with RDFFR_PP for ptest elimination.
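  // Illustratively: rdffr() ==> rdffr.z(ptrue(all)).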
639   auto *AllPat =
640       ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
641   auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
642                                         {II.getType()}, {AllPat});
643   auto *RDFFR =
644       Builder.CreateIntrinsic(Intrinsic::aarch64_sve_rdffr_z, {}, {PTrue});
645   RDFFR->takeName(&II);
646   return IC.replaceInstUsesWith(II, RDFFR);
647 }
648 
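// Fold the SVE element-count intrinsics (cntb/cnth/cntw/cntd). Illustrative
// examples: cntw(all) becomes vscale * 4, and cntw(vl4) becomes the constant
// 4, since an SVE vector always holds at least four 32-bit elements.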
649 static Optional<Instruction *>
650 instCombineSVECntElts(InstCombiner &IC, IntrinsicInst &II, unsigned NumElts) {
651   const auto Pattern = cast<ConstantInt>(II.getArgOperand(0))->getZExtValue();
652 
653   if (Pattern == AArch64SVEPredPattern::all) {
654     LLVMContext &Ctx = II.getContext();
655     IRBuilder<> Builder(Ctx);
656     Builder.SetInsertPoint(&II);
657 
658     Constant *StepVal = ConstantInt::get(II.getType(), NumElts);
659     auto *VScale = Builder.CreateVScale(StepVal);
660     VScale->takeName(&II);
661     return IC.replaceInstUsesWith(II, VScale);
662   }
663 
664   unsigned MinNumElts = 0;
665   switch (Pattern) {
666   default:
667     return None;
668   case AArch64SVEPredPattern::vl1:
669   case AArch64SVEPredPattern::vl2:
670   case AArch64SVEPredPattern::vl3:
671   case AArch64SVEPredPattern::vl4:
672   case AArch64SVEPredPattern::vl5:
673   case AArch64SVEPredPattern::vl6:
674   case AArch64SVEPredPattern::vl7:
675   case AArch64SVEPredPattern::vl8:
676     MinNumElts = Pattern;
677     break;
678   case AArch64SVEPredPattern::vl16:
679     MinNumElts = 16;
680     break;
681   }
682 
683   return NumElts >= MinNumElts
684              ? Optional<Instruction *>(IC.replaceInstUsesWith(
685                    II, ConstantInt::get(II.getType(), MinNumElts)))
686              : None;
687 }
688 
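// If both ptest operands are convert.to.svbool of values with the same
// (narrower) predicate type, perform the ptest directly on the original
// values. Illustratively: ptest.any(to.svbool(%a), to.svbool(%b)) becomes
// ptest.any(%a, %b).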
689 static Optional<Instruction *> instCombineSVEPTest(InstCombiner &IC,
690                                                    IntrinsicInst &II) {
691   IntrinsicInst *Op1 = dyn_cast<IntrinsicInst>(II.getArgOperand(0));
692   IntrinsicInst *Op2 = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
693 
694   if (Op1 && Op2 &&
695       Op1->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool &&
696       Op2->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool &&
697       Op1->getArgOperand(0)->getType() == Op2->getArgOperand(0)->getType()) {
698 
699     IRBuilder<> Builder(II.getContext());
700     Builder.SetInsertPoint(&II);
701 
702     Value *Ops[] = {Op1->getArgOperand(0), Op2->getArgOperand(0)};
703     Type *Tys[] = {Op1->getArgOperand(0)->getType()};
704 
705     auto *PTest = Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops);
706 
707     PTest->takeName(&II);
708     return IC.replaceInstUsesWith(II, PTest);
709   }
710 
711   return None;
712 }
713 
714 static Optional<Instruction *> instCombineSVEVectorMul(InstCombiner &IC,
715                                                        IntrinsicInst &II) {
716   auto *OpPredicate = II.getOperand(0);
717   auto *OpMultiplicand = II.getOperand(1);
718   auto *OpMultiplier = II.getOperand(2);
719 
720   IRBuilder<> Builder(II.getContext());
721   Builder.SetInsertPoint(&II);
722 
723   // Return true if a given instruction is an aarch64_sve_dup_x intrinsic call
724   // with a unit splat value, false otherwise.
725   auto IsUnitDupX = [](auto *I) {
726     auto *IntrI = dyn_cast<IntrinsicInst>(I);
727     if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup_x)
728       return false;
729 
730     auto *SplatValue = IntrI->getOperand(0);
731     return match(SplatValue, m_FPOne()) || match(SplatValue, m_One());
732   };
733 
734   // Return true if a given instruction is an aarch64_sve_dup intrinsic call
735   // with a unit splat value, false otherwise.
736   auto IsUnitDup = [](auto *I) {
737     auto *IntrI = dyn_cast<IntrinsicInst>(I);
738     if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup)
739       return false;
740 
741     auto *SplatValue = IntrI->getOperand(2);
742     return match(SplatValue, m_FPOne()) || match(SplatValue, m_One());
743   };
744 
745   // The OpMultiplier variable should always point to the dup (if any), so
746   // swap if necessary.
747   if (IsUnitDup(OpMultiplicand) || IsUnitDupX(OpMultiplicand))
748     std::swap(OpMultiplier, OpMultiplicand);
749 
750   if (IsUnitDupX(OpMultiplier)) {
751     // [f]mul pg (dupx 1) %n => %n
752     OpMultiplicand->takeName(&II);
753     return IC.replaceInstUsesWith(II, OpMultiplicand);
754   } else if (IsUnitDup(OpMultiplier)) {
755     // [f]mul pg (dup pg 1) %n => %n
756     auto *DupInst = cast<IntrinsicInst>(OpMultiplier);
757     auto *DupPg = DupInst->getOperand(1);
758     // TODO: this is naive. The optimization is still valid if DupPg
759     // 'encompasses' OpPredicate, not only if they're the same predicate.
760     if (OpPredicate == DupPg) {
761       OpMultiplicand->takeName(&II);
762       return IC.replaceInstUsesWith(II, OpMultiplicand);
763     }
764   }
765 
766   return None;
767 }
768 
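// Fold a tbl whose index vector is a constant splat into a splat of the
// selected element. Illustratively: tbl(%v, dup_x(2)) becomes
// splat(extractelement %v, 2), provided 2 is below the minimum element count.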
769 static Optional<Instruction *> instCombineSVETBL(InstCombiner &IC,
770                                                  IntrinsicInst &II) {
771   auto *OpVal = II.getOperand(0);
772   auto *OpIndices = II.getOperand(1);
773   VectorType *VTy = cast<VectorType>(II.getType());
774 
775   // Check whether OpIndices is an aarch64_sve_dup_x intrinsic call with
776   // constant splat value < minimal element count of result.
777   auto *DupXIntrI = dyn_cast<IntrinsicInst>(OpIndices);
778   if (!DupXIntrI || DupXIntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup_x)
779     return None;
780 
781   auto *SplatValue = dyn_cast<ConstantInt>(DupXIntrI->getOperand(0));
782   if (!SplatValue ||
783       SplatValue->getValue().uge(VTy->getElementCount().getKnownMinValue()))
784     return None;
785 
786   // Convert sve_tbl(OpVal sve_dup_x(SplatValue)) to
787   // splat_vector(extractelement(OpVal, SplatValue)) for further optimization.
788   IRBuilder<> Builder(II.getContext());
789   Builder.SetInsertPoint(&II);
790   auto *Extract = Builder.CreateExtractElement(OpVal, SplatValue);
791   auto *VectorSplat =
792       Builder.CreateVectorSplat(VTy->getElementCount(), Extract);
793 
794   VectorSplat->takeName(&II);
795   return IC.replaceInstUsesWith(II, VectorSplat);
796 }
797 
798 Optional<Instruction *>
799 AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
800                                      IntrinsicInst &II) const {
801   Intrinsic::ID IID = II.getIntrinsicID();
802   switch (IID) {
803   default:
804     break;
805   case Intrinsic::aarch64_sve_convert_from_svbool:
806     return instCombineConvertFromSVBool(IC, II);
807   case Intrinsic::aarch64_sve_dup:
808     return instCombineSVEDup(IC, II);
809   case Intrinsic::aarch64_sve_cmpne:
810   case Intrinsic::aarch64_sve_cmpne_wide:
811     return instCombineSVECmpNE(IC, II);
812   case Intrinsic::aarch64_sve_rdffr:
813     return instCombineRDFFR(IC, II);
814   case Intrinsic::aarch64_sve_lasta:
815   case Intrinsic::aarch64_sve_lastb:
816     return instCombineSVELast(IC, II);
817   case Intrinsic::aarch64_sve_cntd:
818     return instCombineSVECntElts(IC, II, 2);
819   case Intrinsic::aarch64_sve_cntw:
820     return instCombineSVECntElts(IC, II, 4);
821   case Intrinsic::aarch64_sve_cnth:
822     return instCombineSVECntElts(IC, II, 8);
823   case Intrinsic::aarch64_sve_cntb:
824     return instCombineSVECntElts(IC, II, 16);
825   case Intrinsic::aarch64_sve_ptest_any:
826   case Intrinsic::aarch64_sve_ptest_first:
827   case Intrinsic::aarch64_sve_ptest_last:
828     return instCombineSVEPTest(IC, II);
829   case Intrinsic::aarch64_sve_mul:
830   case Intrinsic::aarch64_sve_fmul:
831     return instCombineSVEVectorMul(IC, II);
832   case Intrinsic::aarch64_sve_tbl:
833     return instCombineSVETBL(IC, II);
834   }
835 
836   return None;
837 }
838 
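// Returns true when the operation maps onto one of AArch64's widening vector
// instructions. Illustrative example: adding a zero-extended v8i8 to a v8i16
// can use UADDW, and adding two extended v8i8 values can use UADDL, so the
// feeding extend is free.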
839 bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
840                                            ArrayRef<const Value *> Args) {
841 
842   // A helper that returns a vector type with the scalar type of the given
843   // type and the element count of DstTy.
844   auto toVectorTy = [&](Type *ArgTy) {
845     return VectorType::get(ArgTy->getScalarType(),
846                            cast<VectorType>(DstTy)->getElementCount());
847   };
848 
849   // Exit early if DstTy is not a vector type whose elements are at least
850   // 16-bits wide.
851   if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16)
852     return false;
853 
854   // Determine if the operation has a widening variant. We consider both the
855   // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
856   // instructions.
857   //
858   // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we
859   //       verify that their extending operands are eliminated during code
860   //       generation.
861   switch (Opcode) {
862   case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
863   case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
864     break;
865   default:
866     return false;
867   }
868 
869   // To be a widening instruction (either the "wide" or "long" versions), the
870   // second operand must be a sign- or zero extend having a single user. We
871   // only consider extends having a single user because they may otherwise not
872   // be eliminated.
873   if (Args.size() != 2 ||
874       (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])) ||
875       !Args[1]->hasOneUse())
876     return false;
877   auto *Extend = cast<CastInst>(Args[1]);
878 
879   // Legalize the destination type and ensure it can be used in a widening
880   // operation.
881   auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy);
882   unsigned DstElTySize = DstTyL.second.getScalarSizeInBits();
883   if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits())
884     return false;
885 
886   // Legalize the source type and ensure it can be used in a widening
887   // operation.
888   auto *SrcTy = toVectorTy(Extend->getSrcTy());
889   auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy);
890   unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
891   if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
892     return false;
893 
894   // Get the total number of vector elements in the legalized types.
895   InstructionCost NumDstEls =
896       DstTyL.first * DstTyL.second.getVectorMinNumElements();
897   InstructionCost NumSrcEls =
898       SrcTyL.first * SrcTyL.second.getVectorMinNumElements();
899 
900   // Return true if the legalized types have the same number of vector elements
901   // and the destination element type size is twice that of the source type.
902   return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize;
903 }
904 
905 InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
906                                                  Type *Src,
907                                                  TTI::CastContextHint CCH,
908                                                  TTI::TargetCostKind CostKind,
909                                                  const Instruction *I) {
910   int ISD = TLI->InstructionOpcodeToISD(Opcode);
911   assert(ISD && "Invalid opcode");
912 
913   // If the cast is observable, and it is used by a widening instruction (e.g.,
914   // uaddl, saddw, etc.), it may be free.
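  // Illustrative example: in "add <8 x i16> %a, (zext <8 x i8> %b)" the zext
  // feeds a UADDW and is modelled as free.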
915   if (I && I->hasOneUse()) {
916     auto *SingleUser = cast<Instruction>(*I->user_begin());
917     SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
918     if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) {
919       // If the cast is the second operand, it is free. We will generate either
920       // a "wide" or "long" version of the widening instruction.
921       if (I == SingleUser->getOperand(1))
922         return 0;
923       // If the cast is not the second operand, it will be free if it looks the
924       // same as the second operand. In this case, we will generate a "long"
925       // version of the widening instruction.
926       if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1)))
927         if (I->getOpcode() == unsigned(Cast->getOpcode()) &&
928             cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy())
929           return 0;
930     }
931   }
932 
933   // TODO: Allow non-throughput costs that aren't binary.
934   auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
935     if (CostKind != TTI::TCK_RecipThroughput)
936       return Cost == 0 ? 0 : 1;
937     return Cost;
938   };
939 
940   EVT SrcTy = TLI->getValueType(DL, Src);
941   EVT DstTy = TLI->getValueType(DL, Dst);
942 
943   if (!SrcTy.isSimple() || !DstTy.isSimple())
944     return AdjustCost(
945         BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
946 
947   static const TypeConversionCostTblEntry
948   ConversionTbl[] = {
949     { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
950     { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
951     { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
952     { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
953 
954     // Truncations on nxvmiN
955     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 1 },
956     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 1 },
957     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 1 },
958     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 1 },
959     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 1 },
960     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 2 },
961     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 1 },
962     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 3 },
963     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 5 },
964     { ISD::TRUNCATE, MVT::nxv16i1, MVT::nxv16i8, 1 },
965     { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 },
966     { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 },
967     { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 },
968     { ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 2 },
969     { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 },
970     { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 },
971 
972     // The number of shll instructions for the extension.
973     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
974     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
975     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
976     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
977     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
978     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
979     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
980     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
981     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
982     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
983     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
984     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
985     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
986     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
987     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
988     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
989 
990     // LowerVectorINT_TO_FP:
991     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
992     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
993     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
994     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
995     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
996     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
997 
998     // Complex: to v2f32
999     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
1000     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
1001     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
1002     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
1003     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
1004     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
1005 
1006     // Complex: to v4f32
1007     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
1008     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
1009     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
1010     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
1011 
1012     // Complex: to v8f32
1013     { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
1014     { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
1015     { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
1016     { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
1017 
1018     // Complex: to v16f32
1019     { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
1020     { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
1021 
1022     // Complex: to v2f64
1023     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
1024     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
1025     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
1026     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
1027     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
1028     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
1029 
1030 
1031     // LowerVectorFP_TO_INT
1032     { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
1033     { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
1034     { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
1035     { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
1036     { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
1037     { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
1038 
1039     // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
1040     { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
1041     { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
1042     { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
1043     { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
1044     { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
1045     { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },
1046 
1047     // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
1048     { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
1049     { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
1050     { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
1051     { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },
1052 
1053     // Complex, from nxv2f32.
1054     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
1055     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
1056     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
1057     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },
1058     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
1059     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
1060     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
1061     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },
1062 
1063     // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
1064     { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
1065     { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
1066     { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
1067     { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
1068     { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
1069     { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
1070 
1071     // Complex, from nxv2f64.
1072     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
1073     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
1074     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
1075     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f64, 1 },
1076     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
1077     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
1078     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
1079     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f64, 1 },
1080 
1081     // Complex, from nxv4f32.
1082     { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
1083     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
1084     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
1085     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f32, 1 },
1086     { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
1087     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
1088     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
1089     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f32, 1 },
1090 
1091     // Complex, from nxv8f64. Illegal -> illegal conversions not required.
1092     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
1093     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f64, 7 },
1094     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
1095     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f64, 7 },
1096 
1097     // Complex, from nxv4f64. Illegal -> illegal conversions not required.
1098     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
1099     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
1100     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f64, 3 },
1101     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
1102     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
1103     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f64, 3 },
1104 
1105     // Complex, from nxv8f32. Illegal -> illegal conversions not required.
1106     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
1107     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f32, 3 },
1108     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
1109     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f32, 3 },
1110 
1111     // Complex, from nxv8f16.
1112     { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
1113     { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
1114     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
1115     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f16, 1 },
1116     { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
1117     { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
1118     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
1119     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f16, 1 },
1120 
1121     // Complex, from nxv4f16.
1122     { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
1123     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
1124     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
1125     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f16, 1 },
1126     { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
1127     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
1128     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
1129     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f16, 1 },
1130 
1131     // Complex, from nxv2f16.
1132     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
1133     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
1134     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
1135     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f16, 1 },
1136     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
1137     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
1138     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
1139     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f16, 1 },
1140 
1141     // Truncate from nxvmf32 to nxvmf16.
1142     { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 },
1143     { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 },
1144     { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 },
1145 
1146     // Truncate from nxvmf64 to nxvmf16.
1147     { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 },
1148     { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 },
1149     { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 },
1150 
1151     // Truncate from nxvmf64 to nxvmf32.
1152     { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 },
1153     { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 },
1154     { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 },
1155 
1156     // Extend from nxvmf16 to nxvmf32.
1157     { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1},
1158     { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1},
1159     { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2},
1160 
1161     // Extend from nxvmf16 to nxvmf64.
1162     { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1},
1163     { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2},
1164     { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4},
1165 
1166     // Extend from nxvmf32 to nxvmf64.
1167     { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1},
1168     { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2},
1169     { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6},
1170 
1171   };
1172 
1173   if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
1174                                                  DstTy.getSimpleVT(),
1175                                                  SrcTy.getSimpleVT()))
1176     return AdjustCost(Entry->Cost);
1177 
1178   return AdjustCost(
1179       BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
1180 }
1181 
1182 InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode,
1183                                                          Type *Dst,
1184                                                          VectorType *VecTy,
1185                                                          unsigned Index) {
1186 
1187   // Make sure we were given a valid extend opcode.
1188   assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
1189          "Invalid opcode");
1190 
1191   // We are extending an element we extract from a vector, so the source type
1192   // of the extend is the element type of the vector.
1193   auto *Src = VecTy->getElementType();
1194 
1195   // Sign- and zero-extends are for integer types only.
1196   assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");
1197 
1198   // Get the cost for the extract. We compute the cost (if any) for the extend
1199   // below.
1200   InstructionCost Cost =
1201       getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);
1202 
1203   // Legalize the types.
1204   auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
1205   auto DstVT = TLI->getValueType(DL, Dst);
1206   auto SrcVT = TLI->getValueType(DL, Src);
1207   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
1208 
1209   // If the resulting type is still a vector and the destination type is legal,
1210   // we may get the extension for free. If not, get the default cost for the
1211   // extend.
1212   if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
1213     return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
1214                                    CostKind);
1215 
1216   // The destination type should be larger than the element type. If not, get
1217   // the default cost for the extend.
1218   if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits())
1219     return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
1220                                    CostKind);
1221 
1222   switch (Opcode) {
1223   default:
1224     llvm_unreachable("Opcode should be either SExt or ZExt");
1225 
1226   // For sign-extends, we only need a smov, which performs the extension
1227   // automatically.
1228   case Instruction::SExt:
1229     return Cost;
1230 
1231   // For zero-extends, the extend is performed automatically by a umov unless
1232   // the destination type is i64 and the element type is i8 or i16.
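  // Illustrative example: zero-extending an extracted i16 lane to i32 is just
  // "umov w0, v0.h[1]", whereas the model charges an extra extend when the
  // destination is i64 and the element is i8 or i16.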
1233   case Instruction::ZExt:
1234     if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
1235       return Cost;
1236   }
1237 
1238   // If we are unable to perform the extend for free, get the default cost.
1239   return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
1240                                  CostKind);
1241 }
1242 
1243 InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
1244                                                TTI::TargetCostKind CostKind,
1245                                                const Instruction *I) {
1246   if (CostKind != TTI::TCK_RecipThroughput)
1247     return Opcode == Instruction::PHI ? 0 : 1;
1248   assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
1249   // Branches are assumed to be predicted.
1250   return 0;
1251 }
1252 
1253 InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
1254                                                    unsigned Index) {
1255   assert(Val->isVectorTy() && "This must be a vector type");
1256 
1257   if (Index != -1U) {
1258     // Legalize the type.
1259     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
1260 
1261     // This type is legalized to a scalar type.
1262     if (!LT.second.isVector())
1263       return 0;
1264 
1265     // The type may be split. Normalize the index to the new type.
1266     unsigned Width = LT.second.getVectorNumElements();
1267     Index = Index % Width;
1268 
1269     // The element at index zero is already inside the vector.
1270     if (Index == 0)
1271       return 0;
1272   }
1273 
1274   // All other insert/extracts cost this much.
1275   return ST->getVectorInsertExtractBaseCost();
1276 }
1277 
1278 InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
1279     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1280     TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
1281     TTI::OperandValueProperties Opd1PropInfo,
1282     TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
1283     const Instruction *CxtI) {
1284   // TODO: Handle more cost kinds.
1285   if (CostKind != TTI::TCK_RecipThroughput)
1286     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1287                                          Opd2Info, Opd1PropInfo,
1288                                          Opd2PropInfo, Args, CxtI);
1289 
1290   // Legalize the type.
1291   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
1292 
1293   // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
1294   // add in the widening overhead specified by the sub-target. Since the
1295   // extends feeding widening instructions are performed automatically, they
1296   // aren't present in the generated code and have a zero cost. By adding a
1297   // widening overhead here, we attach the total cost of the combined operation
1298   // to the widening instruction.
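       // For example (illustrative): an 'add <8 x i16>' whose operands are both
       // zero-extends from <8 x i8> can be selected as a single uaddl, so the
       // extends are free and the extra cost is attached to the add here.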
1299   InstructionCost Cost = 0;
1300   if (isWideningInstruction(Ty, Opcode, Args))
1301     Cost += ST->getWideningBaseCost();
1302 
1303   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1304 
1305   switch (ISD) {
1306   default:
1307     return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1308                                                 Opd2Info,
1309                                                 Opd1PropInfo, Opd2PropInfo);
1310   case ISD::SDIV:
1311     if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
1312         Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
1313       // On AArch64, scalar signed division by a power-of-two constant is
1314       // normally expanded to the sequence ADD + CMP + SELECT + SRA.
1315       // The OperandValue properties may not be the same as those of the
1316       // previous operation; conservatively assume OP_None.
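           // For example (illustrative), 'sdiv i32 %x, 4' is typically lowered
           // to something like:
           //   add  w8, w0, #3
           //   cmp  w0, #0
           //   csel w8, w8, w0, lt
           //   asr  w0, w8, #2
           // which the four per-operation costs below approximate.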
1317       Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
1318                                      Opd1Info, Opd2Info,
1319                                      TargetTransformInfo::OP_None,
1320                                      TargetTransformInfo::OP_None);
1321       Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
1322                                      Opd1Info, Opd2Info,
1323                                      TargetTransformInfo::OP_None,
1324                                      TargetTransformInfo::OP_None);
1325       Cost += getArithmeticInstrCost(Instruction::Select, Ty, CostKind,
1326                                      Opd1Info, Opd2Info,
1327                                      TargetTransformInfo::OP_None,
1328                                      TargetTransformInfo::OP_None);
1329       Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
1330                                      Opd1Info, Opd2Info,
1331                                      TargetTransformInfo::OP_None,
1332                                      TargetTransformInfo::OP_None);
1333       return Cost;
1334     }
1335     LLVM_FALLTHROUGH;
1336   case ISD::UDIV:
1337     if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
1338       auto VT = TLI->getValueType(DL, Ty);
1339       if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
1340         // Vector signed division by a constant is expanded to the
1341         // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
1342         // to MULHU + SUB + SRL + ADD + SRL.
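             // The return below approximates that sequence as two multiplies,
             // two adds and two shifts, plus one extra instruction.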
1343         InstructionCost MulCost = getArithmeticInstrCost(
1344             Instruction::Mul, Ty, CostKind, Opd1Info, Opd2Info,
1345             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1346         InstructionCost AddCost = getArithmeticInstrCost(
1347             Instruction::Add, Ty, CostKind, Opd1Info, Opd2Info,
1348             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1349         InstructionCost ShrCost = getArithmeticInstrCost(
1350             Instruction::AShr, Ty, CostKind, Opd1Info, Opd2Info,
1351             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1352         return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
1353       }
1354     }
1355 
1356     Cost += BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1357                                           Opd2Info,
1358                                           Opd1PropInfo, Opd2PropInfo);
1359     if (Ty->isVectorTy()) {
1360       // On AArch64, vector divisions are not supported natively and are
1361       // expanded into a scalar division for each pair of elements.
1362       Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty, CostKind,
1363                                      Opd1Info, Opd2Info, Opd1PropInfo,
1364                                      Opd2PropInfo);
1365       Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind,
1366                                      Opd1Info, Opd2Info, Opd1PropInfo,
1367                                      Opd2PropInfo);
1368       // TODO: if one of the arguments is scalar, then it's not necessary to
1369       // double the cost of handling the vector elements.
1370       Cost += Cost;
1371     }
1372     return Cost;
1373 
1374   case ISD::MUL:
1375     if (LT.second != MVT::v2i64)
1376       return (Cost + 1) * LT.first;
1377     // Since we do not have a MUL.2d instruction, a mul <2 x i64> is expensive
1378     // as elements are extracted from the vectors and the muls scalarized.
1379     // As getScalarizationOverhead is a bit too pessimistic, we estimate the
1380     // cost for an i64 vector directly here, which is:
1381     // - four i64 extracts,
1382     // - two i64 inserts, and
1383     // - two muls.
1384     // So, for a v2i64 with LT.first = 1 the cost is 8, and for a v4i64 with
1385     // LT.first = 2 the cost is 16.
1386     return LT.first * 8;
1387   case ISD::ADD:
1388   case ISD::XOR:
1389   case ISD::OR:
1390   case ISD::AND:
1391     // These nodes are marked as 'custom' for combining purposes only.
1392     // We know that they are legal. See LowerAdd in ISelLowering.
1393     return (Cost + 1) * LT.first;
1394 
1395   case ISD::FADD:
1396     // These nodes are marked as 'custom' just to lower them to SVE.
1397     // We know said lowering will incur no additional cost.
1398     if (isa<FixedVectorType>(Ty) && !Ty->getScalarType()->isFP128Ty())
1399       return (Cost + 2) * LT.first;
1400 
1401     return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1402                                                 Opd2Info,
1403                                                 Opd1PropInfo, Opd2PropInfo);
1404   }
1405 }
1406 
1407 InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty,
1408                                                           ScalarEvolution *SE,
1409                                                           const SCEV *Ptr) {
1410   // Address computations in vectorized code with non-consecutive addresses will
1411   // likely result in more instructions compared to scalar code where the
1412   // computation can more often be merged into the index mode. The resulting
1413   // extra micro-ops can significantly decrease throughput.
1414   unsigned NumVectorInstToHideOverhead = 10;
1415   int MaxMergeDistance = 64;
1416 
1417   if (Ty->isVectorTy() && SE &&
1418       !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
1419     return NumVectorInstToHideOverhead;
1420 
1421   // In many cases the address computation is not merged into the instruction
1422   // addressing mode.
1423   return 1;
1424 }
1425 
1426 InstructionCost AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
1427                                                    Type *CondTy,
1428                                                    CmpInst::Predicate VecPred,
1429                                                    TTI::TargetCostKind CostKind,
1430                                                    const Instruction *I) {
1431   // TODO: Handle other cost kinds.
1432   if (CostKind != TTI::TCK_RecipThroughput)
1433     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1434                                      I);
1435 
1436   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1437   // Vector selects that are wider than the register width are not lowered
1438   // well, so account for that below.
1439   if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) {
1440     // We would need this many instructions to hide the scalarization happening.
1441     const int AmortizationCost = 20;
1442 
1443     // If VecPred is not set, check if we can get a predicate from the context
1444     // instruction, if its type matches the requested ValTy.
1445     if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) {
1446       CmpInst::Predicate CurrentPred;
1447       if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(),
1448                             m_Value())))
1449         VecPred = CurrentPred;
1450     }
1451     // Check if we have a compare/select chain that can be lowered using CMxx &
1452     // BFI pair.
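         // For example (illustrative): 'select (icmp eq a, b), x, y' on
         // <4 x i32> can be lowered to a cmeq feeding a bsl/bif, so legal NEON
         // types only pay their legalization factor here.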
1453     if (CmpInst::isIntPredicate(VecPred)) {
1454       static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
1455                                           MVT::v8i16, MVT::v2i32, MVT::v4i32,
1456                                           MVT::v2i64};
1457       auto LT = TLI->getTypeLegalizationCost(DL, ValTy);
1458       if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
1459         return LT.first;
1460     }
1461 
1462     static const TypeConversionCostTblEntry
1463     VectorSelectTbl[] = {
1464       { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
1465       { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
1466       { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
1467       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
1468       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
1469       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
1470     };
1471 
1472     EVT SelCondTy = TLI->getValueType(DL, CondTy);
1473     EVT SelValTy = TLI->getValueType(DL, ValTy);
1474     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
1475       if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
1476                                                      SelCondTy.getSimpleVT(),
1477                                                      SelValTy.getSimpleVT()))
1478         return Entry->Cost;
1479     }
1480   }
1481   // The base case handles scalable vectors fine for now, since it treats the
1482   // cost as 1 * legalization cost.
1483   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
1484 }
1485 
1486 AArch64TTIImpl::TTI::MemCmpExpansionOptions
1487 AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
1488   TTI::MemCmpExpansionOptions Options;
1489   if (ST->requiresStrictAlign()) {
1490     // TODO: Add cost modeling for strict align. Misaligned loads expand to
1491     // a bunch of instructions when strict align is enabled.
1492     return Options;
1493   }
1494   Options.AllowOverlappingLoads = true;
1495   Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
1496   Options.NumLoadsPerBlock = Options.MaxNumLoads;
1497   // TODO: Though vector loads usually perform well on AArch64, on some targets
1498   // they may wake up the FP unit, which raises power consumption. Perhaps they
1499   // could be used without restriction at -O3.
1500   Options.LoadSizes = {8, 4, 2, 1};
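       // For example (illustrative): with these load sizes, a 16-byte memcmp
       // can be expanded inline into two 8-byte loads per buffer plus compares
       // instead of a libc call.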
1501   return Options;
1502 }
1503 
1504 InstructionCost
1505 AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
1506                                       Align Alignment, unsigned AddressSpace,
1507                                       TTI::TargetCostKind CostKind) {
1508   if (!isa<ScalableVectorType>(Src))
1509     return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1510                                         CostKind);
1511   auto LT = TLI->getTypeLegalizationCost(DL, Src);
1512   if (!LT.first.isValid())
1513     return InstructionCost::getInvalid();
1514 
1515   // The code generator cannot yet handle scalable vectors of
1516   // <vscale x 1 x eltty>, so return an invalid cost to avoid selecting it.
1517   // This restriction will be removed once code generation for these types is
1518   // sufficiently reliable.
1519   if (cast<VectorType>(Src)->getElementCount() == ElementCount::getScalable(1))
1520     return InstructionCost::getInvalid();
1521 
1522   return LT.first * 2;
1523 }
1524 
1525 InstructionCost AArch64TTIImpl::getGatherScatterOpCost(
1526     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1527     Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
1528 
1529   if (!isa<ScalableVectorType>(DataTy))
1530     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1531                                          Alignment, CostKind, I);
1532   auto *VT = cast<VectorType>(DataTy);
1533   auto LT = TLI->getTypeLegalizationCost(DL, DataTy);
1534   if (!LT.first.isValid())
1535     return InstructionCost::getInvalid();
1536 
1537   // The code generator cannot yet handle scalable vectors of
1538   // <vscale x 1 x eltty>, so return an invalid cost to avoid selecting it.
1539   // This restriction will be removed once code generation for these types is
1540   // sufficiently reliable.
1541   if (cast<VectorType>(DataTy)->getElementCount() ==
1542       ElementCount::getScalable(1))
1543     return InstructionCost::getInvalid();
1544 
1545   ElementCount LegalVF = LT.second.getVectorElementCount();
1546   InstructionCost MemOpCost =
1547       getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind, I);
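       // Model the gather/scatter as one scalar memory access per (maximum)
       // vector element, scaled by the legalization factor.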
1548   return LT.first * MemOpCost * getMaxNumElements(LegalVF);
1549 }
1550 
1551 bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
1552   return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
1553 }
1554 
1555 InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
1556                                                 MaybeAlign Alignment,
1557                                                 unsigned AddressSpace,
1558                                                 TTI::TargetCostKind CostKind,
1559                                                 const Instruction *I) {
1560   EVT VT = TLI->getValueType(DL, Ty, true);
1561   // Type legalization can't handle structs
1562   if (VT == MVT::Other)
1563     return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace,
1564                                   CostKind);
1565 
1566   auto LT = TLI->getTypeLegalizationCost(DL, Ty);
1567   if (!LT.first.isValid())
1568     return InstructionCost::getInvalid();
1569 
1570   // The code generator cannot yet handle scalable vectors of
1571   // <vscale x 1 x eltty>, so return an invalid cost to avoid selecting it.
1572   // This restriction will be removed once code generation for these types is
1573   // sufficiently reliable.
1574   if (auto *VTy = dyn_cast<ScalableVectorType>(Ty))
1575     if (VTy->getElementCount() == ElementCount::getScalable(1))
1576       return InstructionCost::getInvalid();
1577 
1578   // TODO: consider latency as well for TCK_SizeAndLatency.
1579   if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
1580     return LT.first;
1581 
1582   if (CostKind != TTI::TCK_RecipThroughput)
1583     return 1;
1584 
1585   if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
1586       LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) {
1587     // Unaligned stores are extremely inefficient. We don't split all
1588     // unaligned 128-bit stores because of the negative impact that has been
1589     // shown in practice on inlined block copy code.
1590     // We make such stores expensive so that we will only vectorize if there
1591     // are 6 other instructions getting vectorized.
1592     const int AmortizationCost = 6;
1593 
1594     return LT.first * 2 * AmortizationCost;
1595   }
1596 
1597   // Check truncating stores and extending loads.
1598   if (useNeonVector(Ty) &&
1599       Ty->getScalarSizeInBits() != LT.second.getScalarSizeInBits()) {
1600     // v4i8 types are lowered to a scalar load/store and sshll/xtn.
1601     if (VT == MVT::v4i8)
1602       return 2;
1603     // Otherwise we need to scalarize.
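         // This is roughly one lane move plus one scalar memory access per
         // element (an assumption of this model, not an exact instruction
         // count).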
1604     return cast<FixedVectorType>(Ty)->getNumElements() * 2;
1605   }
1606 
1607   return LT.first;
1608 }
1609 
1610 InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
1611     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1612     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1613     bool UseMaskForCond, bool UseMaskForGaps) {
1614   assert(Factor >= 2 && "Invalid interleave factor");
1615   auto *VecVTy = cast<FixedVectorType>(VecTy);
1616 
1617   if (!UseMaskForCond && !UseMaskForGaps &&
1618       Factor <= TLI->getMaxSupportedInterleaveFactor()) {
1619     unsigned NumElts = VecVTy->getNumElements();
1620     auto *SubVecTy =
1621         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1622 
1623     // ldN/stN only support legal vector types of 64 or 128 bits in size.
1624     // Accesses with vector types that are a multiple of 128 bits can be
1625     // matched to more than one ldN/stN instruction.
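         // For example (illustrative): a factor-2 interleaved load of
         // <16 x i32> de-interleaves into two <8 x i32> parts; each part spans
         // two 128-bit registers, so two ld2 instructions are needed and the
         // cost below is 2 * 2 = 4.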
1626     if (NumElts % Factor == 0 &&
1627         TLI->isLegalInterleavedAccessType(SubVecTy, DL))
1628       return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
1629   }
1630 
1631   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1632                                            Alignment, AddressSpace, CostKind,
1633                                            UseMaskForCond, UseMaskForGaps);
1634 }
1635 
1636 InstructionCost
1637 AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
1638   InstructionCost Cost = 0;
1639   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
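       // Vector values live across a call have to be spilled and reloaded,
       // since only the low 64 bits of the callee-saved FP/SIMD registers are
       // preserved; model one store plus one load for each 128-bit vector type.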
1640   for (auto *I : Tys) {
1641     if (!I->isVectorTy())
1642       continue;
1643     if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
1644         128)
1645       Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
1646               getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
1647   }
1648   return Cost;
1649 }
1650 
1651 unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
1652   return ST->getMaxInterleaveFactor();
1653 }
1654 
1655 // For Falkor, we want to avoid having too many strided loads in a loop since
1656 // that can exhaust the HW prefetcher resources.  We adjust the unroller
1657 // MaxCount preference below to attempt to ensure unrolling doesn't create too
1658 // many strided loads.
1659 static void
1660 getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1661                               TargetTransformInfo::UnrollingPreferences &UP) {
1662   enum { MaxStridedLoads = 7 };
1663   auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
1664     int StridedLoads = 0;
1665     // FIXME? We could make this more precise by looking at the CFG and
1666     // e.g. not counting loads in each side of an if-then-else diamond.
1667     for (const auto BB : L->blocks()) {
1668       for (auto &I : *BB) {
1669         LoadInst *LMemI = dyn_cast<LoadInst>(&I);
1670         if (!LMemI)
1671           continue;
1672 
1673         Value *PtrValue = LMemI->getPointerOperand();
1674         if (L->isLoopInvariant(PtrValue))
1675           continue;
1676 
1677         const SCEV *LSCEV = SE.getSCEV(PtrValue);
1678         const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
1679         if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
1680           continue;
1681 
1682         // FIXME? We could take pairing of unrolled load copies into account
1683         // by looking at the AddRec, but we would probably have to limit this
1684         // to loops with no stores or other memory optimization barriers.
1685         ++StridedLoads;
1686         // We've seen enough strided loads that seeing more won't make a
1687         // difference.
1688         if (StridedLoads > MaxStridedLoads / 2)
1689           return StridedLoads;
1690       }
1691     }
1692     return StridedLoads;
1693   };
1694 
1695   int StridedLoads = countStridedLoads(L, SE);
1696   LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
1697                     << " strided loads\n");
1698   // Pick the largest power of 2 unroll count that won't result in too many
1699   // strided loads.
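       // For example, with 3 strided loads detected this yields
       // 1 << Log2_32(7 / 3) == 2.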
1700   if (StridedLoads) {
1701     UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
1702     LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
1703                       << UP.MaxCount << '\n');
1704   }
1705 }
1706 
1707 void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1708                                              TTI::UnrollingPreferences &UP) {
1709   // Enable partial unrolling and runtime unrolling.
1710   BaseT::getUnrollingPreferences(L, SE, UP);
1711 
1712   // An inner loop is more likely to be hot, and its runtime checks can be
1713   // hoisted out by the LICM pass, so the overhead is lower; use a larger
1714   // threshold to unroll more loops.
1715   if (L->getLoopDepth() > 1)
1716     UP.PartialThreshold *= 2;
1717 
1718   // Disable partial & runtime unrolling on -Os.
1719   UP.PartialOptSizeThreshold = 0;
1720 
1721   if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
1722       EnableFalkorHWPFUnrollFix)
1723     getFalkorUnrollingPreferences(L, SE, UP);
1724 
1725   // Scan the loop: don't unroll loops with calls as this could prevent
1726   // inlining. Don't unroll vector loops either, as they don't benefit much from
1727   // unrolling.
1728   for (auto *BB : L->getBlocks()) {
1729     for (auto &I : *BB) {
1730       // Don't unroll vectorized loops.
1731       if (I.getType()->isVectorTy())
1732         return;
1733 
1734       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
1735         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
1736           if (!isLoweredToCall(F))
1737             continue;
1738         }
1739         return;
1740       }
1741     }
1742   }
1743 
1744   // Enable runtime unrolling for in-order models.
1745   // If mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so
1746   // by checking for that case we can ensure that the default behaviour is
1747   // unchanged.
1748   if (ST->getProcFamily() != AArch64Subtarget::Others &&
1749       !ST->getSchedModel().isOutOfOrder()) {
1750     UP.Runtime = true;
1751     UP.Partial = true;
1752     UP.UpperBound = true;
1753     UP.UnrollRemainder = true;
1754     UP.DefaultUnrollRuntimeCount = 4;
1755 
1756     UP.UnrollAndJam = true;
1757     UP.UnrollAndJamInnerLoopThreshold = 60;
1758   }
1759 }
1760 
1761 void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1762                                            TTI::PeelingPreferences &PP) {
1763   BaseT::getPeelingPreferences(L, SE, PP);
1764 }
1765 
1766 Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
1767                                                          Type *ExpectedType) {
1768   switch (Inst->getIntrinsicID()) {
1769   default:
1770     return nullptr;
1771   case Intrinsic::aarch64_neon_st2:
1772   case Intrinsic::aarch64_neon_st3:
1773   case Intrinsic::aarch64_neon_st4: {
1774     // Create a struct type
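         // The struct mirrors the stored operands so that a later matching load
         // intrinsic of the same memory can reuse this value instead of
         // reloading it.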
1775     StructType *ST = dyn_cast<StructType>(ExpectedType);
1776     if (!ST)
1777       return nullptr;
1778     unsigned NumElts = Inst->getNumArgOperands() - 1;
1779     if (ST->getNumElements() != NumElts)
1780       return nullptr;
1781     for (unsigned i = 0, e = NumElts; i != e; ++i) {
1782       if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
1783         return nullptr;
1784     }
1785     Value *Res = UndefValue::get(ExpectedType);
1786     IRBuilder<> Builder(Inst);
1787     for (unsigned i = 0, e = NumElts; i != e; ++i) {
1788       Value *L = Inst->getArgOperand(i);
1789       Res = Builder.CreateInsertValue(Res, L, i);
1790     }
1791     return Res;
1792   }
1793   case Intrinsic::aarch64_neon_ld2:
1794   case Intrinsic::aarch64_neon_ld3:
1795   case Intrinsic::aarch64_neon_ld4:
1796     if (Inst->getType() == ExpectedType)
1797       return Inst;
1798     return nullptr;
1799   }
1800 }
1801 
1802 bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
1803                                         MemIntrinsicInfo &Info) {
1804   switch (Inst->getIntrinsicID()) {
1805   default:
1806     break;
1807   case Intrinsic::aarch64_neon_ld2:
1808   case Intrinsic::aarch64_neon_ld3:
1809   case Intrinsic::aarch64_neon_ld4:
1810     Info.ReadMem = true;
1811     Info.WriteMem = false;
1812     Info.PtrVal = Inst->getArgOperand(0);
1813     break;
1814   case Intrinsic::aarch64_neon_st2:
1815   case Intrinsic::aarch64_neon_st3:
1816   case Intrinsic::aarch64_neon_st4:
1817     Info.ReadMem = false;
1818     Info.WriteMem = true;
1819     Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
1820     break;
1821   }
1822 
1823   switch (Inst->getIntrinsicID()) {
1824   default:
1825     return false;
1826   case Intrinsic::aarch64_neon_ld2:
1827   case Intrinsic::aarch64_neon_st2:
1828     Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
1829     break;
1830   case Intrinsic::aarch64_neon_ld3:
1831   case Intrinsic::aarch64_neon_st3:
1832     Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
1833     break;
1834   case Intrinsic::aarch64_neon_ld4:
1835   case Intrinsic::aarch64_neon_st4:
1836     Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
1837     break;
1838   }
1839   return true;
1840 }
1841 
1842 /// See if \p I should be considered for address type promotion. We check if
1843 /// \p I is a sext with the right type that is used in memory accesses. If it
1844 /// is used in a "complex" getelementptr, we allow it to be promoted without
1845 /// finding other sext instructions that sign extended the same initial value.
1846 /// A getelementptr is considered "complex" if it has more than 2 operands.
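     /// For example (illustrative), a sext of an i32 index to i64 feeding
     /// 'getelementptr inbounds [16 x i32], [16 x i32]* %a, i64 0, i64 %idx.ext'
     /// (three operands, hence "complex") is allowed to be promoted on its own.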
1847 bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
1848     const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
1849   bool Considerable = false;
1850   AllowPromotionWithoutCommonHeader = false;
1851   if (!isa<SExtInst>(&I))
1852     return false;
1853   Type *ConsideredSExtType =
1854       Type::getInt64Ty(I.getParent()->getParent()->getContext());
1855   if (I.getType() != ConsideredSExtType)
1856     return false;
1857   // See if the sext is the one with the right type and used in at least one
1858   // GetElementPtrInst.
1859   for (const User *U : I.users()) {
1860     if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
1861       Considerable = true;
1862       // A getelementptr is considered "complex" if it has more than 2
1863       // operands. We will promote a SExt used in such a complex GEP, as we
1864       // expect some computation to be merged if it is done on 64 bits.
1865       if (GEPInst->getNumOperands() > 2) {
1866         AllowPromotionWithoutCommonHeader = true;
1867         break;
1868       }
1869     }
1870   }
1871   return Considerable;
1872 }
1873 
1874 bool AArch64TTIImpl::isLegalToVectorizeReduction(
1875     const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
1876   if (!VF.isScalable())
1877     return true;
1878 
1879   Type *Ty = RdxDesc.getRecurrenceType();
1880   if (Ty->isBFloatTy() || !isElementTypeLegalForScalableVector(Ty))
1881     return false;
1882 
1883   switch (RdxDesc.getRecurrenceKind()) {
1884   case RecurKind::Add:
1885   case RecurKind::FAdd:
1886   case RecurKind::And:
1887   case RecurKind::Or:
1888   case RecurKind::Xor:
1889   case RecurKind::SMin:
1890   case RecurKind::SMax:
1891   case RecurKind::UMin:
1892   case RecurKind::UMax:
1893   case RecurKind::FMin:
1894   case RecurKind::FMax:
1895     return true;
1896   default:
1897     return false;
1898   }
1899 }
1900 
1901 InstructionCost
1902 AArch64TTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
1903                                        bool IsUnsigned,
1904                                        TTI::TargetCostKind CostKind) {
1905   if (!isa<ScalableVectorType>(Ty))
1906     return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
1907   assert((isa<ScalableVectorType>(Ty) && isa<ScalableVectorType>(CondTy)) &&
1908          "Both vectors need to be scalable");
1909 
1910   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
1911   InstructionCost LegalizationCost = 0;
1912   if (LT.first > 1) {
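         // When the type is split, each extra part has to be combined with a
         // compare and a select before the final reduction, hence LT.first - 1
         // cmp/sel pairs.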
1913     Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
1914     unsigned CmpOpcode =
1915         Ty->isFPOrFPVectorTy() ? Instruction::FCmp : Instruction::ICmp;
1916     LegalizationCost =
1917         getCmpSelInstrCost(CmpOpcode, LegalVTy, LegalVTy,
1918                            CmpInst::BAD_ICMP_PREDICATE, CostKind) +
1919         getCmpSelInstrCost(Instruction::Select, LegalVTy, LegalVTy,
1920                            CmpInst::BAD_ICMP_PREDICATE, CostKind);
1921     LegalizationCost *= LT.first - 1;
1922   }
1923 
1924   return LegalizationCost + /*Cost of horizontal reduction*/ 2;
1925 }
1926 
1927 InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
1928     unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) {
1929   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1930   InstructionCost LegalizationCost = 0;
1931   if (LT.first > 1) {
1932     Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
1933     LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
1934     LegalizationCost *= LT.first - 1;
1935   }
1936 
1937   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1938   assert(ISD && "Invalid opcode");
1939   // Add the final reduction cost for the legal horizontal reduction
1940   switch (ISD) {
1941   case ISD::ADD:
1942   case ISD::AND:
1943   case ISD::OR:
1944   case ISD::XOR:
1945   case ISD::FADD:
1946     return LegalizationCost + 2;
1947   default:
1948     return InstructionCost::getInvalid();
1949   }
1950 }
1951 
1952 InstructionCost
1953 AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
1954                                            Optional<FastMathFlags> FMF,
1955                                            TTI::TargetCostKind CostKind) {
1956   if (TTI::requiresOrderedReduction(FMF)) {
1957     if (!isa<ScalableVectorType>(ValTy))
1958       return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
1959 
1960     if (Opcode != Instruction::FAdd)
1961       return InstructionCost::getInvalid();
1962 
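         // An ordered (strict FP) reduction is a serial chain (e.g. SVE FADDA),
         // so model it as one scalar FAdd per (maximum) vector element.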
1963     auto *VTy = cast<ScalableVectorType>(ValTy);
1964     InstructionCost Cost =
1965         getArithmeticInstrCost(Opcode, VTy->getScalarType(), CostKind);
1966     Cost *= getMaxNumElements(VTy->getElementCount());
1967     return Cost;
1968   }
1969 
1970   if (isa<ScalableVectorType>(ValTy))
1971     return getArithmeticReductionCostSVE(Opcode, ValTy, CostKind);
1972 
1973   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1974   MVT MTy = LT.second;
1975   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1976   assert(ISD && "Invalid opcode");
1977 
1978   // Horizontal adds can use the 'addv' instruction. We model the cost of these
1979   // instructions as twice a normal vector add, plus 1 for each additional
1980   // legalization step (LT.first - 1). This is the only arithmetic vector
1981   // reduction operation for which we have an instruction.
1982   // OR, XOR and AND costs should match the codegen from:
1983   // OR: llvm/test/CodeGen/AArch64/reduce-or.ll
1984   // XOR: llvm/test/CodeGen/AArch64/reduce-xor.ll
1985   // AND: llvm/test/CodeGen/AArch64/reduce-and.ll
1986   static const CostTblEntry CostTblNoPairwise[]{
1987       {ISD::ADD, MVT::v8i8,   2},
1988       {ISD::ADD, MVT::v16i8,  2},
1989       {ISD::ADD, MVT::v4i16,  2},
1990       {ISD::ADD, MVT::v8i16,  2},
1991       {ISD::ADD, MVT::v4i32,  2},
1992       {ISD::OR,  MVT::v8i8,  15},
1993       {ISD::OR,  MVT::v16i8, 17},
1994       {ISD::OR,  MVT::v4i16,  7},
1995       {ISD::OR,  MVT::v8i16,  9},
1996       {ISD::OR,  MVT::v2i32,  3},
1997       {ISD::OR,  MVT::v4i32,  5},
1998       {ISD::OR,  MVT::v2i64,  3},
1999       {ISD::XOR, MVT::v8i8,  15},
2000       {ISD::XOR, MVT::v16i8, 17},
2001       {ISD::XOR, MVT::v4i16,  7},
2002       {ISD::XOR, MVT::v8i16,  9},
2003       {ISD::XOR, MVT::v2i32,  3},
2004       {ISD::XOR, MVT::v4i32,  5},
2005       {ISD::XOR, MVT::v2i64,  3},
2006       {ISD::AND, MVT::v8i8,  15},
2007       {ISD::AND, MVT::v16i8, 17},
2008       {ISD::AND, MVT::v4i16,  7},
2009       {ISD::AND, MVT::v8i16,  9},
2010       {ISD::AND, MVT::v2i32,  3},
2011       {ISD::AND, MVT::v4i32,  5},
2012       {ISD::AND, MVT::v2i64,  3},
2013   };
2014   switch (ISD) {
2015   default:
2016     break;
2017   case ISD::ADD:
2018     if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
2019       return (LT.first - 1) + Entry->Cost;
2020     break;
2021   case ISD::XOR:
2022   case ISD::AND:
2023   case ISD::OR:
2024     const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy);
2025     if (!Entry)
2026       break;
2027     auto *ValVTy = cast<FixedVectorType>(ValTy);
2028     if (!ValVTy->getElementType()->isIntegerTy(1) &&
2029         MTy.getVectorNumElements() <= ValVTy->getNumElements() &&
2030         isPowerOf2_32(ValVTy->getNumElements())) {
2031       InstructionCost ExtraCost = 0;
2032       if (LT.first != 1) {
2033         // The type needs to be split, so there is an extra cost of
2034         // LT.first - 1 arithmetic ops.
2035         auto *Ty = FixedVectorType::get(ValTy->getElementType(),
2036                                         MTy.getVectorNumElements());
2037         ExtraCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
2038         ExtraCost *= LT.first - 1;
2039       }
2040       return Entry->Cost + ExtraCost;
2041     }
2042     break;
2043   }
2044   return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
2045 }
2046 
2047 InstructionCost AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index) {
2048   static const CostTblEntry ShuffleTbl[] = {
2049       { TTI::SK_Splice, MVT::nxv16i8,  1 },
2050       { TTI::SK_Splice, MVT::nxv8i16,  1 },
2051       { TTI::SK_Splice, MVT::nxv4i32,  1 },
2052       { TTI::SK_Splice, MVT::nxv2i64,  1 },
2053       { TTI::SK_Splice, MVT::nxv2f16,  1 },
2054       { TTI::SK_Splice, MVT::nxv4f16,  1 },
2055       { TTI::SK_Splice, MVT::nxv8f16,  1 },
2056       { TTI::SK_Splice, MVT::nxv2bf16, 1 },
2057       { TTI::SK_Splice, MVT::nxv4bf16, 1 },
2058       { TTI::SK_Splice, MVT::nxv8bf16, 1 },
2059       { TTI::SK_Splice, MVT::nxv2f32,  1 },
2060       { TTI::SK_Splice, MVT::nxv4f32,  1 },
2061       { TTI::SK_Splice, MVT::nxv2f64,  1 },
2062   };
2063 
2064   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
2065   Type *LegalVTy = EVT(LT.second).getTypeForEVT(Tp->getContext());
2066   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
2067   EVT PromotedVT = LT.second.getScalarType() == MVT::i1
2068                        ? TLI->getPromotedVTForPredicate(EVT(LT.second))
2069                        : LT.second;
2070   Type *PromotedVTy = EVT(PromotedVT).getTypeForEVT(Tp->getContext());
2071   InstructionCost LegalizationCost = 0;
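       // A negative splice index (taking trailing elements of the first vector)
       // requires a predicate to be materialized first; model that as a compare
       // plus a select on the promoted type.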
2072   if (Index < 0) {
2073     LegalizationCost =
2074         getCmpSelInstrCost(Instruction::ICmp, PromotedVTy, PromotedVTy,
2075                            CmpInst::BAD_ICMP_PREDICATE, CostKind) +
2076         getCmpSelInstrCost(Instruction::Select, PromotedVTy, LegalVTy,
2077                            CmpInst::BAD_ICMP_PREDICATE, CostKind);
2078   }
2079 
2080   // Predicated splices are promoted during lowering (see
2081   // AArch64ISelLowering.cpp), so the cost is computed on the promoted type.
2082   if (LT.second.getScalarType() == MVT::i1) {
2083     LegalizationCost +=
2084         getCastInstrCost(Instruction::ZExt, PromotedVTy, LegalVTy,
2085                          TTI::CastContextHint::None, CostKind) +
2086         getCastInstrCost(Instruction::Trunc, LegalVTy, PromotedVTy,
2087                          TTI::CastContextHint::None, CostKind);
2088   }
2089   const auto *Entry =
2090       CostTableLookup(ShuffleTbl, TTI::SK_Splice, PromotedVT.getSimpleVT());
2091   assert(Entry && "Illegal Type for Splice");
2092   LegalizationCost += Entry->Cost;
2093   return LegalizationCost * LT.first;
2094 }
2095 
2096 InstructionCost AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
2097                                                VectorType *Tp,
2098                                                ArrayRef<int> Mask, int Index,
2099                                                VectorType *SubTp) {
2100   Kind = improveShuffleKindFromMask(Kind, Mask);
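       // Re-classify the shuffle from its mask first (e.g. a two-source mask
       // that only reads one source becomes SK_PermuteSingleSrc) so it can hit
       // the cheaper table entries below.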
2101   if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose ||
2102       Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc ||
2103       Kind == TTI::SK_Reverse) {
2104     static const CostTblEntry ShuffleTbl[] = {
2105       // Broadcast shuffle kinds can be performed with 'dup'.
2106       { TTI::SK_Broadcast, MVT::v8i8,  1 },
2107       { TTI::SK_Broadcast, MVT::v16i8, 1 },
2108       { TTI::SK_Broadcast, MVT::v4i16, 1 },
2109       { TTI::SK_Broadcast, MVT::v8i16, 1 },
2110       { TTI::SK_Broadcast, MVT::v2i32, 1 },
2111       { TTI::SK_Broadcast, MVT::v4i32, 1 },
2112       { TTI::SK_Broadcast, MVT::v2i64, 1 },
2113       { TTI::SK_Broadcast, MVT::v2f32, 1 },
2114       { TTI::SK_Broadcast, MVT::v4f32, 1 },
2115       { TTI::SK_Broadcast, MVT::v2f64, 1 },
2116       // Transpose shuffle kinds can be performed with 'trn1/trn2' and
2117       // 'zip1/zip2' instructions.
2118       { TTI::SK_Transpose, MVT::v8i8,  1 },
2119       { TTI::SK_Transpose, MVT::v16i8, 1 },
2120       { TTI::SK_Transpose, MVT::v4i16, 1 },
2121       { TTI::SK_Transpose, MVT::v8i16, 1 },
2122       { TTI::SK_Transpose, MVT::v2i32, 1 },
2123       { TTI::SK_Transpose, MVT::v4i32, 1 },
2124       { TTI::SK_Transpose, MVT::v2i64, 1 },
2125       { TTI::SK_Transpose, MVT::v2f32, 1 },
2126       { TTI::SK_Transpose, MVT::v4f32, 1 },
2127       { TTI::SK_Transpose, MVT::v2f64, 1 },
2128       // Select shuffle kinds.
2129       // TODO: handle vXi8/vXi16.
2130       { TTI::SK_Select, MVT::v2i32, 1 }, // mov.
2131       { TTI::SK_Select, MVT::v4i32, 2 }, // rev+trn (or similar).
2132       { TTI::SK_Select, MVT::v2i64, 1 }, // mov.
2133       { TTI::SK_Select, MVT::v2f32, 1 }, // mov.
2134       { TTI::SK_Select, MVT::v4f32, 2 }, // rev+trn (or similar).
2135       { TTI::SK_Select, MVT::v2f64, 1 }, // mov.
2136       // PermuteSingleSrc shuffle kinds.
2137       { TTI::SK_PermuteSingleSrc, MVT::v2i32, 1 }, // mov.
2138       { TTI::SK_PermuteSingleSrc, MVT::v4i32, 3 }, // perfectshuffle worst case.
2139       { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // mov.
2140       { TTI::SK_PermuteSingleSrc, MVT::v2f32, 1 }, // mov.
2141       { TTI::SK_PermuteSingleSrc, MVT::v4f32, 3 }, // perfectshuffle worst case.
2142       { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // mov.
2143       { TTI::SK_PermuteSingleSrc, MVT::v4i16, 3 }, // perfectshuffle worst case.
2144       { TTI::SK_PermuteSingleSrc, MVT::v4f16, 3 }, // perfectshuffle worst case.
2145       { TTI::SK_PermuteSingleSrc, MVT::v4bf16, 3 }, // perfectshuffle worst case.
2146       { TTI::SK_PermuteSingleSrc, MVT::v8i16, 8 }, // constpool + load + tbl
2147       { TTI::SK_PermuteSingleSrc, MVT::v8f16, 8 }, // constpool + load + tbl
2148       { TTI::SK_PermuteSingleSrc, MVT::v8bf16, 8 }, // constpool + load + tbl
2149       { TTI::SK_PermuteSingleSrc, MVT::v8i8, 8 }, // constpool + load + tbl
2150       { TTI::SK_PermuteSingleSrc, MVT::v16i8, 8 }, // constpool + load + tbl
2151       // Reverse can be lowered with `rev`.
2152       { TTI::SK_Reverse, MVT::v2i32, 1 }, // mov.
2153       { TTI::SK_Reverse, MVT::v4i32, 2 }, // REV64; EXT
2154       { TTI::SK_Reverse, MVT::v2i64, 1 }, // mov.
2155       { TTI::SK_Reverse, MVT::v2f32, 1 }, // mov.
2156       { TTI::SK_Reverse, MVT::v4f32, 2 }, // REV64; EXT
2157       { TTI::SK_Reverse, MVT::v2f64, 1 }, // mov.
2158       // Broadcast shuffle kinds for scalable vectors
2159       { TTI::SK_Broadcast, MVT::nxv16i8,  1 },
2160       { TTI::SK_Broadcast, MVT::nxv8i16,  1 },
2161       { TTI::SK_Broadcast, MVT::nxv4i32,  1 },
2162       { TTI::SK_Broadcast, MVT::nxv2i64,  1 },
2163       { TTI::SK_Broadcast, MVT::nxv2f16,  1 },
2164       { TTI::SK_Broadcast, MVT::nxv4f16,  1 },
2165       { TTI::SK_Broadcast, MVT::nxv8f16,  1 },
2166       { TTI::SK_Broadcast, MVT::nxv2bf16, 1 },
2167       { TTI::SK_Broadcast, MVT::nxv4bf16, 1 },
2168       { TTI::SK_Broadcast, MVT::nxv8bf16, 1 },
2169       { TTI::SK_Broadcast, MVT::nxv2f32,  1 },
2170       { TTI::SK_Broadcast, MVT::nxv4f32,  1 },
2171       { TTI::SK_Broadcast, MVT::nxv2f64,  1 },
2172       { TTI::SK_Broadcast, MVT::nxv16i1,  1 },
2173       { TTI::SK_Broadcast, MVT::nxv8i1,   1 },
2174       { TTI::SK_Broadcast, MVT::nxv4i1,   1 },
2175       { TTI::SK_Broadcast, MVT::nxv2i1,   1 },
2176       // Handle the cases for vector.reverse with scalable vectors
2177       { TTI::SK_Reverse, MVT::nxv16i8,  1 },
2178       { TTI::SK_Reverse, MVT::nxv8i16,  1 },
2179       { TTI::SK_Reverse, MVT::nxv4i32,  1 },
2180       { TTI::SK_Reverse, MVT::nxv2i64,  1 },
2181       { TTI::SK_Reverse, MVT::nxv2f16,  1 },
2182       { TTI::SK_Reverse, MVT::nxv4f16,  1 },
2183       { TTI::SK_Reverse, MVT::nxv8f16,  1 },
2184       { TTI::SK_Reverse, MVT::nxv2bf16, 1 },
2185       { TTI::SK_Reverse, MVT::nxv4bf16, 1 },
2186       { TTI::SK_Reverse, MVT::nxv8bf16, 1 },
2187       { TTI::SK_Reverse, MVT::nxv2f32,  1 },
2188       { TTI::SK_Reverse, MVT::nxv4f32,  1 },
2189       { TTI::SK_Reverse, MVT::nxv2f64,  1 },
2190       { TTI::SK_Reverse, MVT::nxv16i1,  1 },
2191       { TTI::SK_Reverse, MVT::nxv8i1,   1 },
2192       { TTI::SK_Reverse, MVT::nxv4i1,   1 },
2193       { TTI::SK_Reverse, MVT::nxv2i1,   1 },
2194     };
2195     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
2196     if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second))
2197       return LT.first * Entry->Cost;
2198   }
2199   if (Kind == TTI::SK_Splice && isa<ScalableVectorType>(Tp))
2200     return getSpliceCost(Tp, Index);
2201   return BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
2202 }
2203